diff --git a/README.md b/README.md
new file mode 100644
index 00000000..9be6cc60
--- /dev/null
+++ b/README.md
@@ -0,0 +1,29 @@
+# Confluent
+
+![Python 3](https://img.shields.io/badge/python-3-blue.svg) [![License](https://img.shields.io/badge/license-Apache%202.0-blue.svg)](https://github.com/xcat2/confluent/blob/master/LICENSE)
+
+Confluent is a software package to handle essential bootstrap and operation of scale-out server configurations.
+It supports stateful and stateless deployments for various operating systems.
+
+Check [this page](https://hpc.lenovo.com/users/documentation/whatisconfluent.html) for a more detailed list of features.
+
+Confluent is the modern successor of [xCAT](https://github.com/xcat2/xcat-core).
+If you're coming from xCAT, check out [this comparison](https://hpc.lenovo.com/users/documentation/confluentvxcat.html).
+
+# Documentation
+
+Confluent documentation is hosted on hpc.lenovo.com: https://hpc.lenovo.com/users/documentation/
+
+# Download
+
+Get the latest version from: https://hpc.lenovo.com/users/downloads/
+
+Check release notes on: https://hpc.lenovo.com/users/news/
+
+# Open Source License
+
+Confluent is made available under the Apache 2.0 license: https://opensource.org/license/apache-2-0
+
+# Developers
+
+Want to help? Submit a [Pull Request](https://github.com/xcat2/confluent/pulls).
diff --git a/confluent_client/COPYRIGHT b/confluent_client/COPYRIGHT
index 6bcd9986..6f435939 100644
--- a/confluent_client/COPYRIGHT
+++ b/confluent_client/COPYRIGHT
@@ -1,6 +1,8 @@
-Name: confluent-client
 Project: https://hpc.lenovo.com/users/
 Source: https://github.com/lenovo/confluent
+Upstream-Name: confluent-client
+
+All files of the confluent-client software are distributed under the terms of an Apache-2.0 license as indicated below:
 
 Files: *
 Copyright: 2014-2019 Lenovo
@@ -11,7 +13,8 @@ Copyright: 2014 IBM Corporation
            2015-2019 Lenovo
 License: Apache-2.0
 
-File: sortutil.py
+Files: sortutil.py,
+       tlvdata.py
 Copyright: 2014 IBM Corporation
            2015-2016 Lenovo
 License: Apache-2.0
@@ -19,3 +22,56 @@ License: Apache-2.0
 File: tlv.py
 Copyright: 2014 IBM Corporation
 License: Apache-2.0
+
+License: Apache-2.0
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
+
+"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
+ +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of this License; and +You must cause any modified files to carry prominent notices stating that You changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. +You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS \ No newline at end of file diff --git a/confluent_client/bin/collate b/confluent_client/bin/collate index 07095901..2a086303 100755 --- a/confluent_client/bin/collate +++ b/confluent_client/bin/collate @@ -21,6 +21,7 @@ import optparse import os +import re import select import sys @@ -84,6 +85,7 @@ fullline = sys.stdin.readline() printpending = True clearpending = False holdoff = 0 +padded = None while fullline: for line in fullline.split('\n'): if not line: @@ -92,13 +94,18 @@ while fullline: line = 'UNKNOWN: ' + line if options.log: node, output = line.split(':', 1) - output = output.lstrip() + if padded is None: + if output.startswith(' '): + padded = True + else: + padded = False + if padded: + output = re.sub(r'^ ', '', output) currlog = options.log.format(node=node, nodename=node) with open(currlog, mode='a') as log: log.write(output + '\n') continue node, output = line.split(':', 1) - output = output.lstrip() grouped.add_line(node, output) if options.watch: if not holdoff: diff --git a/confluent_client/bin/confluent2hosts b/confluent_client/bin/confluent2hosts index bbf989b1..b467e5cc 100644 --- a/confluent_client/bin/confluent2hosts +++ b/confluent_client/bin/confluent2hosts @@ -157,7 +157,7 @@ def main(): elif attrib.endswith('.ipv6_address') and val: ip6bynode[node][currnet] = val.split('/', 1)[0] elif attrib.endswith('.hostname'): - namesbynode[node][currnet] = re.split('\s+|,', val) + namesbynode[node][currnet] = re.split(r'\s+|,', val) for node in ip4bynode: mydomain = domainsbynode.get(node, None) for ipdb in (ip4bynode, ip6bynode): diff --git a/confluent_client/bin/l2traceroute b/confluent_client/bin/l2traceroute new file mode 100755 index 00000000..e8f9705e --- /dev/null +++ b/confluent_client/bin/l2traceroute @@ -0,0 +1,173 @@ +#!/usr/libexec/platform-python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2017 Lenovo +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = 'tkucherera'
+
+import optparse
+import os
+import signal
+import sys
+import subprocess
+
+try:
+    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+except AttributeError:
+    pass
+path = os.path.dirname(os.path.realpath(__file__))
+path = os.path.realpath(os.path.join(path, '..', 'lib', 'python'))
+if path.startswith('/opt'):
+    sys.path.append(path)
+
+import confluent.client as client
+
+argparser = optparse.OptionParser(
+    usage="Usage: %prog <start_node> <end_node> -i <interface> -e <eface>",
+)
+argparser.add_option('-i', '--interface', type='str',
+                     help='interface to check path against for the start node')
+argparser.add_option('-e', '--eface', type='str',
+                     help='interface to check path against for the end node')
+argparser.add_option('-c', '--cumulus', action="store_true", dest="cumulus",
+                     help='return layer 2 route through cumulus switches only')
+
+(options, args) = argparser.parse_args()
+
+try:
+    start_node = args[0]
+    end_node = args[1]
+    interface = options.interface
+    eface = options.eface
+except IndexError:
+    argparser.print_help()
+    sys.exit(1)
+
+session = client.Command()
+
+def get_neighbors(switch):
+    switch_neighbors = []
+    url = '/networking/neighbors/by-switch/{0}/by-peername/'.format(switch)
+    for neighbor in session.read(url):
+        switch = neighbor['item']['href'].strip('/')
+        if switch in all_switches:
+            switch_neighbors.append(switch)
+    return switch_neighbors
+
+def find_path(start, end, path=[]):
+    path = path + [start]
+    if start == end:
+        return path  # If start and end are the same, return the path
+
+    for node in get_neighbors(start):
+        if node not in path:
+            new_path = find_path(node, end, path)
+            if new_path:
+                return new_path  # If a path is found, return it
+
+    return None  # If no path is found, return None
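+# Illustration (hypothetical switch names): if get_neighbors reports
+# sw1 -> sw2 and sw2 -> sw3, then find_path('sw1', 'sw3') returns
+# ['sw1', 'sw2', 'sw3']. The accumulated path doubles as a visited set,
+# so loops in the switch topology are not revisited; None means the two
+# switches share no discovered link.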
+def is_cumulus(switch):
+    try:
+        read_attrib = subprocess.check_output(['nodeattrib', switch, 'hardwaremanagement.method'])
+    except subprocess.CalledProcessError:
+        return False
+    # nodeattrib output lines look like "<node>: <attribute>: <value>";
+    # in confluent, Cumulus switches are managed via the 'affluent' method
+    for attribs in read_attrib.decode('utf-8').split('\n'):
+        if len(attribs.split(':')) > 2:
+            attrib = attribs.split(':')
+            if attrib[2].strip() == 'affluent':
+                return True
+            else:
+                return False
+        else:
+            return False
+
+
+def host_to_switch(node, interface=None):
+    # first check the node config to see what switches are connected
+    # if host is in rhel can use nmstate package
+    if node in all_switches:
+        return [node]
+    switches = []
+    netarg = 'net.*.switch'
+    if interface:
+        netarg = 'net.{0}.switch'.format(interface)
+    try:
+        read_attrib = subprocess.check_output(['nodeattrib', node, netarg])
+    except subprocess.CalledProcessError:
+        return False
+    for attribs in read_attrib.decode('utf-8').split('\n'):
+        attrib = attribs.split(':')
+        try:
+            if ' net.mgt.switch' in attrib or attrib[2] == '':
+                continue
+        except IndexError:
+            continue
+        switch = attrib[2].strip()
+        if options.cumulus:
+            # -c requested: only keep switches confirmed to be Cumulus
+            if is_cumulus(switch):
+                switches.append(switch)
+        else:
+            switches.append(switch)
+    return switches
+
+def path_between_nodes(start_switches, end_switches):
+    for start_switch in start_switches:
+        for end_switch in end_switches:
+            if start_switch == end_switch:
+                return [start_switch]
+            path = find_path(start_switch, end_switch)
+            if path:
+                return path
+    # only report failure after every switch pair has been tried
+    return 'No path found'
+
+
+all_switches = []
+for res in session.read('/networking/neighbors/by-switch/'):
+    if 'error' in res:
+        sys.stderr.write(res['error'] + '\n')
+        exitcode = 1
+    else:
+        switch = (res['item']['href'].replace('/', ''))
+        all_switches.append(switch)
+
+end_nodeslist = []
+nodelist = '/noderange/{0}/nodes/'.format(end_node)
+for res in session.read(nodelist):
+    if 'error' in res:
+        sys.stderr.write(res['error'] + '\n')
+        exitcode = 1
+    else:
+        elem = (res['item']['href'].replace('/', ''))
+        end_nodeslist.append(elem)
+
+start_switches = host_to_switch(start_node, interface)
+for end_node in end_nodeslist:
+    if end_node:
+        end_switches = host_to_switch(end_node, eface)
+        if not end_switches:
+            print('Error: net.{0}.switch attribute is not valid'.format(eface if eface else '*'))
+            continue
+        path = path_between_nodes(start_switches, end_switches)
+        print(f'{start_node} to {end_node}: {path}')
+
+# TODO: don't include switches that are connected through management interfaces.
diff --git a/confluent_client/bin/nodeapply b/confluent_client/bin/nodeapply
index e39447bc..2e798742 100755
--- a/confluent_client/bin/nodeapply
+++ b/confluent_client/bin/nodeapply
@@ -102,9 +102,9 @@ def run():
             cmdv = ['ssh', sshnode] + cmdvbase + cmdstorun[0]
             if currprocs < concurrentprocs:
                 currprocs += 1
-                run_cmdv(node, cmdv, all, pipedesc)
+                run_cmdv(sshnode, cmdv, all, pipedesc)
             else:
-                pendingexecs.append((node, cmdv))
+                pendingexecs.append((sshnode, cmdv))
     if not all or exitcode:
         sys.exit(exitcode)
     rdy, _, _ = select.select(all, [], [], 10)
diff --git a/confluent_client/bin/nodeattrib b/confluent_client/bin/nodeattrib
index 8c6f078d..265fe917 100755
--- a/confluent_client/bin/nodeattrib
+++ b/confluent_client/bin/nodeattrib
@@ -22,6 +22,7 @@ import optparse
 import os
 import signal
 import sys
+import shlex
 
 try:
     signal.signal(signal.SIGPIPE, signal.SIG_DFL)
@@ -125,13 +126,14 @@ elif options.set:
         argset = argset.strip()
         if argset:
             arglist += shlex.split(argset)
-            argset = argfile.readline()
+        argset = argfile.readline()
     session.stop_if_noderange_over(noderange, options.maxnodes)
     exitcode=client.updateattrib(session,arglist,nodetype, noderange, options, None)
     if exitcode != 0:
         sys.exit(exitcode)
 
 # Lists all attributes
+
 if len(args) > 0:
     # setting output to all so it can search since if we do have something to search, we want to show all outputs even if it is blank.
     if requestargs is None:
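The nodeattrib -s loop above folds a whole batch file into one attribute list. A standalone sketch of the equivalent parsing (parse_batch is a hypothetical helper name; comment handling follows the nodeattrib(8) description later in this diff):

    import shlex

    def parse_batch(path):
        arglist = []
        with open(path) as argfile:
            for argset in argfile:
                argset = argset.strip()
                if not argset or argset.startswith('#'):
                    continue  # skip blank lines and '#' comment lines
                # shlex keeps quoted values containing spaces intact
                arglist += shlex.split(argset)
        return arglist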
diff --git a/confluent_client/bin/nodebmcpassword b/confluent_client/bin/nodebmcpassword
new file mode 100755
index 00000000..135abb96
--- /dev/null
+++ b/confluent_client/bin/nodebmcpassword
@@ -0,0 +1,121 @@
+#!/usr/libexec/platform-python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2015-2017 Lenovo
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+__author__ = 'tkucherera'
+
+from getpass import getpass
+import optparse
+import os
+import signal
+import sys
+
+try:
+    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+except AttributeError:
+    pass
+
+path = os.path.dirname(os.path.realpath(__file__))
+path = os.path.realpath(os.path.join(path, '..', 'lib', 'python'))
+if path.startswith('/opt'):
+    sys.path.append(path)
+
+import confluent.client as client
+
+argparser = optparse.OptionParser(usage="Usage: %prog <noderange> <username> <new_password>")
+argparser.add_option('-m', '--maxnodes', type='int',
+                     help='Number of nodes to affect before prompting for confirmation')
+argparser.add_option('-p', '--prompt', action='store_true',
+                     help='Prompt for password values interactively')
+argparser.add_option('-e', '--environment', action='store_true',
+                     help='Set password, but from environment variable of '
+                          'same name')
+
+(options, args) = argparser.parse_args()
+
+try:
+    noderange = args[0]
+    username = args[1]
+except IndexError:
+    argparser.print_help()
+    sys.exit(1)
+
+client.check_globbing(noderange)
+session = client.Command()
+exitcode = 0
+
+if options.prompt:
+    oneval = 1
+    twoval = 2
+    while oneval != twoval:
+        oneval = getpass('Enter pass for {0}: '.format(username))
+        twoval = getpass('Confirm pass for {0}: '.format(username))
+        if oneval != twoval:
+            print('Values did not match.')
+    new_password = twoval
+
+elif len(args) == 3:
+    if options.environment:
+        key = args[2]
+        new_password = os.environ.get(key, os.environ[key.upper()])
+    else:
+        new_password = args[2]
+else:
+    argparser.print_help()
+    sys.exit(1)
+
+errorNodes = set([])
+uid_dict = {}
+session.stop_if_noderange_over(noderange, options.maxnodes)
+
+for rsp in session.read('/noderange/{0}/configuration/management_controller/users/all'.format(noderange)):
+    databynode = rsp["databynode"]
+    for node in databynode:
+        if 'error' in rsp['databynode'][node]:
+            print(node, ':', rsp['databynode'][node]['error'])
+            errorNodes.add(node)
+            continue
+        for user in rsp['databynode'][node]['users']:
+            if user['username'] == username:
+                if not user['uid'] in uid_dict:
+                    uid_dict[user['uid']] = node
+                    continue
+                uid_dict[user['uid']] = uid_dict[user['uid']] + ',{}'.format(node)
+                break
+
+if not uid_dict:
+    print("Error: user {0} was not found on any target BMC".format(username))
+    sys.exit(1)
+
+for uid in uid_dict:
+    success = session.simple_noderange_command(uid_dict[uid], 'configuration/management_controller/users/{0}'.format(uid), new_password, key='password', errnodes=errorNodes)  # = 0 if successful
+
+allNodes = set([])
+
+for node in session.read('/noderange/{0}/nodes/'.format(noderange)):
+    if 'error' in node and success != 0:
+        sys.exit(success)
+    allNodes.add(node['item']['href'].replace("/", ""))
+
+goodNodes = allNodes - errorNodes
+
+for node in goodNodes:
+    print(node + ": Password Change Successful")
+
+sys.exit(success)
\ No newline at end of file
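nodebmcpassword groups nodes by the uid that the requested username resolves to on each BMC, then issues one password update per distinct uid. A minimal sketch of that grouping with hypothetical data (not the shipped code):

    # 'admin' maps to uid 3 on n1/n2 but uid 4 on n3:
    users_by_node = {'n1': 3, 'n2': 3, 'n3': 4}
    uid_dict = {}
    for node, uid in users_by_node.items():
        uid_dict[uid] = node if uid not in uid_dict else uid_dict[uid] + ',' + node
    # uid_dict == {3: 'n1,n2', 4: 'n3'} -> two requests, one per uid,
    # each against the comma-joined sub-noderange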
diff --git a/confluent_client/bin/nodeconfig b/confluent_client/bin/nodeconfig
index 06d512c7..4d3d17f3 100755
--- a/confluent_client/bin/nodeconfig
+++ b/confluent_client/bin/nodeconfig
@@ -303,9 +303,14 @@ else:
             '/noderange/{0}/configuration/management_controller/extended/all'.format(noderange),
             session, printbmc, options, attrprefix='bmc.')
     if options.extra:
-        rcode |= client.print_attrib_path(
-            '/noderange/{0}/configuration/management_controller/extended/extra'.format(noderange),
-            session, printextbmc, options)
+        if options.advanced:
+            rcode |= client.print_attrib_path(
+                '/noderange/{0}/configuration/management_controller/extended/extra_advanced'.format(noderange),
+                session, printextbmc, options)
+        else:
+            rcode |= client.print_attrib_path(
+                '/noderange/{0}/configuration/management_controller/extended/extra'.format(noderange),
+                session, printextbmc, options)
     if printsys or options.exclude:
         if printsys == 'all':
             printsys = []
diff --git a/confluent_client/bin/nodeconsole b/confluent_client/bin/nodeconsole
index 076913be..f05d5783 100755
--- a/confluent_client/bin/nodeconsole
+++ b/confluent_client/bin/nodeconsole
@@ -243,7 +243,7 @@ if options.windowed:
         elif 'Height' in line:
             window_height = int(line.split(':')[1])
         elif '-geometry' in line:
-            l = re.split(' |x|-|\+', line)
+            l = re.split(' |x|-|\\+', line)
             l_nosp = [ele for ele in l if ele.strip()]
             wmxo = int(l_nosp[1])
             wmyo = int(l_nosp[2])
diff --git a/confluent_client/bin/nodedeploy b/confluent_client/bin/nodedeploy
index 2417f2c5..15e78f37 100755
--- a/confluent_client/bin/nodedeploy
+++ b/confluent_client/bin/nodedeploy
@@ -81,6 +81,12 @@ def main(args):
     if not args.profile and args.network:
         sys.stderr.write('Both noderange and a profile name are required arguments to request a network deployment\n')
         return 1
+    if args.clear and args.profile:
+        sys.stderr.write(
+            'The -c/--clear option requests that no profile be deployed '
+            'and clears the current profile, so it must not be combined '
+            'with a profile name\n')
+        return 1
     if extra:
         sys.stderr.write('Unrecognized arguments: ' + repr(extra) + '\n')
     c = client.Command()
@@ -90,17 +96,6 @@ def main(args):
         if 'error' in rsp:
             sys.stderr.write(rsp['error'] + '\n')
             sys.exit(1)
-    if not args.clear and args.network and not args.prepareonly:
-        rc = c.simple_noderange_command(args.noderange, '/boot/nextdevice', 'network',
-                                        bootmode='uefi',
-                                        persistent=False,
-                                        errnodes=errnodes)
-        if errnodes:
-            sys.stderr.write(
-                'Unable to set boot device for following nodes: {0}\n'.format(
-                    ','.join(errnodes)))
-            return 1
-        rc |= c.simple_noderange_command(args.noderange, '/power/state', 'boot')
     if args.clear:
         cleararm(args.noderange, c)
         clearpending(args.noderange, c)
@@ -120,7 +115,7 @@ def main(args):
             for profname in profnames:
                 sys.stderr.write('  ' + profname + '\n')
         else:
-            sys.stderr.write('No deployment profiles available, try osdeploy fiimport or imgutil capture\n')
+            sys.stderr.write('No deployment profiles available, try osdeploy import or imgutil capture\n')
         sys.exit(1)
     armonce(args.noderange, c)
     setpending(args.noderange, args.profile, c)
@@ -166,8 +161,17 @@ def main(args):
         else:
             print('{0}: {1}{2}'.format(node, profile, armed))
         sys.exit(0)
-    if args.network and not args.prepareonly:
-        return rc
+    if not args.clear and args.network and not args.prepareonly:
+        rc = c.simple_noderange_command(args.noderange, '/boot/nextdevice', 'network',
+                                        bootmode='uefi',
+                                        persistent=False,
+                                        errnodes=errnodes)
+        if errnodes:
+            sys.stderr.write(
+                'Unable to set boot device for following nodes: {0}\n'.format(
+                    ','.join(errnodes)))
+            return 1
+        rc |= c.simple_noderange_command(args.noderange, '/power/state', 'boot')
     return 0
 
 if __name__ == '__main__':
diff --git a/confluent_client/bin/nodelist b/confluent_client/bin/nodelist
index 462ed922..c1b9c436 100755
--- a/confluent_client/bin/nodelist
+++ b/confluent_client/bin/nodelist
@@ -68,7 +68,7 @@ def main():
         else:
             elem=(res['item']['href'].replace('/', ''))
             list.append(elem)
-            print(options.delim.join(list))
+    print(options.delim.join(list))
     sys.exit(exitcode)
diff --git a/confluent_client/bin/stats b/confluent_client/bin/stats
index 7158e7a7..94af75db 100755
---
a/confluent_client/bin/stats +++ b/confluent_client/bin/stats @@ -16,13 +16,10 @@ # limitations under the License. import argparse +import base64 import csv -import fcntl import io import numpy as np - -import os -import subprocess import sys try: @@ -35,7 +32,31 @@ except ImportError: pass -def plot(gui, output, plotdata, bins): +def iterm_draw(data): + databuf = data.getbuffer() + datalen = len(databuf) + data = base64.b64encode(databuf).decode('utf8') + sys.stdout.write( + '\x1b]1337;File=inline=1;size={}:'.format(datalen)) + sys.stdout.write(data) + sys.stdout.write('\a') + sys.stdout.write('\n') + sys.stdout.flush() + + +def kitty_draw(data): + data = base64.b64encode(data.getbuffer()) + while data: + chunk, data = data[:4096], data[4096:] + m = 1 if data else 0 + sys.stdout.write('\x1b_Ga=T,f=100,m={};'.format(m)) + sys.stdout.write(chunk.decode('utf8')) + sys.stdout.write('\x1b\\') + sys.stdout.flush() + sys.stdout.write('\n') + + +def plot(gui, output, plotdata, bins, fmt): import matplotlib as mpl if gui and mpl.get_backend() == 'agg': sys.stderr.write('Error: No GUI backend available and -g specified!\n') @@ -51,8 +72,13 @@ def plot(gui, output, plotdata, bins): tdata = io.BytesIO() plt.savefig(tdata) if not gui and not output: - writer = DumbWriter() - writer.draw(tdata) + if fmt == 'sixel': + writer = DumbWriter() + writer.draw(tdata) + elif fmt == 'kitty': + kitty_draw(tdata) + elif fmt == 'iterm': + iterm_draw(tdata) return n, bins def textplot(plotdata, bins): @@ -81,7 +107,8 @@ histogram = False aparser = argparse.ArgumentParser(description='Quick access to common statistics') aparser.add_argument('-c', type=int, default=0, help='Column number to analyze (default is last column)') aparser.add_argument('-d', default=None, help='Value used to separate columns') -aparser.add_argument('-x', default=False, action='store_true', help='Output histogram in sixel format') +aparser.add_argument('-x', default=False, action='store_true', help='Output histogram in graphical format') +aparser.add_argument('-f', default='sixel', help='Format for histogram output (sixel/iterm/kitty)') aparser.add_argument('-s', default=0, help='Number of header lines to skip before processing') aparser.add_argument('-g', default=False, action='store_true', help='Open histogram in separate graphical window') aparser.add_argument('-o', default=None, help='Output histogram to the specified filename in PNG format') @@ -138,7 +165,7 @@ while data: data = list(csv.reader([data], delimiter=delimiter))[0] n = None if args.g or args.o or args.x: - n, bins = plot(args.g, args.o, plotdata, bins=args.b) + n, bins = plot(args.g, args.o, plotdata, bins=args.b, fmt=args.f) if args.t: n, bins = textplot(plotdata, bins=args.b) print('Samples: {5} Min: {3} Median: {0} Mean: {1} Max: {4} StandardDeviation: {2} Sum: {6}'.format(np.median(plotdata), np.mean(plotdata), np.std(plotdata), np.min(plotdata), np.max(plotdata), len(plotdata), np.sum(plotdata))) diff --git a/confluent_client/confluent/client.py b/confluent_client/confluent/client.py index ad29ff02..a9957b96 100644 --- a/confluent_client/confluent/client.py +++ b/confluent_client/confluent/client.py @@ -595,7 +595,7 @@ def print_attrib_path(path, session, requestargs, options, rename=None, attrpref else: printmissing.add(attr) for missing in printmissing: - sys.stderr.write('Error: {0} not a valid attribute\n'.format(attr)) + sys.stderr.write('Error: {0} not a valid attribute\n'.format(missing)) return exitcode @@ -668,6 +668,9 @@ def updateattrib(session, updateargs, 
nodetype, noderange, options, dictassign=N
     for attrib in updateargs[1:]:
         keydata[attrib] = None
     for res in session.update(targpath, keydata):
+        for node in res.get('databynode', {}):
+            for warnmsg in res['databynode'][node].get('_warnings', []):
+                sys.stderr.write('Warning: ' + warnmsg + '\n')
         if 'error' in res:
             if 'errorcode' in res:
                 exitcode = res['errorcode']
@@ -702,6 +705,14 @@ def updateattrib(session, updateargs, nodetype, noderange, options, dictassign=N
                 noderange, 'attributes/all', dictassign[key], key)
     else:
         if "=" in updateargs[1]:
+            update_ready = True
+            for arg in updateargs[1:]:
+                if '=' not in arg:
+                    update_ready = False
+                    exitcode = 1
+            if not update_ready:
+                sys.stderr.write('Error: cannot mix setting and reading attributes in one command: {0}\n'.format(str(updateargs[1:])))
+                sys.exit(exitcode)
             try:
                 for val in updateargs[1:]:
                     val = val.split('=', 1)
diff --git a/confluent_client/confluent/textgroup.py b/confluent_client/confluent/textgroup.py
index cd35b6fa..e2f0dc7f 100644
--- a/confluent_client/confluent/textgroup.py
+++ b/confluent_client/confluent/textgroup.py
@@ -98,17 +98,24 @@ class GroupedData(object):
         self.byoutput = {}
         self.header = {}
         self.client = confluentconnection
+        self.detectedpad = None
 
     def generate_byoutput(self):
         self.byoutput = {}
+        thepad = self.detectedpad if self.detectedpad else ''
         for n in self.bynode:
-            output = '\n'.join(self.bynode[n])
+            output = ''
+            for ln in self.bynode[n]:
+                output += ln.replace(thepad, '', 1) + '\n'
             if output not in self.byoutput:
                 self.byoutput[output] = set([n])
             else:
                 self.byoutput[output].add(n)
 
     def add_line(self, node, line):
+        wspc = re.search(r'^\s*', line).group()
+        if self.detectedpad is None or len(wspc) < len(self.detectedpad):
+            self.detectedpad = wspc
         if node not in self.bynode:
             self.bynode[node] = [line]
         else:
@@ -219,4 +226,4 @@ if __name__ == '__main__':
         if not line:
             continue
         groupoutput.add_line(*line.split(': ', 1))
-    groupoutput.print_deviants()
\ No newline at end of file
+    groupoutput.print_deviants()
diff --git a/confluent_client/confluent_client.spec.tmpl b/confluent_client/confluent_client.spec.tmpl
index 820b0bbb..ee786175 100644
--- a/confluent_client/confluent_client.spec.tmpl
+++ b/confluent_client/confluent_client.spec.tmpl
@@ -1,12 +1,16 @@
 %define name confluent_client
 %define version #VERSION#
+%define fversion %{lua:
+sv, _ = string.gsub("#VERSION#", "[~+]", "-")
+print(sv)
+}
 %define release 1
 
 Summary: Client libraries and utilities for confluent
 Name: %{name}
 Version: %{version}
 Release: %{release}
-Source0: %{name}-%{version}.tar.gz
+Source0: %{name}-%{fversion}.tar.gz
 License: Apache2
 Group: Development/Libraries
 BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot
@@ -21,7 +25,7 @@ This package enables python development and command line access to a
 confluent server.
 %prep
-%setup -n %{name}-%{version} -n %{name}-%{version}
+%setup -n %{name}-%{fversion}
 
 %build
 %if "%{dist}" == ".el7"
diff --git a/confluent_client/confluent_env.sh b/confluent_client/confluent_env.sh
index 81a70198..925a873d 100644
--- a/confluent_client/confluent_env.sh
+++ b/confluent_client/confluent_env.sh
@@ -153,11 +153,11 @@ _confluent_osimage_completion()
 {
     _confluent_get_args
     if [ $NUMARGS == 2 ]; then
-        COMPREPLY=($(compgen -W "initialize import updateboot rebase" -- ${COMP_WORDS[COMP_CWORD]}))
+        COMPREPLY=($(compgen -W "initialize import importcheck updateboot rebase" -- ${COMP_WORDS[COMP_CWORD]}))
         return
     elif [ ${CMPARGS[1]} == 'initialize' ]; then
         COMPREPLY=($(compgen -W "-h -u -s -t -i" -- ${COMP_WORDS[COMP_CWORD]}))
-    elif [ ${CMPARGS[1]} == 'import' ]; then
+    elif [ ${CMPARGS[1]} == 'import' ] || [ ${CMPARGS[1]} == 'importcheck' ]; then
         compopt -o default
         COMPREPLY=()
         return
diff --git a/confluent_client/doc/man/l2traceroute.ronn b/confluent_client/doc/man/l2traceroute.ronn
new file mode 100644
index 00000000..16318567
--- /dev/null
+++ b/confluent_client/doc/man/l2traceroute.ronn
@@ -0,0 +1,38 @@
+l2traceroute(8) -- returns the layer 2 route through an Ethernet network managed by confluent, given two endpoints
+==============================
+## SYNOPSIS
+`l2traceroute [options] <start_node> <end_node>`
+
+## DESCRIPTION
+**l2traceroute** is a command that returns the layer 2 route between two endpoints, based on the interfaces configured for them in nodeattrib(8).
+It can also be used with the -i and -e options to check the path against specific interfaces on the endpoints.
+
+## PREREQUISITES
+**l2traceroute** requires the net.<interface>.switch attributes to be set on an endpoint if that endpoint is not a switch.
+
+## OPTIONS
+* `-e INTERFACE`, `--eface=INTERFACE`:
+  interface to check against for the second endpoint
+* `-i INTERFACE`, `--interface=INTERFACE`:
+  interface to check against for the first endpoint
+* `-c`, `--cumulus`:
+  return layer 2 route through cumulus switches only
+* `-h`, `--help`:
+  Show help message and exit
+
+## EXAMPLES
+* Checking route between two nodes:
+  `# l2traceroute n244 n1851`
+  `n244 to n1851: ['switch114']`
+
+* Checking route from one node to multiple nodes:
+  `# l2traceroute n244 n1833,n1851`
+  `n244 to n1833: ['switch114', 'switch7', 'switch32', 'switch253', 'switch85', 'switch72', 'switch21', 'switch2', 'switch96', 'switch103', 'switch115']`
+  `n244 to n1851: ['switch114']`
diff --git a/confluent_client/doc/man/nodeapply.ronn b/confluent_client/doc/man/nodeapply.ronn
new file mode 100644
index 00000000..dae35025
--- /dev/null
+++ b/confluent_client/doc/man/nodeapply.ronn
@@ -0,0 +1,30 @@
+nodeapply(8) -- Execute command on many nodes in a noderange through ssh
+=========================================================================
+
+## SYNOPSIS
+
+`nodeapply [options] <noderange>`
+
+## DESCRIPTION
+
+Provides shortcut access to a number of common operations against deployed
+nodes. These operations include refreshing ssh certificates and configuration,
+rerunning syncfiles, and executing specified postscripts.
+
+## OPTIONS
+
+* `-k`, `--security`
+  Refresh SSH configuration (hosts.equiv and node SSH certificates)
+
+* `-F`, `--sync`
+  Rerun syncfiles from deployed profile
+
+* `-P SCRIPTS`, `--scripts=SCRIPTS`
+  Re-run specified scripts, with full path under scripts specified, e.g.
+  post.d/scriptname,firstboot.d/otherscriptname
+
+* `-c COUNT`, `-f COUNT`, `--count=COUNT`
+  Specify the maximum number of instances to run concurrently
+
+* `-m MAXNODES`, `--maxnodes=MAXNODES`
+  Specify a maximum number of nodes to run remote ssh command to, prompting
+  if over the threshold
diff --git a/confluent_client/doc/man/nodeattrib.ronn.tmpl b/confluent_client/doc/man/nodeattrib.ronn.tmpl
index b1330198..c8127ad8 100644
--- a/confluent_client/doc/man/nodeattrib.ronn.tmpl
+++ b/confluent_client/doc/man/nodeattrib.ronn.tmpl
@@ -7,8 +7,8 @@ nodeattrib(8) -- List or change confluent nodes attributes
 `nodeattrib [ ...]`
 `nodeattrib -c ...`
 `nodeattrib -e ...`
-`nodeattrib -p ...`
-`nodeattrib -s `
+`nodeattrib -p ...`
+`nodeattrib -s ...`
 
 ## DESCRIPTION
 
@@ -54,14 +54,17 @@ to a blank value will allow masking a group defined attribute with an empty valu
 * `-e`, `--environment`:
   Set specified attributes based on exported environment variable of matching name.
   Environment variable names may be lower case or all upper case.
-  Replace . with _ as needed (e.g. info.note may be specified as either $info_note or $INFO_NOTE
+  Replace . with _ as needed (e.g. info.note may be specified as either $info_note or $INFO_NOTE)
 
 * `-p`, `--prompt`:
   Request interactive prompting to provide values rather than the command
   line or environment variables.
 
 * `-s`, `--set`:
-  Set attributes using a batch file
+  Set attributes using a batch file rather than the command line. The attributes in the batch file
+  can be specified as one line of key=value pairs, similar to the command line, or each attribute can
+  be on its own line. Lines that start with a # sign are read as comments. See EXAMPLES for batch
+  file syntax.
 
 * `-m MAXNODES`, `--maxnodes=MAXNODES`:
   Prompt if trying to set attributes on more than
@@ -120,6 +123,25 @@ to a blank value will allow masking a group defined attribute with an empty valu
   `d1: net.pxe.switch: pxeswitch1`
   `d1: net.switch:`
 
+* Setting attributes using a batch file with syntax similar to the command line:
+  `# cat nodeattributes.batch`
+  `# power`
+  `power.psu1.outlet=3 power.psu1.pdu=pdu2`
+  `# nodeattrib n41 -s nodeattributes.batch`
+  `n41: 3`
+  `n41: pdu2`
+
+* Setting attributes using a batch file with syntax where each attribute is on its own line:
+  `# cat nodeattributes.batch`
+  `# management`
+  `custom.mgt.switch=switch_main`
+  `custom.mgt.switch.port=swp4`
+  `# nodeattrib n41 -s nodeattributes.batch`
+  `n41: switch_main`
+  `n41: swp4`
+
 ## SEE ALSO
 
 nodegroupattrib(8), nodeattribexpressions(5)
diff --git a/confluent_client/doc/man/nodebmcpassword.ronn b/confluent_client/doc/man/nodebmcpassword.ronn
new file mode 100644
index 00000000..5927f670
--- /dev/null
+++ b/confluent_client/doc/man/nodebmcpassword.ronn
@@ -0,0 +1,28 @@
+nodebmcpassword(8) -- Change management controller password for a specified user
+=========================================================
+
+## SYNOPSIS
+
+`nodebmcpassword <noderange> <username> <new_password>`
+
+## DESCRIPTION
+
+`nodebmcpassword` allows you to change the management controller password for a user on a specified noderange
+
+## OPTIONS
+
+* `-m MAXNODES`, `--maxnodes=MAXNODES`:
+  Number of nodes to affect before prompting for
+  confirmation
+
+* `-h`, `--help`:
+  Show help message and exit
+
+## EXAMPLES
+
+* Change the management controller password for user USERID on nodes n1 through n4:
+  `# nodebmcpassword n1-n4 USERID newpassword`
+  `n1: Password Change Successful`
+  `n2: Password Change Successful`
+  `n3: Password Change Successful`
+  `n4: Password Change Successful`
\ No newline at end of file
diff --git a/confluent_osdeploy/buildrpm b/confluent_osdeploy/buildrpm
index 6bd1d419..9e9cb582 100755
--- a/confluent_osdeploy/buildrpm
+++ b/confluent_osdeploy/buildrpm
@@ -1,7 +1,11 @@
 VERSION=`git describe|cut -d- -f 1`
 NUMCOMMITS=`git describe|cut -d- -f 2`
 if [ "$NUMCOMMITS" != "$VERSION" ]; then
-    VERSION=$VERSION.dev$NUMCOMMITS.g`git describe|cut -d- -f 3`
+    LASTNUM=$(echo $VERSION|rev|cut -d . -f 1|rev)
+    LASTNUM=$((LASTNUM+1))
+    FIRSTPART=$(echo $VERSION|rev|cut -d . -f 2- |rev)
+    VERSION=${FIRSTPART}.${LASTNUM}
+    VERSION=$VERSION~dev$NUMCOMMITS+`git describe|cut -d- -f 3`
 fi
 sed -e "s/#VERSION#/$VERSION/" confluent_osdeploy.spec.tmpl > confluent_osdeploy.spec
 cd ..
diff --git a/confluent_osdeploy/buildrpm-aarch64 b/confluent_osdeploy/buildrpm-aarch64
index 867c0102..c269284b 100644
--- a/confluent_osdeploy/buildrpm-aarch64
+++ b/confluent_osdeploy/buildrpm-aarch64
@@ -1,7 +1,12 @@
+cd $(dirname $0)
 VERSION=`git describe|cut -d- -f 1`
 NUMCOMMITS=`git describe|cut -d- -f 2`
 if [ "$NUMCOMMITS" != "$VERSION" ]; then
-    VERSION=$VERSION.dev$NUMCOMMITS.g`git describe|cut -d- -f 3`
+    LASTNUM=$(echo $VERSION|rev|cut -d . -f 1|rev)
+    LASTNUM=$((LASTNUM+1))
+    FIRSTPART=$(echo $VERSION|rev|cut -d . -f 2- |rev)
+    VERSION=${FIRSTPART}.${LASTNUM}
+    VERSION=$VERSION~dev$NUMCOMMITS+`git describe|cut -d- -f 3`
 fi
 sed -e "s/#VERSION#/$VERSION/" confluent_osdeploy-aarch64.spec.tmpl > confluent_osdeploy-aarch64.spec
 cd ..
@@ -29,4 +34,5 @@ mv confluent_el8bin.tar.xz ~/rpmbuild/SOURCES/
 mv confluent_el9bin.tar.xz ~/rpmbuild/SOURCES/
 rm -rf el9bin
 rm -rf el8bin
-rpmbuild -ba confluent_osdeploy-aarch64.spec
+podman run --privileged --rm -v $HOME:/root el8builder rpmbuild -ba /root/confluent/confluent_osdeploy/confluent_osdeploy-aarch64.spec
+
diff --git a/confluent_osdeploy/common/profile/scripts/confignet b/confluent_osdeploy/common/profile/scripts/confignet
index dec1808d..9092f631 100644
--- a/confluent_osdeploy/common/profile/scripts/confignet
+++ b/confluent_osdeploy/common/profile/scripts/confignet
@@ -86,7 +86,7 @@ def map_idx_to_name():
     for line in subprocess.check_output(['ip', 'l']).decode('utf8').splitlines():
         if line.startswith(' ') and 'link/' in line:
             typ = line.split()[0].split('/')[1]
-            devtype[prevdev] = typ if type != 'ether' else 'ethernet'
+            devtype[prevdev] = typ if typ != 'ether' else 'ethernet'
         if line.startswith(' '):
             continue
         idx, iface, rst = line.split(':', 2)
@@ -151,13 +151,14 @@ class NetplanManager(object):
         needcfgapply = False
         for devname in devnames:
             needcfgwrite = False
-            if stgs['ipv6_method'] == 'static':
+            # ipv6_method may be absent from the deployment settings; treat missing as not static
+ if stgs.get('ipv6_method', None) == 'static': curraddr = stgs['ipv6_address'] currips = self.getcfgarrpath([devname, 'addresses']) if curraddr not in currips: needcfgwrite = True currips.append(curraddr) - if stgs['ipv4_method'] == 'static': + if stgs.get('ipv4_method', None) == 'static': curraddr = stgs['ipv4_address'] currips = self.getcfgarrpath([devname, 'addresses']) if curraddr not in currips: @@ -180,7 +181,7 @@ class NetplanManager(object): if dnsips: currdnsips = self.getcfgarrpath([devname, 'nameservers', 'addresses']) for dnsip in dnsips: - if dnsip not in currdnsips: + if dnsip and dnsip not in currdnsips: needcfgwrite = True currdnsips.append(dnsip) if dnsdomain: @@ -191,8 +192,10 @@ class NetplanManager(object): if needcfgwrite: needcfgapply = True newcfg = {'network': {'version': 2, 'ethernets': {devname: self.cfgbydev[devname]}}} + oumask = os.umask(0o77) with open('/etc/netplan/{0}-confluentcfg.yaml'.format(devname), 'w') as planout: planout.write(yaml.dump(newcfg)) + os.umask(oumask) if needcfgapply: subprocess.call(['netplan', 'apply']) @@ -294,7 +297,8 @@ class WickedManager(object): class NetworkManager(object): - def __init__(self, devtypes): + def __init__(self, devtypes, deploycfg): + self.deploycfg = deploycfg self.connections = {} self.uuidbyname = {} self.uuidbydev = {} @@ -344,7 +348,7 @@ class NetworkManager(object): bondcfg[stg] = deats[stg] if member in self.uuidbyname: subprocess.check_call(['nmcli', 'c', 'del', self.uuidbyname[member]]) - subprocess.check_call(['nmcli', 'c', 'add', 'type', 'team-slave', 'master', team, 'con-name', member, 'connection.interface-name', member]) + subprocess.check_call(['nmcli', 'c', 'add', 'type', 'bond-slave', 'master', team, 'con-name', member, 'connection.interface-name', member]) if bondcfg: args = [] for parm in bondcfg: @@ -366,6 +370,20 @@ class NetworkManager(object): cmdargs['ipv4.gateway'] = stgs['ipv4_gateway'] if stgs.get('ipv6_gateway', None): cmdargs['ipv6.gateway'] = stgs['ipv6_gateway'] + dnsips = self.deploycfg.get('nameservers', []) + if not dnsips: + dnsips = [] + dns4 = [] + dns6 = [] + for dnsip in dnsips: + if '.' in dnsip: + dns4.append(dnsip) + elif ':' in dnsip: + dns6.append(dnsip) + if dns4: + cmdargs['ipv4.dns'] = ','.join(dns4) + if dns6: + cmdargs['ipv6.dns'] = ','.join(dns6) if len(cfg['interfaces']) > 1: # team time.. should be.. 
if not cfg['settings'].get('team_mode', None): sys.stderr.write("Warning, multiple interfaces ({0}) without a team_mode, skipping setup\n".format(','.join(cfg['interfaces']))) @@ -378,26 +396,45 @@ class NetworkManager(object): for arg in cmdargs: cargs.append(arg) cargs.append(cmdargs[arg]) - subprocess.check_call(['nmcli', 'c', 'add', 'type', 'team', 'con-name', cname, 'connection.interface-name', cname, 'team.runner', stgs['team_mode']] + cargs) + if stgs['team_mode'] == 'lacp': + stgs['team_mode'] = '802.3ad' + subprocess.check_call(['nmcli', 'c', 'add', 'type', 'bond', 'con-name', cname, 'connection.interface-name', cname, 'bond.options', 'mode={}'.format(stgs['team_mode'])] + cargs) for iface in cfg['interfaces']: self.add_team_member(cname, iface) subprocess.check_call(['nmcli', 'c', 'u', cname]) else: cname = stgs.get('connection_name', None) iname = list(cfg['interfaces'])[0] - if not cname: - cname = iname + ctype = self.devtypes.get(iname, None) + if not ctype: + sys.stderr.write("Warning, no device found for interface_name ({0}), skipping setup\n".format(iname)) + return + if stgs.get('vlan_id', None): + vlan = stgs['vlan_id'] + if ctype == 'infiniband': + vlan = '0x{0}'.format(vlan) if not vlan.startswith('0x') else vlan + cmdargs['infiniband.parent'] = iname + cmdargs['infiniband.p-key'] = vlan + iname = '{0}.{1}'.format(iname, vlan[2:]) + elif ctype == 'ethernet': + ctype = 'vlan' + cmdargs['vlan.parent'] = iname + cmdargs['vlan.id'] = vlan + iname = '{0}.{1}'.format(iname, vlan) + else: + sys.stderr.write("Warning, unknown interface_name ({0}) device type ({1}) for VLAN/PKEY, skipping setup\n".format(iname, ctype)) + return + cname = iname if not cname else cname u = self.uuidbyname.get(cname, None) cargs = [] for arg in cmdargs: cargs.append(arg) cargs.append(cmdargs[arg]) if u: - cmdargs['connection.interface-name'] = iname - subprocess.check_call(['nmcli', 'c', 'm', u] + cargs) + subprocess.check_call(['nmcli', 'c', 'm', u, 'connection.interface-name', iname] + cargs) subprocess.check_call(['nmcli', 'c', 'u', u]) else: - subprocess.check_call(['nmcli', 'c', 'add', 'type', self.devtypes[iname], 'con-name', cname, 'connection.interface-name', iname] + cargs) + subprocess.check_call(['nmcli', 'c', 'add', 'type', ctype, 'con-name', cname, 'connection.interface-name', iname] + cargs) self.read_connections() u = self.uuidbyname.get(cname, None) if u: @@ -418,6 +455,12 @@ if __name__ == '__main__': srvs, _ = apiclient.scan_confluents() doneidxs = set([]) dc = None + if not srvs: # the multicast scan failed, fallback to deploycfg cfg file + with open('/etc/confluent/confluent.deploycfg', 'r') as dci: + for cfgline in dci.read().split('\n'): + if cfgline.startswith('deploy_server:'): + srvs = [cfgline.split()[1]] + break for srv in srvs: try: s = socket.create_connection((srv, 443)) @@ -435,10 +478,26 @@ if __name__ == '__main__': curridx = addr[-1] if curridx in doneidxs: continue - status, nc = apiclient.HTTPSClient(usejson=True, host=srv).grab_url_with_status('/confluent-api/self/netcfg') + for tries in (1, 2, 3): + try: + status, nc = apiclient.HTTPSClient(usejson=True, host=srv).grab_url_with_status('/confluent-api/self/netcfg') + break + except Exception: + if tries == 3: + raise + time.sleep(1) + continue nc = json.loads(nc) if not dc: - status, dc = apiclient.HTTPSClient(usejson=True, host=srv).grab_url_with_status('/confluent-api/self/deploycfg2') + for tries in (1, 2, 3): + try: + status, dc = apiclient.HTTPSClient(usejson=True, 
host=srv).grab_url_with_status('/confluent-api/self/deploycfg2')
+                        break
+                    except Exception:
+                        if tries == 3:
+                            raise
+                        time.sleep(1)
+                        continue
             dc = json.loads(dc)
         iname = get_interface_name(idxmap[curridx], nc.get('default', {}))
         if iname:
@@ -464,11 +523,13 @@
             netname_to_interfaces['default']['interfaces'] -= netname_to_interfaces[netn]['interfaces']
     if not netname_to_interfaces['default']['interfaces']:
         del netname_to_interfaces['default']
+    # Make sure VLAN/PKEY connections are created last
+    netname_to_interfaces = dict(sorted(netname_to_interfaces.items(), key=lambda item: 'vlan_id' in item[1]['settings']))
     rm_tmp_llas(tmpllas)
     if os.path.exists('/usr/sbin/netplan'):
         nm = NetplanManager(dc)
     if os.path.exists('/usr/bin/nmcli'):
-        nm = NetworkManager(devtypes)
+        nm = NetworkManager(devtypes, dc)
     elif os.path.exists('/usr/sbin/wicked'):
         nm = WickedManager()
     for netn in netname_to_interfaces:
diff --git a/confluent_osdeploy/common/profile/scripts/sample/consoleredirect b/confluent_osdeploy/common/profile/scripts/sample/consoleredirect
new file mode 100644
index 00000000..4ebc3a8f
--- /dev/null
+++ b/confluent_osdeploy/common/profile/scripts/sample/consoleredirect
@@ -0,0 +1,49 @@
+is_suse=false
+is_rhel=false
+
+if test -f /boot/efi/EFI/redhat/grub.cfg; then
+    grubcfg="/boot/efi/EFI/redhat/grub.cfg"
+    grub2-mkconfig -o $grubcfg
+    is_rhel=true
+elif test -f /boot/efi/EFI/sle_hpc/grub.cfg; then
+    grubcfg="/boot/efi/EFI/sle_hpc/grub.cfg"
+    grub2-mkconfig -o $grubcfg
+    is_suse=true
+else
+    echo "Expected file missing: check whether the OS is sle_hpc or redhat"
+    exit 1
+fi
+
+# On SUSE, comment out the non-comment lines of the grub_platform block,
+# then the terminal directives
+if $is_suse; then
+    start=false
+    num_line=0
+    lines_to_edit=()
+    while read line; do
+        ((num_line++))
+        if [[ $line == *"grub_platform"* ]]; then
+            start=true
+        fi
+        if $start; then
+            if [[ $line != "#"* ]]; then
+                lines_to_edit+=($num_line)
+            fi
+        fi
+        if [[ ${#line} -eq 2 && $line == *"fi" ]]; then
+            if $start; then
+                start=false
+            fi
+        fi
+    done < $grubcfg
+
+    for line_num in "${lines_to_edit[@]}"; do
+        line_num+="s"
+        sed -i "${line_num},^,#," $grubcfg
+    done
+    sed -i 's,^terminal,#terminal,' $grubcfg
+fi
+
+# On Red Hat, comment out the serial and terminal directives
+if $is_rhel; then
+    sed -i 's,^serial,#serial, ; s,^terminal,#terminal,' $grubcfg
+fi
\ No newline at end of file
diff --git a/confluent_osdeploy/common/profile/scripts/setupssh b/confluent_osdeploy/common/profile/scripts/setupssh
index 3fdf0ef5..eb989bb7 100644
--- a/confluent_osdeploy/common/profile/scripts/setupssh
+++ b/confluent_osdeploy/common/profile/scripts/setupssh
@@ -3,6 +3,9 @@
 [ -f /opt/confluent/bin/apiclient ] && confapiclient=/opt/confluent/bin/apiclient
 [ -f /etc/confluent/apiclient ] && confapiclient=/etc/confluent/apiclient
 for pubkey in /etc/ssh/ssh_host*key.pub; do
+    if [ "$pubkey" = /etc/ssh/ssh_host_key.pub ]; then
+        continue
+    fi
     certfile=${pubkey/.pub/-cert.pub}
     rm $certfile
     confluentpython $confapiclient /confluent-api/self/sshcert $pubkey -o $certfile
diff --git a/confluent_osdeploy/confluent_osdeploy-aarch64.spec.tmpl b/confluent_osdeploy/confluent_osdeploy-aarch64.spec.tmpl
index f1b7c804..fb6f6ddc 100644
--- a/confluent_osdeploy/confluent_osdeploy-aarch64.spec.tmpl
+++ b/confluent_osdeploy/confluent_osdeploy-aarch64.spec.tmpl
@@ -26,7 +26,7 @@
 mkdir -p opt/confluent/bin
 mkdir -p stateless-bin
 cp -a el8bin/* .
ln -s el8 el9 -for os in rhvh4 el7 genesis el8 suse15 ubuntu20.04 ubuntu22.04 coreos el9; do +for os in rhvh4 el7 genesis el8 suse15 ubuntu20.04 ubuntu22.04 ubuntu24.04 coreos el9; do mkdir ${os}out cd ${os}out if [ -d ../${os}bin ]; then @@ -76,7 +76,7 @@ cp -a esxi7 esxi8 %install mkdir -p %{buildroot}/opt/confluent/share/licenses/confluent_osdeploy/ #cp LICENSE %{buildroot}/opt/confluent/share/licenses/confluent_osdeploy/ -for os in rhvh4 el7 el8 el9 genesis suse15 ubuntu20.04 ubuntu22.04 esxi6 esxi7 esxi8 coreos; do +for os in rhvh4 el7 el8 el9 genesis suse15 ubuntu20.04 ubuntu22.04 ubuntu24.04 esxi6 esxi7 esxi8 coreos; do mkdir -p %{buildroot}/opt/confluent/lib/osdeploy/$os/initramfs/aarch64/ cp ${os}out/addons.* %{buildroot}/opt/confluent/lib/osdeploy/$os/initramfs/aarch64/ if [ -d ${os}disklessout ]; then diff --git a/confluent_osdeploy/confluent_osdeploy.spec.tmpl b/confluent_osdeploy/confluent_osdeploy.spec.tmpl index d939a0c3..5faab31f 100644 --- a/confluent_osdeploy/confluent_osdeploy.spec.tmpl +++ b/confluent_osdeploy/confluent_osdeploy.spec.tmpl @@ -28,7 +28,7 @@ This contains support utilities for enabling deployment of x86_64 architecture s #cp start_root urlmount ../stateless-bin/ #cd .. ln -s el8 el9 -for os in rhvh4 el7 genesis el8 suse15 ubuntu18.04 ubuntu20.04 ubuntu22.04 coreos el9; do +for os in rhvh4 el7 genesis el8 suse15 ubuntu18.04 ubuntu20.04 ubuntu22.04 ubuntu24.04 coreos el9; do mkdir ${os}out cd ${os}out if [ -d ../${os}bin ]; then @@ -42,7 +42,7 @@ for os in rhvh4 el7 genesis el8 suse15 ubuntu18.04 ubuntu20.04 ubuntu22.04 coreo mv ../addons.cpio . cd .. done -for os in el7 el8 suse15 el9 ubuntu20.04 ubuntu22.04; do +for os in el7 el8 suse15 el9 ubuntu20.04 ubuntu22.04 ubuntu24.04; do mkdir ${os}disklessout cd ${os}disklessout if [ -d ../${os}bin ]; then @@ -78,7 +78,7 @@ cp -a esxi7 esxi8 %install mkdir -p %{buildroot}/opt/confluent/share/licenses/confluent_osdeploy/ cp LICENSE %{buildroot}/opt/confluent/share/licenses/confluent_osdeploy/ -for os in rhvh4 el7 el8 el9 genesis suse15 ubuntu20.04 ubuntu18.04 ubuntu22.04 esxi6 esxi7 esxi8 coreos; do +for os in rhvh4 el7 el8 el9 genesis suse15 ubuntu20.04 ubuntu18.04 ubuntu22.04 ubuntu24.04 esxi6 esxi7 esxi8 coreos; do mkdir -p %{buildroot}/opt/confluent/lib/osdeploy/$os/initramfs mkdir -p %{buildroot}/opt/confluent/lib/osdeploy/$os/profiles cp ${os}out/addons.* %{buildroot}/opt/confluent/lib/osdeploy/$os/initramfs diff --git a/confluent_osdeploy/el7-diskless/profiles/default/scripts/getinstalldisk b/confluent_osdeploy/el7-diskless/profiles/default/scripts/getinstalldisk index 522aba00..04c7708e 100644 --- a/confluent_osdeploy/el7-diskless/profiles/default/scripts/getinstalldisk +++ b/confluent_osdeploy/el7-diskless/profiles/default/scripts/getinstalldisk @@ -3,6 +3,8 @@ import os class DiskInfo(object): def __init__(self, devname): + if devname.startswith('nvme') and 'c' in devname: + raise Exception("Skipping multipath devname") self.name = devname self.wwn = None self.path = None diff --git a/confluent_osdeploy/el7-diskless/profiles/default/scripts/syncfileclient b/confluent_osdeploy/el7-diskless/profiles/default/scripts/syncfileclient index 8d37d43a..cca0f57d 100644 --- a/confluent_osdeploy/el7-diskless/profiles/default/scripts/syncfileclient +++ b/confluent_osdeploy/el7-diskless/profiles/default/scripts/syncfileclient @@ -1,4 +1,5 @@ #!/usr/bin/python +import time import importlib import tempfile import json @@ -223,6 +224,7 @@ def synchronize(): if status == 202: lastrsp = '' while status != 204: + 
time.sleep(2) status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles') if not isinstance(rsp, str): rsp = rsp.decode('utf8') diff --git a/confluent_osdeploy/el7/profiles/default/scripts/getinstalldisk b/confluent_osdeploy/el7/profiles/default/scripts/getinstalldisk index 522aba00..04c7708e 100644 --- a/confluent_osdeploy/el7/profiles/default/scripts/getinstalldisk +++ b/confluent_osdeploy/el7/profiles/default/scripts/getinstalldisk @@ -3,6 +3,8 @@ import os class DiskInfo(object): def __init__(self, devname): + if devname.startswith('nvme') and 'c' in devname: + raise Exception("Skipping multipath devname") self.name = devname self.wwn = None self.path = None diff --git a/confluent_osdeploy/el7/profiles/default/scripts/syncfileclient b/confluent_osdeploy/el7/profiles/default/scripts/syncfileclient index 8d37d43a..02dbcc4d 100644 --- a/confluent_osdeploy/el7/profiles/default/scripts/syncfileclient +++ b/confluent_osdeploy/el7/profiles/default/scripts/syncfileclient @@ -5,6 +5,7 @@ import json import os import shutil import pwd +import time import grp try: from importlib.machinery import SourceFileLoader @@ -223,6 +224,7 @@ def synchronize(): if status == 202: lastrsp = '' while status != 204: + time.sleep(2) status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles') if not isinstance(rsp, str): rsp = rsp.decode('utf8') diff --git a/confluent_osdeploy/el8-diskless/initramfs/usr/lib/dracut/hooks/cmdline/10-confluentdiskless.sh b/confluent_osdeploy/el8-diskless/initramfs/usr/lib/dracut/hooks/cmdline/10-confluentdiskless.sh index b2881e0b..cdcc12fd 100644 --- a/confluent_osdeploy/el8-diskless/initramfs/usr/lib/dracut/hooks/cmdline/10-confluentdiskless.sh +++ b/confluent_osdeploy/el8-diskless/initramfs/usr/lib/dracut/hooks/cmdline/10-confluentdiskless.sh @@ -155,7 +155,7 @@ fi ready=0 while [ $ready = "0" ]; do get_remote_apikey - if [[ $confluent_mgr == *:* ]]; then + if [[ $confluent_mgr == *:* ]] && [[ $confluent_mgr != "["* ]]; then confluent_mgr="[$confluent_mgr]" fi tmperr=$(mktemp) @@ -189,7 +189,7 @@ cat > /run/NetworkManager/system-connections/$ifname.nmconnection << EOC EOC echo id=${ifname} >> /run/NetworkManager/system-connections/$ifname.nmconnection echo uuid=$(uuidgen) >> /run/NetworkManager/system-connections/$ifname.nmconnection -linktype=$(ip link |grep -A2 ${ifname}|tail -n 1|awk '{print $1}') +linktype=$(ip link show dev ${ifname}|grep link/|awk '{print $1}') if [ "$linktype" = link/infiniband ]; then linktype="infiniband" else @@ -324,7 +324,7 @@ fi echo '[proxy]' >> /run/NetworkManager/system-connections/$ifname.nmconnection chmod 600 /run/NetworkManager/system-connections/*.nmconnection confluent_websrv=$confluent_mgr -if [[ $confluent_websrv == *:* ]]; then +if [[ $confluent_websrv == *:* ]] && [[ $confluent_websrv != "["* ]]; then confluent_websrv="[$confluent_websrv]" fi echo -n "Initializing ssh..." 
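The dracut hook above applies the same guard in two places before embedding an address in a URL: wrap IPv6 literals in brackets, but only when they are not already bracketed. A minimal Python rendering of that rule (bracket_host is an illustrative name, not part of the shipped scripts):

    def bracket_host(addr):
        # IPv6 literals carry colons and need brackets inside URLs;
        # hostnames and IPv4 addresses pass through unchanged.
        if ':' in addr and not addr.startswith('['):
            return '[' + addr + ']'
        return addr

    assert bracket_host('10.4.5.6') == '10.4.5.6'
    assert bracket_host('fd00::1') == '[fd00::1]'
    assert bracket_host('[fd00::1]') == '[fd00::1]'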
diff --git a/confluent_osdeploy/el8-diskless/profiles/default/scripts/getinstalldisk b/confluent_osdeploy/el8-diskless/profiles/default/scripts/getinstalldisk index 522aba00..04c7708e 100644 --- a/confluent_osdeploy/el8-diskless/profiles/default/scripts/getinstalldisk +++ b/confluent_osdeploy/el8-diskless/profiles/default/scripts/getinstalldisk @@ -3,6 +3,8 @@ import os class DiskInfo(object): def __init__(self, devname): + if devname.startswith('nvme') and 'c' in devname: + raise Exception("Skipping multipath devname") self.name = devname self.wwn = None self.path = None diff --git a/confluent_osdeploy/el8-diskless/profiles/default/scripts/image2disk.py b/confluent_osdeploy/el8-diskless/profiles/default/scripts/image2disk.py index aaaca9d4..655aaedc 100644 --- a/confluent_osdeploy/el8-diskless/profiles/default/scripts/image2disk.py +++ b/confluent_osdeploy/el8-diskless/profiles/default/scripts/image2disk.py @@ -10,6 +10,7 @@ import stat import struct import sys import subprocess +import traceback bootuuid = None @@ -426,4 +427,9 @@ def install_to_disk(imgpath): if __name__ == '__main__': - install_to_disk(os.environ['mountsrc']) + try: + install_to_disk(os.environ['mountsrc']) + except Exception: + traceback.print_exc() + time.sleep(86400) + raise diff --git a/confluent_osdeploy/el8-diskless/profiles/default/scripts/imageboot.sh b/confluent_osdeploy/el8-diskless/profiles/default/scripts/imageboot.sh index ee2a8125..fe53bf38 100644 --- a/confluent_osdeploy/el8-diskless/profiles/default/scripts/imageboot.sh +++ b/confluent_osdeploy/el8-diskless/profiles/default/scripts/imageboot.sh @@ -1,6 +1,6 @@ . /lib/dracut-lib.sh confluent_whost=$confluent_mgr -if [[ "$confluent_whost" == *:* ]]; then +if [[ "$confluent_whost" == *:* ]] && [[ "$confluent_whost" != "["* ]]; then confluent_whost="[$confluent_mgr]" fi mkdir -p /mnt/remoteimg /mnt/remote /mnt/overlay diff --git a/confluent_osdeploy/el8-diskless/profiles/default/scripts/onboot.sh b/confluent_osdeploy/el8-diskless/profiles/default/scripts/onboot.sh index 3c99ad12..b2c0d1b3 100644 --- a/confluent_osdeploy/el8-diskless/profiles/default/scripts/onboot.sh +++ b/confluent_osdeploy/el8-diskless/profiles/default/scripts/onboot.sh @@ -16,6 +16,7 @@ if [ -z "$confluent_mgr" ]; then fi confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}') timedatectl set-timezone $(grep ^timezone: /etc/confluent/confluent.deploycfg|awk '{print $2}') +hostnamectl set-hostname $nodename export nodename confluent_mgr confluent_profile . 
/etc/confluent/functions mkdir -p /var/log/confluent diff --git a/confluent_osdeploy/el8-diskless/profiles/default/scripts/syncfileclient b/confluent_osdeploy/el8-diskless/profiles/default/scripts/syncfileclient index f7d4c0b4..5f2efc5e 100644 --- a/confluent_osdeploy/el8-diskless/profiles/default/scripts/syncfileclient +++ b/confluent_osdeploy/el8-diskless/profiles/default/scripts/syncfileclient @@ -1,4 +1,6 @@ #!/usr/bin/python3 +import random +import time import subprocess import importlib import tempfile @@ -7,6 +9,7 @@ import os import shutil import pwd import grp +import sys from importlib.machinery import SourceFileLoader try: apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module() @@ -227,9 +230,16 @@ def synchronize(): myips.append(addr) data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir, 'myips': myips}) status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data) + if status >= 300: + sys.stderr.write("Error starting syncfiles - {}:\n".format(status)) + sys.stderr.write(rsp.decode('utf8')) + sys.stderr.write('\n') + sys.stderr.flush() + return status if status == 202: lastrsp = '' while status != 204: + time.sleep(1+(2*random.random())) status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles') if not isinstance(rsp, str): rsp = rsp.decode('utf8') @@ -277,10 +287,21 @@ def synchronize(): os.chmod(fname, int(opts[fname][opt], 8)) if uid != -1 or gid != -1: os.chown(fname, uid, gid) + return status finally: shutil.rmtree(tmpdir) shutil.rmtree(appendoncedir) if __name__ == '__main__': - synchronize() + status = 202 + while status not in (204, 200): + try: + status = synchronize() + except Exception as e: + sys.stderr.write(str(e)) + sys.stderr.write('\n') + sys.stderr.flush() + status = 300 + if status not in (204, 200): + time.sleep((random.random()*3)+2) diff --git a/confluent_osdeploy/el8/initramfs/usr/lib/dracut/hooks/cmdline/01-confluent.sh b/confluent_osdeploy/el8/initramfs/usr/lib/dracut/hooks/cmdline/01-confluent.sh index f441504e..bc327610 100644 --- a/confluent_osdeploy/el8/initramfs/usr/lib/dracut/hooks/cmdline/01-confluent.sh +++ b/confluent_osdeploy/el8/initramfs/usr/lib/dracut/hooks/cmdline/01-confluent.sh @@ -7,8 +7,8 @@ if ! grep console= /proc/cmdline >& /dev/null; then if [ -n "$autocons" ]; then echo console=$autocons |sed -e 's!/dev/!!' >> /tmp/01-autocons.conf autocons=${autocons%,*} - echo $autocons > /tmp/01-autocons.devnode - echo "Detected firmware specified console at $(cat /tmp/01-autocons.conf)" > $autocons + echo $autocons > /tmp/01-autocons.devnode + echo "Detected firmware specified console at $(cat /tmp/01-autocons.conf)" > $autocons echo "Initializing auto detected console when installer starts" > $autocons fi fi @@ -16,4 +16,5 @@ if grep console=ttyS /proc/cmdline >& /dev/null; then echo "Serial console has been requested in the kernel arguments, the local video may not show progress" > /dev/tty1 fi . /lib/anaconda-lib.sh +echo rd.fcoe=0 > /etc/cmdline.d/nofcoe.conf wait_for_kickstart diff --git a/confluent_osdeploy/el8/initramfs/usr/lib/dracut/hooks/pre-trigger/01-confluent.sh b/confluent_osdeploy/el8/initramfs/usr/lib/dracut/hooks/pre-trigger/01-confluent.sh index 6db95276..f8b576a2 100644 --- a/confluent_osdeploy/el8/initramfs/usr/lib/dracut/hooks/pre-trigger/01-confluent.sh +++ b/confluent_osdeploy/el8/initramfs/usr/lib/dracut/hooks/pre-trigger/01-confluent.sh @@ -2,6 +2,15 @@ [ -e /tmp/confluent.initq ] && return 0 . 
/lib/dracut-lib.sh setsid sh -c 'exec bash <> /dev/tty2 >&0 2>&1' & +if [ -f /tmp/dd_disk ]; then + for dd in $(cat /tmp/dd_disk); do + if [ -e $dd ]; then + driver-updates --disk $dd $dd + rm $dd + fi + done + rm /tmp/dd_disk +fi udevadm trigger udevadm trigger --type=devices --action=add udevadm settle @@ -20,13 +29,6 @@ function confluentpython() { /usr/bin/python2 $* fi } -if [ -f /tmp/dd_disk ]; then - for dd in $(cat /tmp/dd_disk); do - if [ -e $dd ]; then - driver-updates --disk $dd $dd - fi - done -fi vlaninfo=$(getarg vlan) if [ ! -z "$vlaninfo" ]; then vldev=${vlaninfo#*:} @@ -61,43 +63,52 @@ if [ -e /dev/disk/by-label/CNFLNT_IDNT ]; then udevadm info $i | grep ID_NET_DRIVER=cdc_ether > /dev/null && continue ip link set $(basename $i) up done - for NICGUESS in $(ip link|grep LOWER_UP|grep -v LOOPBACK| awk '{print $2}' | sed -e 's/:$//'); do - if [ "$autoconfigmethod" = "dhcp" ]; then - /usr/libexec/nm-initrd-generator ip=$NICGUESS:dhcp - else - v4addr=$(grep ^ipv4_address: $tcfg) - v4addr=${v4addr#ipv4_address: } - v4plen=${v4addr#*/} - v4addr=${v4addr%/*} - v4gw=$(grep ^ipv4_gateway: $tcfg) - v4gw=${v4gw#ipv4_gateway: } - ip addr add dev $NICGUESS $v4addr/$v4plen - if [ "$v4gw" = "null" ]; then - v4gw="" - fi - if [ ! -z "$v4gw" ]; then - ip route add default via $v4gw - fi - v4nm=$(grep ipv4_netmask: $tcfg) - v4nm=${v4nm#ipv4_netmask: } - DETECTED=0 - for dsrv in $deploysrvs; do - if curl --capath /tls/ -s --connect-timeout 3 https://$dsrv/confluent-public/ > /dev/null; then - rm /run/NetworkManager/system-connections/* - /usr/libexec/nm-initrd-generator ip=$v4addr::$v4gw:$v4nm:$hostname:$NICGUESS:none - DETECTED=1 - ifname=$NICGUESS + TRIES=30 + DETECTED=0 + while [ "$DETECTED" = 0 ] && [ $TRIES -gt 0 ]; do + TRIES=$((TRIES - 1)) + for NICGUESS in $(ip link|grep LOWER_UP|grep -v LOOPBACK| awk '{print $2}' | sed -e 's/:$//'); do + if [ "$autoconfigmethod" = "dhcp" ]; then + /usr/libexec/nm-initrd-generator ip=$NICGUESS:dhcp + else + v4addr=$(grep ^ipv4_address: $tcfg) + v4addr=${v4addr#ipv4_address: } + v4plen=${v4addr#*/} + v4addr=${v4addr%/*} + v4gw=$(grep ^ipv4_gateway: $tcfg) + v4gw=${v4gw#ipv4_gateway: } + ip addr add dev $NICGUESS $v4addr/$v4plen + if [ "$v4gw" = "null" ]; then + v4gw="" + fi + if [ ! -z "$v4gw" ]; then + ip route add default via $v4gw + fi + v4nm=$(grep ipv4_netmask: $tcfg) + v4nm=${v4nm#ipv4_netmask: } + DETECTED=0 + for dsrv in $deploysrvs; do + if curl --capath /tls/ -s --connect-timeout 3 https://$dsrv/confluent-public/ > /dev/null; then + rm /run/NetworkManager/system-connections/* + /usr/libexec/nm-initrd-generator ip=$v4addr::$v4gw:$v4nm:$hostname:$NICGUESS:none + DETECTED=1 + ifname=$NICGUESS + break + fi + done + if [ ! -z "$v4gw" ]; then + ip route del default via $v4gw + fi + ip addr flush dev $NICGUESS + if [ $DETECTED = 1 ]; then break fi - done - if [ ! -z "$v4gw" ]; then - ip route del default via $v4gw fi - ip addr flush dev $NICGUESS - if [ $DETECTED = 1 ]; then - break - fi - fi + done + done + for NICGUESS in $(ip link|grep LOWER_UP|grep -v LOOPBACK| awk '{print $2}' | sed -e 's/:$//'); do + ip addr flush dev $NICGUESS + ip link set $NICGUESS down done NetworkManager --configure-and-quit=initrd --no-daemon hmackeyfile=/tmp/cnflnthmackeytmp @@ -175,7 +186,7 @@ if [ ! -z "$autocons" ]; then errout="-e $autocons" fi while ! 
confluentpython /opt/confluent/bin/apiclient $errout /confluent-api/self/deploycfg2 > /etc/confluent/confluent.deploycfg; do - sleep 10 + sleep 10 done ifidx=$(cat /tmp/confluent.ifidx 2> /dev/null) if [ -z "$ifname" ]; then @@ -216,23 +227,38 @@ proto=${proto#protocol: } textconsole=$(grep ^textconsole: /etc/confluent/confluent.deploycfg) textconsole=${textconsole#textconsole: } if [ "$textconsole" = "true" ] && ! grep console= /proc/cmdline > /dev/null; then - autocons=$(cat /tmp/01-autocons.devnode) - if [ ! -z "$autocons" ]; then - echo Auto-configuring installed system to use text console - echo Auto-configuring installed system to use text console > $autocons + autocons=$(cat /tmp/01-autocons.devnode) + if [ ! -z "$autocons" ]; then + echo Auto-configuring installed system to use text console + echo Auto-configuring installed system to use text console > $autocons /opt/confluent/bin/autocons -c > /dev/null - cp /tmp/01-autocons.conf /etc/cmdline.d/ - else - echo "Unable to automatically detect requested text console" - fi + cp /tmp/01-autocons.conf /etc/cmdline.d/ + else + echo "Unable to automatically detect requested text console" + fi fi -echo inst.repo=$proto://$mgr/confluent-public/os/$profilename/distribution >> /etc/cmdline.d/01-confluent.conf +. /etc/os-release +if [ "$ID" = "dracut" ]; then + ID=$(echo $PRETTY_NAME|awk '{print $1}') + VERSION_ID=$(echo $VERSION|awk '{print $1}') + if [ "$ID" = "Oracle" ]; then + ID=OL + elif [ "$ID" = "Red" ]; then + ID=RHEL + fi +fi +ISOSRC=$(blkid -t TYPE=iso9660|grep -Ei ' LABEL="'$ID-$VERSION_ID|sed -e s/:.*//) +if [ -z "$ISOSRC" ]; then + echo inst.repo=$proto://$mgr/confluent-public/os/$profilename/distribution >> /etc/cmdline.d/01-confluent.conf + root=anaconda-net:$proto://$mgr/confluent-public/os/$profilename/distribution + export root +else + echo inst.repo=cdrom:$ISOSRC >> /etc/cmdline.d/01-confluent.conf +fi echo inst.ks=$proto://$mgr/confluent-public/os/$profilename/kickstart >> /etc/cmdline.d/01-confluent.conf kickstart=$proto://$mgr/confluent-public/os/$profilename/kickstart -root=anaconda-net:$proto://$mgr/confluent-public/os/$profilename/distribution export kickstart -export root autoconfigmethod=$(grep ipv4_method /etc/confluent/confluent.deploycfg) autoconfigmethod=${autoconfigmethod#ipv4_method: } if [ "$autoconfigmethod" = "dhcp" ]; then @@ -312,4 +338,8 @@ if [ -e /lib/nm-lib.sh ]; then fi fi fi +for NICGUESS in $(ip link|grep LOWER_UP|grep -v LOOPBACK| awk '{print $2}' | sed -e 's/:$//'); do + ip addr flush dev $NICGUESS + ip link set $NICGUESS down +done diff --git a/confluent_osdeploy/el8/profiles/default/scripts/add_local_repositories b/confluent_osdeploy/el8/profiles/default/scripts/add_local_repositories index fb26d5ef..79b0b6c5 100644 --- a/confluent_osdeploy/el8/profiles/default/scripts/add_local_repositories +++ b/confluent_osdeploy/el8/profiles/default/scripts/add_local_repositories @@ -27,7 +27,7 @@ with open('/etc/confluent/confluent.deploycfg') as dplcfgfile: _, profile = line.split(' ', 1) if line.startswith('ipv4_method: '): _, v4cfg = line.split(' ', 1) -if v4cfg == 'static' or v4cfg =='dhcp': +if v4cfg == 'static' or v4cfg =='dhcp' or not server6: server = server4 if not server: server = '[{}]'.format(server6) diff --git a/confluent_osdeploy/el8/profiles/default/scripts/getinstalldisk b/confluent_osdeploy/el8/profiles/default/scripts/getinstalldisk index 522aba00..04c7708e 100644 --- a/confluent_osdeploy/el8/profiles/default/scripts/getinstalldisk +++ 
b/confluent_osdeploy/el8/profiles/default/scripts/getinstalldisk @@ -3,6 +3,8 @@ import os class DiskInfo(object): def __init__(self, devname): + if devname.startswith('nvme') and 'c' in devname: + raise Exception("Skipping multipath devname") self.name = devname self.wwn = None self.path = None diff --git a/confluent_osdeploy/el8/profiles/default/scripts/pre.sh b/confluent_osdeploy/el8/profiles/default/scripts/pre.sh index 4d76aaa3..880d22ac 100644 --- a/confluent_osdeploy/el8/profiles/default/scripts/pre.sh +++ b/confluent_osdeploy/el8/profiles/default/scripts/pre.sh @@ -90,8 +90,14 @@ touch /tmp/cryptpkglist touch /tmp/pkglist touch /tmp/addonpackages if [ "$cryptboot" == "tpm2" ]; then - LUKSPARTY="--encrypted --passphrase=$(cat /etc/confluent/confluent.apikey)" - echo $cryptboot >> /tmp/cryptboot + lukspass=$(python3 /opt/confluent/bin/apiclient /confluent-api/self/profileprivate/pending/luks.key 2> /dev/null) + if [ -z "$lukspass" ]; then + lukspass=$(python3 -c 'import os;import base64;print(base64.b64encode(os.urandom(66)).decode())') + fi + echo $lukspass > /etc/confluent/luks.key + chmod 000 /etc/confluent/luks.key + LUKSPARTY="--encrypted --passphrase=$lukspass" + echo $cryptboot >> /tmp/cryptboot echo clevis-dracut >> /tmp/cryptpkglist fi @@ -114,8 +120,8 @@ confluentpython /etc/confluent/apiclient /confluent-public/os/$confluent_profile grep '^%include /tmp/partitioning' /tmp/kickstart.* > /dev/null || rm /tmp/installdisk if [ -e /tmp/installdisk -a ! -e /tmp/partitioning ]; then INSTALLDISK=$(cat /tmp/installdisk) - sed -e s/%%INSTALLDISK%%/$INSTALLDISK/ -e s/%%LUKSHOOK%%/$LUKSPARTY/ /tmp/partitioning.template > /tmp/partitioning - dd if=/dev/zero of=/dev/$(cat /tmp/installdisk) bs=1M count=1 >& /dev/null + sed -e s/%%INSTALLDISK%%/$INSTALLDISK/ -e "s!%%LUKSHOOK%%!$LUKSPARTY!" 
/tmp/partitioning.template > /tmp/partitioning vgchange -a n >& /dev/null + wipefs -a -f /dev/$INSTALLDISK >& /dev/null fi kill $logshowpid diff --git a/confluent_osdeploy/el8/profiles/default/scripts/setupssh.sh b/confluent_osdeploy/el8/profiles/default/scripts/setupssh.sh index f06c4d61..bc74faf5 100644 --- a/confluent_osdeploy/el8/profiles/default/scripts/setupssh.sh +++ b/confluent_osdeploy/el8/profiles/default/scripts/setupssh.sh @@ -1,8 +1,12 @@ #!/bin/sh -grep HostCert /etc/ssh/sshd_config.anaconda >> /mnt/sysimage/etc/ssh/sshd_config -echo HostbasedAuthentication yes >> /mnt/sysimage/etc/ssh/sshd_config -echo HostbasedUsesNameFromPacketOnly yes >> /mnt/sysimage/etc/ssh/sshd_config -echo IgnoreRhosts no >> /mnt/sysimage/etc/ssh/sshd_config +targssh=/mnt/sysimage/etc/ssh/sshd_config +if [ -d /mnt/sysimage/etc/ssh/sshd_config.d/ ]; then + targssh=/mnt/sysimage/etc/ssh/sshd_config.d/90-confluent.conf +fi +grep HostCert /etc/ssh/sshd_config.anaconda >> $targssh +echo HostbasedAuthentication yes >> $targssh +echo HostbasedUsesNameFromPacketOnly yes >> $targssh +echo IgnoreRhosts no >> $targssh sshconf=/mnt/sysimage/etc/ssh/ssh_config if [ -d /mnt/sysimage/etc/ssh/ssh_config.d/ ]; then sshconf=/mnt/sysimage/etc/ssh/ssh_config.d/01-confluent.conf diff --git a/confluent_osdeploy/el8/profiles/default/scripts/syncfileclient b/confluent_osdeploy/el8/profiles/default/scripts/syncfileclient index f7d4c0b4..5f2efc5e 100644 --- a/confluent_osdeploy/el8/profiles/default/scripts/syncfileclient +++ b/confluent_osdeploy/el8/profiles/default/scripts/syncfileclient @@ -1,4 +1,6 @@ #!/usr/bin/python3 +import random +import time import subprocess import importlib import tempfile @@ -7,6 +9,7 @@ import os import shutil import pwd import grp +import sys from importlib.machinery import SourceFileLoader try: apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module() @@ -227,9 +230,16 @@ def synchronize(): myips.append(addr) data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir, 'myips': myips}) status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data) + if status >= 300: + sys.stderr.write("Error starting syncfiles - {}:\n".format(status)) + sys.stderr.write(rsp.decode('utf8')) + sys.stderr.write('\n') + sys.stderr.flush() + return status if status == 202: lastrsp = '' while status != 204: + time.sleep(1+(2*random.random())) status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles') if not isinstance(rsp, str): rsp = rsp.decode('utf8') @@ -277,10 +287,21 @@ def synchronize(): os.chmod(fname, int(opts[fname][opt], 8)) if uid != -1 or gid != -1: os.chown(fname, uid, gid) + return status finally: shutil.rmtree(tmpdir) shutil.rmtree(appendoncedir) if __name__ == '__main__': - synchronize() + status = 202 + while status not in (204, 200): + try: + status = synchronize() + except Exception as e: + sys.stderr.write(str(e)) + sys.stderr.write('\n') + sys.stderr.flush() + status = 300 + if status not in (204, 200): + time.sleep((random.random()*3)+2) diff --git a/confluent_osdeploy/el8/profiles/default/scripts/tpm_luks.sh b/confluent_osdeploy/el8/profiles/default/scripts/tpm_luks.sh index df9c857f..359c46f6 100644 --- a/confluent_osdeploy/el8/profiles/default/scripts/tpm_luks.sh +++ b/confluent_osdeploy/el8/profiles/default/scripts/tpm_luks.sh @@ -1,4 +1,5 @@ #!/bin/sh cryptdisk=$(blkid -t TYPE="crypto_LUKS"|sed -e s/:.*//) -clevis luks bind -f -d $cryptdisk -k - tpm2 '{}' < /etc/confluent/confluent.apikey -cryptsetup luksRemoveKey 
$cryptdisk < /etc/confluent/confluent.apikey +clevis luks bind -f -d $cryptdisk -k - tpm2 '{}' < /etc/confluent/luks.key +chmod 000 /etc/confluent/luks.key +#cryptsetup luksRemoveKey $cryptdisk < /etc/confluent/confluent.apikey diff --git a/confluent_osdeploy/el9-diskless/initramfs/usr/lib/dracut/hooks/cmdline/10-confluentdiskless.sh b/confluent_osdeploy/el9-diskless/initramfs/usr/lib/dracut/hooks/cmdline/10-confluentdiskless.sh index 4fca92cf..9b885e82 100644 --- a/confluent_osdeploy/el9-diskless/initramfs/usr/lib/dracut/hooks/cmdline/10-confluentdiskless.sh +++ b/confluent_osdeploy/el9-diskless/initramfs/usr/lib/dracut/hooks/cmdline/10-confluentdiskless.sh @@ -120,7 +120,7 @@ fi ready=0 while [ $ready = "0" ]; do get_remote_apikey - if [[ $confluent_mgr == *:* ]]; then + if [[ $confluent_mgr == *:* ]] && [[ $confluent_mgr != "["* ]]; then confluent_mgr="[$confluent_mgr]" fi tmperr=$(mktemp) @@ -154,7 +154,7 @@ cat > /run/NetworkManager/system-connections/$ifname.nmconnection << EOC EOC echo id=${ifname} >> /run/NetworkManager/system-connections/$ifname.nmconnection echo uuid=$(uuidgen) >> /run/NetworkManager/system-connections/$ifname.nmconnection -linktype=$(ip link |grep -A2 ${ifname}|tail -n 1|awk '{print $1}') +linktype=$(ip link show dev ${ifname}|grep link/|awk '{print $1}') if [ "$linktype" = link/infiniband ]; then linktype="infiniband" else @@ -171,6 +171,13 @@ permissions= wait-device-timeout=60000 EOC +if [ "$linktype" = infiniband ]; then +cat >> /run/NetworkManager/system-connections/$ifname.nmconnection << EOC +[infiniband] +transport-mode=datagram + +EOC +fi autoconfigmethod=$(grep ^ipv4_method: /etc/confluent/confluent.deploycfg |awk '{print $2}') auto6configmethod=$(grep ^ipv6_method: /etc/confluent/confluent.deploycfg |awk '{print $2}') if [ "$autoconfigmethod" = "dhcp" ]; then @@ -281,7 +288,7 @@ fi echo '[proxy]' >> /run/NetworkManager/system-connections/$ifname.nmconnection chmod 600 /run/NetworkManager/system-connections/*.nmconnection confluent_websrv=$confluent_mgr -if [[ $confluent_websrv == *:* ]]; then +if [[ $confluent_websrv == *:* ]] && [[ $confluent_websrv != "["* ]]; then confluent_websrv="[$confluent_websrv]" fi echo -n "Initializing ssh..." diff --git a/confluent_osdeploy/el9-diskless/profiles/default/scripts/firstboot.sh b/confluent_osdeploy/el9-diskless/profiles/default/scripts/firstboot.sh index 2bab4136..fabb9385 100644 --- a/confluent_osdeploy/el9-diskless/profiles/default/scripts/firstboot.sh +++ b/confluent_osdeploy/el9-diskless/profiles/default/scripts/firstboot.sh @@ -9,9 +9,16 @@ HOME=$(getent passwd $(whoami)|cut -d: -f 6) export HOME nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}') confluent_apikey=$(cat /etc/confluent/confluent.apikey) -confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}') +confluent_mgr=$(grep ^deploy_server_v6: /etc/confluent/confluent.deploycfg|awk '{print $2}') +if [ -z "$confluent_mgr" ] || [ "$confluent_mgr" == "null" ] || ! ping -c 1 $confluent_mgr >& /dev/null; then + confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}') +fi +confluent_websrv=$confluent_mgr +if [[ "$confluent_mgr" == *:* ]]; then + confluent_websrv="[$confluent_mgr]" +fi confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}') -export nodename confluent_mgr confluent_profile +export nodename confluent_mgr confluent_profile confluent_websrv . 
/etc/confluent/functions ( exec >> /var/log/confluent/confluent-firstboot.log @@ -34,7 +41,7 @@ if [ ! -f /etc/confluent/firstboot.ran ]; then run_remote_config firstboot.d fi -curl -X POST -d 'status: complete' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$confluent_mgr/confluent-api/self/updatestatus +curl -X POST -d 'status: complete' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$confluent_websrv/confluent-api/self/updatestatus systemctl disable firstboot rm /etc/systemd/system/firstboot.service rm /etc/confluent/firstboot.ran diff --git a/confluent_osdeploy/el9-diskless/profiles/default/scripts/getinstalldisk b/confluent_osdeploy/el9-diskless/profiles/default/scripts/getinstalldisk index 522aba00..04c7708e 100644 --- a/confluent_osdeploy/el9-diskless/profiles/default/scripts/getinstalldisk +++ b/confluent_osdeploy/el9-diskless/profiles/default/scripts/getinstalldisk @@ -3,6 +3,8 @@ import os class DiskInfo(object): def __init__(self, devname): + if devname.startswith('nvme') and 'c' in devname: + raise Exception("Skipping multipath devname") self.name = devname self.wwn = None self.path = None diff --git a/confluent_osdeploy/el9-diskless/profiles/default/scripts/image2disk.py b/confluent_osdeploy/el9-diskless/profiles/default/scripts/image2disk.py index 7b312a93..6a924964 100644 --- a/confluent_osdeploy/el9-diskless/profiles/default/scripts/image2disk.py +++ b/confluent_osdeploy/el9-diskless/profiles/default/scripts/image2disk.py @@ -10,8 +10,16 @@ import stat import struct import sys import subprocess +import traceback bootuuid = None +vgname = 'localstorage' +oldvgname = None + +def convert_lv(oldlvname): + if oldvgname is None: + return None + return oldlvname.replace(oldvgname, vgname) def get_partname(devname, idx): if devname[-1] in '0123456789': @@ -53,6 +61,8 @@ def get_image_metadata(imgpath): header = img.read(16) if header == b'\x63\x7b\x9d\x26\xb7\xfd\x48\x30\x89\xf9\x11\xcf\x18\xfd\xff\xa1': for md in get_multipart_image_meta(img): + if md.get('device', '').startswith('/dev/zram'): + continue yield md else: raise Exception('Installation from single part image not supported') @@ -86,14 +96,14 @@ def fixup(rootdir, vols): if tab.startswith('#ORIGFSTAB#'): if entry[1] in devbymount: targetdev = devbymount[entry[1]] - if targetdev.startswith('/dev/localstorage/'): + if targetdev.startswith('/dev/{}/'.format(vgname)): entry[0] = targetdev else: uuid = subprocess.check_output(['blkid', '-s', 'UUID', '-o', 'value', targetdev]).decode('utf8') uuid = uuid.strip() entry[0] = 'UUID={}'.format(uuid) elif entry[2] == 'swap': - entry[0] = '/dev/mapper/localstorage-swap' + entry[0] = '/dev/mapper/{}-swap'.format(vgname.replace('-', '--')) entry[0] = entry[0].ljust(42) entry[1] = entry[1].ljust(16) entry[3] = entry[3].ljust(28) @@ -141,6 +151,46 @@ def fixup(rootdir, vols): grubsyscfg = os.path.join(rootdir, 'etc/sysconfig/grub') if not os.path.exists(grubsyscfg): grubsyscfg = os.path.join(rootdir, 'etc/default/grub') + kcmdline = os.path.join(rootdir, 'etc/kernel/cmdline') + if os.path.exists(kcmdline): + with open(kcmdline) as kcmdlinein: + kcmdlinecontent = kcmdlinein.read() + newkcmdlineent = [] + for ent in kcmdlinecontent.split(): + if ent.startswith('resume='): + newkcmdlineent.append('resume={}'.format(newswapdev)) + elif ent.startswith('root='): + newkcmdlineent.append('root={}'.format(newrootdev)) + elif ent.startswith('rd.lvm.lv='): + ent = convert_lv(ent) + if ent: + newkcmdlineent.append(ent) + 
else: + newkcmdlineent.append(ent) + with open(kcmdline, 'w') as kcmdlineout: + kcmdlineout.write(' '.join(newkcmdlineent) + '\n') + for loadent in glob.glob(os.path.join(rootdir, 'boot/loader/entries/*.conf')): + with open(loadent) as loadentin: + currentry = loadentin.read().split('\n') + with open(loadent, 'w') as loadentout: + for cfgline in currentry: + cfgparts = cfgline.split() + if not cfgparts or cfgparts[0] != 'options': + loadentout.write(cfgline + '\n') + continue + newcfgparts = [cfgparts[0]] + for cfgpart in cfgparts[1:]: + if cfgpart.startswith('root='): + newcfgparts.append('root={}'.format(newrootdev)) + elif cfgpart.startswith('resume='): + newcfgparts.append('resume={}'.format(newswapdev)) + elif cfgpart.startswith('rd.lvm.lv='): + cfgpart = convert_lv(cfgpart) + if cfgpart: + newcfgparts.append(cfgpart) + else: + newcfgparts.append(cfgpart) + loadentout.write(' '.join(newcfgparts) + '\n') with open(grubsyscfg) as defgrubin: defgrub = defgrubin.read().split('\n') with open(grubsyscfg, 'w') as defgrubout: @@ -148,9 +198,18 @@ def fixup(rootdir, vols): gline = gline.split() newline = [] for ent in gline: - if ent.startswith('resume=') or ent.startswith('rd.lvm.lv'): - continue - newline.append(ent) + if ent.startswith('resume='): + newline.append('resume={}'.format(newswapdev)) + elif ent.startswith('root='): + newline.append('root={}'.format(newrootdev)) + elif ent.startswith('rd.lvm.lv='): + ent = convert_lv(ent) + if ent: + newline.append(ent) + elif '""' in ent: + newline.append('""') + else: + newline.append(ent) defgrubout.write(' '.join(newline) + '\n') grubcfg = subprocess.check_output(['find', os.path.join(rootdir, 'boot'), '-name', 'grub.cfg']).decode('utf8').strip().replace(rootdir, '/').replace('//', '/') grubcfg = grubcfg.split('\n') @@ -227,8 +286,14 @@ def had_swap(): return True return False +newrootdev = None +newswapdev = None def install_to_disk(imgpath): global bootuuid + global newrootdev + global newswapdev + global vgname + global oldvgname lvmvols = {} deftotsize = 0 mintotsize = 0 @@ -260,6 +325,13 @@ def install_to_disk(imgpath): biggestfs = fs biggestsize = fs['initsize'] if fs['device'].startswith('/dev/mapper'): + oldvgname = fs['device'].rsplit('/', 1)[-1] + # if node has - then /dev/mapper will double up the hypen + if '_' in oldvgname and '-' in oldvgname.split('_')[-1]: + oldvgname = oldvgname.rsplit('-', 1)[0].replace('--', '-') + osname = oldvgname.split('_')[0] + nodename = socket.gethostname().split('.')[0] + vgname = '{}_{}'.format(osname, nodename) lvmvols[fs['device'].replace('/dev/mapper/', '')] = fs deflvmsize += fs['initsize'] minlvmsize += fs['minsize'] @@ -304,6 +376,8 @@ def install_to_disk(imgpath): end = sectors parted.run('mkpart primary {}s {}s'.format(curroffset, end)) vol['targetdisk'] = get_partname(instdisk, volidx) + if vol['mount'] == '/': + newrootdev = vol['targetdisk'] curroffset += size + 1 if not lvmvols: if swapsize: @@ -313,13 +387,14 @@ def install_to_disk(imgpath): if end > sectors: end = sectors parted.run('mkpart swap {}s {}s'.format(curroffset, end)) - subprocess.check_call(['mkswap', get_partname(instdisk, volidx + 1)]) + newswapdev = get_partname(instdisk, volidx + 1) + subprocess.check_call(['mkswap', newswapdev]) else: parted.run('mkpart lvm {}s 100%'.format(curroffset)) lvmpart = get_partname(instdisk, volidx + 1) subprocess.check_call(['pvcreate', '-ff', '-y', lvmpart]) - subprocess.check_call(['vgcreate', 'localstorage', lvmpart]) - vginfo = subprocess.check_output(['vgdisplay', 'localstorage', 
'--units', 'b']).decode('utf8') + subprocess.check_call(['vgcreate', vgname, lvmpart]) + vginfo = subprocess.check_output(['vgdisplay', vgname, '--units', 'b']).decode('utf8') vginfo = vginfo.split('\n') pesize = 0 pes = 0 @@ -346,13 +421,17 @@ def install_to_disk(imgpath): extents += 1 if vol['mount'] == '/': lvname = 'root' + else: lvname = vol['mount'].replace('/', '_') - subprocess.check_call(['lvcreate', '-l', '{}'.format(extents), '-y', '-n', lvname, 'localstorage']) - vol['targetdisk'] = '/dev/localstorage/{}'.format(lvname) + subprocess.check_call(['lvcreate', '-l', '{}'.format(extents), '-y', '-n', lvname, vgname]) + vol['targetdisk'] = '/dev/{}/{}'.format(vgname, lvname) + if vol['mount'] == '/': + newrootdev = vol['targetdisk'] if swapsize: - subprocess.check_call(['lvcreate', '-y', '-l', '{}'.format(swapsize // pesize), '-n', 'swap', 'localstorage']) - subprocess.check_call(['mkswap', '/dev/localstorage/swap']) + subprocess.check_call(['lvcreate', '-y', '-l', '{}'.format(swapsize // pesize), '-n', 'swap', vgname]) + subprocess.check_call(['mkswap', '/dev/{}/swap'.format(vgname)]) + newswapdev = '/dev/{}/swap'.format(vgname) os.makedirs('/run/imginst/targ') for vol in allvols: with open(vol['targetdisk'], 'wb') as partition: @@ -426,4 +505,9 @@ def install_to_disk(imgpath): if __name__ == '__main__': - install_to_disk(os.environ['mountsrc']) + try: + install_to_disk(os.environ['mountsrc']) + except Exception: + traceback.print_exc() + time.sleep(86400) + raise diff --git a/confluent_osdeploy/el9-diskless/profiles/default/scripts/imageboot.sh b/confluent_osdeploy/el9-diskless/profiles/default/scripts/imageboot.sh index ee2a8125..fe53bf38 100644 --- a/confluent_osdeploy/el9-diskless/profiles/default/scripts/imageboot.sh +++ b/confluent_osdeploy/el9-diskless/profiles/default/scripts/imageboot.sh @@ -1,6 +1,6 @@ . /lib/dracut-lib.sh confluent_whost=$confluent_mgr -if [[ "$confluent_whost" == *:* ]]; then +if [[ "$confluent_whost" == *:* ]] && [[ "$confluent_whost" != "["* ]]; then confluent_whost="[$confluent_mgr]" fi mkdir -p /mnt/remoteimg /mnt/remote /mnt/overlay diff --git a/confluent_osdeploy/el9-diskless/profiles/default/scripts/installimage b/confluent_osdeploy/el9-diskless/profiles/default/scripts/installimage index 2e791ce6..56597086 100644 --- a/confluent_osdeploy/el9-diskless/profiles/default/scripts/installimage +++ b/confluent_osdeploy/el9-diskless/profiles/default/scripts/installimage @@ -30,6 +30,7 @@ if [ ! -f /sysroot/tmp/installdisk ]; then done fi lvm vgchange -a n +/sysroot/usr/sbin/wipefs -a /dev/$(cat /sysroot/tmp/installdisk) udevadm control -e if [ -f /sysroot/etc/lvm/devices/system.devices ]; then rm /sysroot/etc/lvm/devices/system.devices diff --git a/confluent_osdeploy/el9-diskless/profiles/default/scripts/onboot.sh b/confluent_osdeploy/el9-diskless/profiles/default/scripts/onboot.sh index 3c99ad12..b2c0d1b3 100644 --- a/confluent_osdeploy/el9-diskless/profiles/default/scripts/onboot.sh +++ b/confluent_osdeploy/el9-diskless/profiles/default/scripts/onboot.sh @@ -16,6 +16,7 @@ if [ -z "$confluent_mgr" ]; then fi confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}') timedatectl set-timezone $(grep ^timezone: /etc/confluent/confluent.deploycfg|awk '{print $2}') +hostnamectl set-hostname $nodename export nodename confluent_mgr confluent_profile . 
/etc/confluent/functions mkdir -p /var/log/confluent diff --git a/confluent_osdeploy/el9-diskless/profiles/default/scripts/post.sh b/confluent_osdeploy/el9-diskless/profiles/default/scripts/post.sh index 3a52d128..7a7ac01e 100644 --- a/confluent_osdeploy/el9-diskless/profiles/default/scripts/post.sh +++ b/confluent_osdeploy/el9-diskless/profiles/default/scripts/post.sh @@ -5,9 +5,16 @@ nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}') confluent_apikey=$(cat /etc/confluent/confluent.apikey) -confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}') confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}') -export nodename confluent_mgr confluent_profile +confluent_mgr=$(grep ^deploy_server_v6: /etc/confluent/confluent.deploycfg|awk '{print $2}') +if [ -z "$confluent_mgr" ] || [ "$confluent_mgr" == "null" ] || ! ping -c 1 $confluent_mgr >& /dev/null; then + confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}') +fi +confluent_websrv=$confluent_mgr +if [[ "$confluent_mgr" == *:* ]]; then + confluent_websrv="[$confluent_mgr]" +fi +export nodename confluent_mgr confluent_profile confluent_websrv . /etc/confluent/functions mkdir -p /var/log/confluent chmod 700 /var/log/confluent @@ -16,9 +23,9 @@ exec 2>> /var/log/confluent/confluent-post.log chmod 600 /var/log/confluent/confluent-post.log tail -f /var/log/confluent/confluent-post.log > /dev/console & logshowpid=$! -curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/firstboot.service > /etc/systemd/system/firstboot.service +curl -f https://$confluent_websrv/confluent-public/os/$confluent_profile/scripts/firstboot.service > /etc/systemd/system/firstboot.service mkdir -p /opt/confluent/bin -curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/firstboot.sh > /opt/confluent/bin/firstboot.sh +curl -f https://$confluent_websrv/confluent-public/os/$confluent_profile/scripts/firstboot.sh > /opt/confluent/bin/firstboot.sh chmod +x /opt/confluent/bin/firstboot.sh systemctl enable firstboot selinuxpolicy=$(grep ^SELINUXTYPE /etc/selinux/config |awk -F= '{print $2}') @@ -33,7 +40,7 @@ run_remote_parts post.d # Induce execution of remote configuration, e.g. 
ansible plays in ansible/post.d/ run_remote_config post.d -curl -sf -X POST -d 'status: staged' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$confluent_mgr/confluent-api/self/updatestatus +curl -sf -X POST -d 'status: staged' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$confluent_websrv/confluent-api/self/updatestatus kill $logshowpid diff --git a/confluent_osdeploy/el9-diskless/profiles/default/scripts/syncfileclient b/confluent_osdeploy/el9-diskless/profiles/default/scripts/syncfileclient index f7d4c0b4..5f2efc5e 100644 --- a/confluent_osdeploy/el9-diskless/profiles/default/scripts/syncfileclient +++ b/confluent_osdeploy/el9-diskless/profiles/default/scripts/syncfileclient @@ -1,4 +1,6 @@ #!/usr/bin/python3 +import random +import time import subprocess import importlib import tempfile @@ -7,6 +9,7 @@ import os import shutil import pwd import grp +import sys from importlib.machinery import SourceFileLoader try: apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module() @@ -227,9 +230,16 @@ def synchronize(): myips.append(addr) data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir, 'myips': myips}) status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data) + if status >= 300: + sys.stderr.write("Error starting syncfiles - {}:\n".format(status)) + sys.stderr.write(rsp.decode('utf8')) + sys.stderr.write('\n') + sys.stderr.flush() + return status if status == 202: lastrsp = '' while status != 204: + time.sleep(1+(2*random.random())) status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles') if not isinstance(rsp, str): rsp = rsp.decode('utf8') @@ -277,10 +287,21 @@ def synchronize(): os.chmod(fname, int(opts[fname][opt], 8)) if uid != -1 or gid != -1: os.chown(fname, uid, gid) + return status finally: shutil.rmtree(tmpdir) shutil.rmtree(appendoncedir) if __name__ == '__main__': - synchronize() + status = 202 + while status not in (204, 200): + try: + status = synchronize() + except Exception as e: + sys.stderr.write(str(e)) + sys.stderr.write('\n') + sys.stderr.flush() + status = 300 + if status not in (204, 200): + time.sleep((random.random()*3)+2) diff --git a/confluent_osdeploy/esxi7/initramfs/bin/dcuiweasel b/confluent_osdeploy/esxi7/initramfs/bin/dcuiweasel index 74d47104..f88c730d 100644 --- a/confluent_osdeploy/esxi7/initramfs/bin/dcuiweasel +++ b/confluent_osdeploy/esxi7/initramfs/bin/dcuiweasel @@ -116,4 +116,5 @@ profile=$(grep ^profile: /etc/confluent/confluent.deploycfg.new | sed -e 's/^pro mv /etc/confluent/confluent.deploycfg.new /etc/confluent/confluent.deploycfg export node mgr profile . /tmp/modinstall +localcli network firewall load exec /bin/install diff --git a/confluent_osdeploy/genesis/initramfs/opt/confluent/bin/rungenesis b/confluent_osdeploy/genesis/initramfs/opt/confluent/bin/rungenesis index b7035fe0..ebf0a380 100644 --- a/confluent_osdeploy/genesis/initramfs/opt/confluent/bin/rungenesis +++ b/confluent_osdeploy/genesis/initramfs/opt/confluent/bin/rungenesis @@ -174,6 +174,8 @@ dnsdomain=${dnsdomain#dnsdomain: } echo search $dnsdomain >> /etc/resolv.conf echo -n "Initializing ssh..." 
ssh-keygen -A +mkdir -p /usr/share/empty.sshd +rm /etc/ssh/ssh_host_dsa_key* for pubkey in /etc/ssh/ssh_host*key.pub; do certfile=${pubkey/.pub/-cert.pub} privfile=${pubkey%.pub} diff --git a/confluent_osdeploy/genesis/profiles/default/scripts/syncfileclient b/confluent_osdeploy/genesis/profiles/default/scripts/syncfileclient index f7d4c0b4..5f2efc5e 100644 --- a/confluent_osdeploy/genesis/profiles/default/scripts/syncfileclient +++ b/confluent_osdeploy/genesis/profiles/default/scripts/syncfileclient @@ -1,4 +1,6 @@ #!/usr/bin/python3 +import random +import time import subprocess import importlib import tempfile @@ -7,6 +9,7 @@ import os import shutil import pwd import grp +import sys from importlib.machinery import SourceFileLoader try: apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module() @@ -227,9 +230,16 @@ def synchronize(): myips.append(addr) data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir, 'myips': myips}) status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data) + if status >= 300: + sys.stderr.write("Error starting syncfiles - {}:\n".format(status)) + sys.stderr.write(rsp.decode('utf8')) + sys.stderr.write('\n') + sys.stderr.flush() + return status if status == 202: lastrsp = '' while status != 204: + time.sleep(1+(2*random.random())) status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles') if not isinstance(rsp, str): rsp = rsp.decode('utf8') @@ -277,10 +287,21 @@ def synchronize(): os.chmod(fname, int(opts[fname][opt], 8)) if uid != -1 or gid != -1: os.chown(fname, uid, gid) + return status finally: shutil.rmtree(tmpdir) shutil.rmtree(appendoncedir) if __name__ == '__main__': - synchronize() + status = 202 + while status not in (204, 200): + try: + status = synchronize() + except Exception as e: + sys.stderr.write(str(e)) + sys.stderr.write('\n') + sys.stderr.flush() + status = 300 + if status not in (204, 200): + time.sleep((random.random()*3)+2) diff --git a/confluent_osdeploy/rhvh4/profiles/default/scripts/getinstalldisk b/confluent_osdeploy/rhvh4/profiles/default/scripts/getinstalldisk index 522aba00..04c7708e 100644 --- a/confluent_osdeploy/rhvh4/profiles/default/scripts/getinstalldisk +++ b/confluent_osdeploy/rhvh4/profiles/default/scripts/getinstalldisk @@ -3,6 +3,8 @@ import os class DiskInfo(object): def __init__(self, devname): + if devname.startswith('nvme') and 'c' in devname: + raise Exception("Skipping multipath devname") self.name = devname self.wwn = None self.path = None diff --git a/confluent_osdeploy/suse15-diskless/initramfs/lib/dracut/hooks/cmdline/10-confluentdiskless.sh b/confluent_osdeploy/suse15-diskless/initramfs/lib/dracut/hooks/cmdline/10-confluentdiskless.sh index 146c4797..5586978c 100644 --- a/confluent_osdeploy/suse15-diskless/initramfs/lib/dracut/hooks/cmdline/10-confluentdiskless.sh +++ b/confluent_osdeploy/suse15-diskless/initramfs/lib/dracut/hooks/cmdline/10-confluentdiskless.sh @@ -116,7 +116,7 @@ fi ready=0 while [ $ready = "0" ]; do get_remote_apikey - if [[ $confluent_mgr == *:* ]]; then + if [[ $confluent_mgr == *:* ]] && [[ $confluent_mgr != "["* ]]; then confluent_mgr="[$confluent_mgr]" fi tmperr=$(mktemp) diff --git a/confluent_osdeploy/suse15-diskless/profiles/default/scripts/syncfileclient b/confluent_osdeploy/suse15-diskless/profiles/default/scripts/syncfileclient index f7d4c0b4..5f2efc5e 100644 --- a/confluent_osdeploy/suse15-diskless/profiles/default/scripts/syncfileclient +++ 
b/confluent_osdeploy/suse15-diskless/profiles/default/scripts/syncfileclient @@ -1,4 +1,6 @@ #!/usr/bin/python3 +import random +import time import subprocess import importlib import tempfile @@ -7,6 +9,7 @@ import os import shutil import pwd import grp +import sys from importlib.machinery import SourceFileLoader try: apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module() @@ -227,9 +230,16 @@ def synchronize(): myips.append(addr) data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir, 'myips': myips}) status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data) + if status >= 300: + sys.stderr.write("Error starting syncfiles - {}:\n".format(status)) + sys.stderr.write(rsp.decode('utf8')) + sys.stderr.write('\n') + sys.stderr.flush() + return status if status == 202: lastrsp = '' while status != 204: + time.sleep(1+(2*random.random())) status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles') if not isinstance(rsp, str): rsp = rsp.decode('utf8') @@ -277,10 +287,21 @@ def synchronize(): os.chmod(fname, int(opts[fname][opt], 8)) if uid != -1 or gid != -1: os.chown(fname, uid, gid) + return status finally: shutil.rmtree(tmpdir) shutil.rmtree(appendoncedir) if __name__ == '__main__': - synchronize() + status = 202 + while status not in (204, 200): + try: + status = synchronize() + except Exception as e: + sys.stderr.write(str(e)) + sys.stderr.write('\n') + sys.stderr.flush() + status = 300 + if status not in (204, 200): + time.sleep((random.random()*3)+2) diff --git a/confluent_osdeploy/suse15/profiles/hpc/autoyast.leap b/confluent_osdeploy/suse15/profiles/hpc/autoyast.leap index 7f9d08f7..e92ec9fd 100644 --- a/confluent_osdeploy/suse15/profiles/hpc/autoyast.leap +++ b/confluent_osdeploy/suse15/profiles/hpc/autoyast.leap @@ -10,6 +10,12 @@ dynamic behavior and replace with static configuration. 
UTC %%TIMEZONE%% + + false + + + false + false diff --git a/confluent_osdeploy/suse15/profiles/hpc/initprofile.sh b/confluent_osdeploy/suse15/profiles/hpc/initprofile.sh index 9c6c295e..62a2663e 100644 --- a/confluent_osdeploy/suse15/profiles/hpc/initprofile.sh +++ b/confluent_osdeploy/suse15/profiles/hpc/initprofile.sh @@ -1,4 +1,7 @@ #!/bin/sh +# WARNING +# be careful when editing files here as this script is called +# in parallel to other copy operations, so changes to files can be lost discnum=$(basename $1) if [ "$discnum" != 1 ]; then exit 0; fi if [ -e $2/boot/kernel ]; then exit 0; fi diff --git a/confluent_osdeploy/suse15/profiles/hpc/scripts/getinstalldisk b/confluent_osdeploy/suse15/profiles/hpc/scripts/getinstalldisk index 522aba00..04c7708e 100644 --- a/confluent_osdeploy/suse15/profiles/hpc/scripts/getinstalldisk +++ b/confluent_osdeploy/suse15/profiles/hpc/scripts/getinstalldisk @@ -3,6 +3,8 @@ import os class DiskInfo(object): def __init__(self, devname): + if devname.startswith('nvme') and 'c' in devname: + raise Exception("Skipping multipath devname") self.name = devname self.wwn = None self.path = None diff --git a/confluent_osdeploy/suse15/profiles/hpc/scripts/post.d/10-remove-online-repos.sh b/confluent_osdeploy/suse15/profiles/hpc/scripts/post.d/10-remove-online-repos.sh new file mode 100644 index 00000000..9ae8224e --- /dev/null +++ b/confluent_osdeploy/suse15/profiles/hpc/scripts/post.d/10-remove-online-repos.sh @@ -0,0 +1,3 @@ +#!/usr/bin/bash +# remove online repos +grep -lE "baseurl=https?://download.opensuse.org" /etc/zypp/repos.d/*repo | xargs rm -- diff --git a/confluent_osdeploy/suse15/profiles/hpc/scripts/syncfileclient b/confluent_osdeploy/suse15/profiles/hpc/scripts/syncfileclient index f7d4c0b4..5f2efc5e 100644 --- a/confluent_osdeploy/suse15/profiles/hpc/scripts/syncfileclient +++ b/confluent_osdeploy/suse15/profiles/hpc/scripts/syncfileclient @@ -1,4 +1,6 @@ #!/usr/bin/python3 +import random +import time import subprocess import importlib import tempfile @@ -7,6 +9,7 @@ import os import shutil import pwd import grp +import sys from importlib.machinery import SourceFileLoader try: apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module() @@ -227,9 +230,16 @@ def synchronize(): myips.append(addr) data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir, 'myips': myips}) status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data) + if status >= 300: + sys.stderr.write("Error starting syncfiles - {}:\n".format(status)) + sys.stderr.write(rsp.decode('utf8')) + sys.stderr.write('\n') + sys.stderr.flush() + return status if status == 202: lastrsp = '' while status != 204: + time.sleep(1+(2*random.random())) status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles') if not isinstance(rsp, str): rsp = rsp.decode('utf8') @@ -277,10 +287,21 @@ def synchronize(): os.chmod(fname, int(opts[fname][opt], 8)) if uid != -1 or gid != -1: os.chown(fname, uid, gid) + return status finally: shutil.rmtree(tmpdir) shutil.rmtree(appendoncedir) if __name__ == '__main__': - synchronize() + status = 202 + while status not in (204, 200): + try: + status = synchronize() + except Exception as e: + sys.stderr.write(str(e)) + sys.stderr.write('\n') + sys.stderr.flush() + status = 300 + if status not in (204, 200): + time.sleep((random.random()*3)+2) diff --git a/confluent_osdeploy/suse15/profiles/server/autoyast.leap b/confluent_osdeploy/suse15/profiles/server/autoyast.leap index 7f9d08f7..e92ec9fd 100644 --- 
a/confluent_osdeploy/suse15/profiles/server/autoyast.leap +++ b/confluent_osdeploy/suse15/profiles/server/autoyast.leap @@ -10,6 +10,12 @@ dynamic behavior and replace with static configuration. UTC %%TIMEZONE%% + + false + + + false + false diff --git a/confluent_osdeploy/suse15/profiles/server/initprofile.sh b/confluent_osdeploy/suse15/profiles/server/initprofile.sh index 9c6c295e..62a2663e 100644 --- a/confluent_osdeploy/suse15/profiles/server/initprofile.sh +++ b/confluent_osdeploy/suse15/profiles/server/initprofile.sh @@ -1,4 +1,7 @@ #!/bin/sh +# WARNING +# be careful when editing files here as this script is called +# in parallel to other copy operations, so changes to files can be lost discnum=$(basename $1) if [ "$discnum" != 1 ]; then exit 0; fi if [ -e $2/boot/kernel ]; then exit 0; fi diff --git a/confluent_osdeploy/suse15/profiles/server/scripts/getinstalldisk b/confluent_osdeploy/suse15/profiles/server/scripts/getinstalldisk index 522aba00..04c7708e 100644 --- a/confluent_osdeploy/suse15/profiles/server/scripts/getinstalldisk +++ b/confluent_osdeploy/suse15/profiles/server/scripts/getinstalldisk @@ -3,6 +3,8 @@ import os class DiskInfo(object): def __init__(self, devname): + if devname.startswith('nvme') and 'c' in devname: + raise Exception("Skipping multipath devname") self.name = devname self.wwn = None self.path = None diff --git a/confluent_osdeploy/suse15/profiles/server/scripts/post.d/10-remove-online-repos.sh b/confluent_osdeploy/suse15/profiles/server/scripts/post.d/10-remove-online-repos.sh new file mode 100644 index 00000000..9ae8224e --- /dev/null +++ b/confluent_osdeploy/suse15/profiles/server/scripts/post.d/10-remove-online-repos.sh @@ -0,0 +1,3 @@ +#!/usr/bin/bash +# remove online repos +grep -lE "baseurl=https?://download.opensuse.org" /etc/zypp/repos.d/*repo | xargs rm -- diff --git a/confluent_osdeploy/suse15/profiles/server/scripts/syncfileclient b/confluent_osdeploy/suse15/profiles/server/scripts/syncfileclient index f7d4c0b4..5f2efc5e 100644 --- a/confluent_osdeploy/suse15/profiles/server/scripts/syncfileclient +++ b/confluent_osdeploy/suse15/profiles/server/scripts/syncfileclient @@ -1,4 +1,6 @@ #!/usr/bin/python3 +import random +import time import subprocess import importlib import tempfile @@ -7,6 +9,7 @@ import os import shutil import pwd import grp +import sys from importlib.machinery import SourceFileLoader try: apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module() @@ -227,9 +230,16 @@ def synchronize(): myips.append(addr) data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir, 'myips': myips}) status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data) + if status >= 300: + sys.stderr.write("Error starting syncfiles - {}:\n".format(status)) + sys.stderr.write(rsp.decode('utf8')) + sys.stderr.write('\n') + sys.stderr.flush() + return status if status == 202: lastrsp = '' while status != 204: + time.sleep(1+(2*random.random())) status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles') if not isinstance(rsp, str): rsp = rsp.decode('utf8') @@ -277,10 +287,21 @@ def synchronize(): os.chmod(fname, int(opts[fname][opt], 8)) if uid != -1 or gid != -1: os.chown(fname, uid, gid) + return status finally: shutil.rmtree(tmpdir) shutil.rmtree(appendoncedir) if __name__ == '__main__': - synchronize() + status = 202 + while status not in (204, 200): + try: + status = synchronize() + except Exception as e: + sys.stderr.write(str(e)) + sys.stderr.write('\n') + sys.stderr.flush() + 
status = 300 + if status not in (204, 200): + time.sleep((random.random()*3)+2) diff --git a/confluent_osdeploy/ubuntu20.04-diskless/initramfs/conf/conf.d/confluent b/confluent_osdeploy/ubuntu20.04-diskless/initramfs/conf/conf.d/confluent index 64a3713d..79787074 100644 --- a/confluent_osdeploy/ubuntu20.04-diskless/initramfs/conf/conf.d/confluent +++ b/confluent_osdeploy/ubuntu20.04-diskless/initramfs/conf/conf.d/confluent @@ -1,4 +1,5 @@ if ! grep console= /proc/cmdline > /dev/null; then + mkdir -p /custom-installation /opt/confluent/bin/autocons > /custom-installation/autocons.info cons=$(cat /custom-installation/autocons.info) if [ ! -z "$cons" ]; then diff --git a/confluent_osdeploy/ubuntu20.04-diskless/initramfs/scripts/init-premount/confluent b/confluent_osdeploy/ubuntu20.04-diskless/initramfs/scripts/init-premount/confluent index 2f7094b9..a4ca41cf 100644 --- a/confluent_osdeploy/ubuntu20.04-diskless/initramfs/scripts/init-premount/confluent +++ b/confluent_osdeploy/ubuntu20.04-diskless/initramfs/scripts/init-premount/confluent @@ -58,6 +58,10 @@ if ! grep console= /proc/cmdline > /dev/null; then echo "Automatic console configured for $autocons" fi echo sshd:x:30:30:SSH User:/var/empty/sshd:/sbin/nologin >> /etc/passwd +modprobe ib_ipoib +modprobe ib_umad +modprobe hfi1 +modprobe mlx5_ib cd /sys/class/net for nic in *; do ip link set $nic up diff --git a/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/getinstalldisk b/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/getinstalldisk index 522aba00..04c7708e 100644 --- a/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/getinstalldisk +++ b/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/getinstalldisk @@ -3,6 +3,8 @@ import os class DiskInfo(object): def __init__(self, devname): + if devname.startswith('nvme') and 'c' in devname: + raise Exception("Skipping multipath devname") self.name = devname self.wwn = None self.path = None diff --git a/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/image2disk.py b/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/image2disk.py index 5d15e3d4..91afc5cb 100644 --- a/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/image2disk.py +++ b/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/image2disk.py @@ -10,6 +10,7 @@ import stat import struct import sys import subprocess +import traceback bootuuid = None @@ -206,6 +207,8 @@ def fixup(rootdir, vols): partnum = re.search('(\d+)$', targdev).group(1) targblock = re.search('(.*)\d+$', targdev).group(1) if targblock: + if targblock.endswith('p') and 'nvme' in targblock: + targblock = targblock[:-1] shimpath = subprocess.check_output(['find', os.path.join(rootdir, 'boot/efi'), '-name', 'shimx64.efi']).decode('utf8').strip() shimpath = shimpath.replace(rootdir, '/').replace('/boot/efi', '').replace('//', '/').replace('/', '\\') subprocess.check_call(['efibootmgr', '-c', '-d', targblock, '-l', shimpath, '--part', partnum]) @@ -422,5 +425,10 @@ def install_to_disk(imgpath): if __name__ == '__main__': - install_to_disk(os.environ['mountsrc']) + try: + install_to_disk(os.environ['mountsrc']) + except Exception: + traceback.print_exc() + time.sleep(86400) + raise diff --git a/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/imageboot.sh b/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/imageboot.sh index f1b8e45a..0db99754 100644 --- 
a/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/imageboot.sh +++ b/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/imageboot.sh @@ -8,7 +8,7 @@ for addr in $(grep ^MANAGER: /etc/confluent/confluent.info|awk '{print $2}'|sed fi done mkdir -p /mnt/remoteimg /mnt/remote /mnt/overlay -if grep confluennt_imagemethtod=untethered /proc/cmdline > /dev/null; then +if grep confluent_imagemethod=untethered /proc/cmdline > /dev/null; then mount -t tmpfs untethered /mnt/remoteimg curl https://$confluent_mgr/confluent-public/os/$confluent_profile/rootimg.sfs -o /mnt/remoteimg/rootimg.sfs else diff --git a/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/onboot.service b/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/onboot.service new file mode 100644 index 00000000..f9235033 --- /dev/null +++ b/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/onboot.service @@ -0,0 +1,11 @@ +[Unit] +Description=Confluent onboot hook +Requires=network-online.target +After=network-online.target + +[Service] +ExecStart=/opt/confluent/bin/onboot.sh + +[Install] +WantedBy=multi-user.target + diff --git a/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/onboot.sh b/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/onboot.sh new file mode 100644 index 00000000..cc470d6f --- /dev/null +++ b/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/onboot.sh @@ -0,0 +1,40 @@ +#!/bin/bash + +# This script is executed on each boot as it is +# completed. It is best to edit the middle of the file as +# noted below so custom commands are executed before +# the script notifies confluent that install is fully complete. + +nodename=$(grep ^NODENAME /etc/confluent/confluent.info|awk '{print $2}') +confluent_apikey=$(cat /etc/confluent/confluent.apikey) +v4meth=$(grep ^ipv4_method: /etc/confluent/confluent.deploycfg|awk '{print $2}') +if [ "$v4meth" = "null" -o -z "$v4meth" ]; then + confluent_mgr=$(grep ^deploy_server_v6: /etc/confluent/confluent.deploycfg|awk '{print $2}') +fi +if [ -z "$confluent_mgr" ]; then + confluent_mgr=$(grep ^deploy_server: /etc/confluent/confluent.deploycfg|awk '{print $2}') +fi +confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}') +timedatectl set-timezone $(grep ^timezone: /etc/confluent/confluent.deploycfg|awk '{print $2}') +hostnamectl set-hostname $nodename +export nodename confluent_mgr confluent_profile +. /etc/confluent/functions +mkdir -p /var/log/confluent +chmod 700 /var/log/confluent +exec >> /var/log/confluent/confluent-onboot.log +exec 2>> /var/log/confluent/confluent-onboot.log +chmod 600 /var/log/confluent/confluent-onboot.log +tail -f /var/log/confluent/confluent-onboot.log > /dev/console & +logshowpid=$! + +run_remote_python syncfileclient +run_remote_python confignet + +# onboot scripts may be placed into onboot.d, e.g. onboot.d/01-firstaction.sh, onboot.d/02-secondaction.sh +run_remote_parts onboot.d + +# Induce execution of remote configuration, e.g. 
ansible plays in ansible/onboot.d/ +run_remote_config onboot.d + +#curl -X POST -d 'status: booted' -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" https://$confluent_mgr/confluent-api/self/updatestatus +kill $logshowpid diff --git a/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/syncfileclient b/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/syncfileclient index f7d4c0b4..5f2efc5e 100644 --- a/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/syncfileclient +++ b/confluent_osdeploy/ubuntu20.04-diskless/profiles/default/scripts/syncfileclient @@ -1,4 +1,6 @@ #!/usr/bin/python3 +import random +import time import subprocess import importlib import tempfile @@ -7,6 +9,7 @@ import os import shutil import pwd import grp +import sys from importlib.machinery import SourceFileLoader try: apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module() @@ -227,9 +230,16 @@ def synchronize(): myips.append(addr) data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir, 'myips': myips}) status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data) + if status >= 300: + sys.stderr.write("Error starting syncfiles - {}:\n".format(status)) + sys.stderr.write(rsp.decode('utf8')) + sys.stderr.write('\n') + sys.stderr.flush() + return status if status == 202: lastrsp = '' while status != 204: + time.sleep(1+(2*random.random())) status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles') if not isinstance(rsp, str): rsp = rsp.decode('utf8') @@ -277,10 +287,21 @@ def synchronize(): os.chmod(fname, int(opts[fname][opt], 8)) if uid != -1 or gid != -1: os.chown(fname, uid, gid) + return status finally: shutil.rmtree(tmpdir) shutil.rmtree(appendoncedir) if __name__ == '__main__': - synchronize() + status = 202 + while status not in (204, 200): + try: + status = synchronize() + except Exception as e: + sys.stderr.write(str(e)) + sys.stderr.write('\n') + sys.stderr.flush() + status = 300 + if status not in (204, 200): + time.sleep((random.random()*3)+2) diff --git a/confluent_osdeploy/ubuntu20.04/initramfs/custom-installation/post.sh b/confluent_osdeploy/ubuntu20.04/initramfs/custom-installation/post.sh index 5bd43bc6..d9dc27b2 100755 --- a/confluent_osdeploy/ubuntu20.04/initramfs/custom-installation/post.sh +++ b/confluent_osdeploy/ubuntu20.04/initramfs/custom-installation/post.sh @@ -4,4 +4,5 @@ confluent_mgr=$(grep ^deploy_server $deploycfg|awk '{print $2}') confluent_profile=$(grep ^profile: $deploycfg|awk '{print $2}') export deploycfg confluent_mgr confluent_profile curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/post.sh > /tmp/post.sh -. /tmp/post.sh +bash /tmp/post.sh +true diff --git a/confluent_osdeploy/ubuntu20.04/profiles/default/scripts/firstboot.sh b/confluent_osdeploy/ubuntu20.04/profiles/default/scripts/firstboot.sh index d14269cf..c0ba44ab 100755 --- a/confluent_osdeploy/ubuntu20.04/profiles/default/scripts/firstboot.sh +++ b/confluent_osdeploy/ubuntu20.04/profiles/default/scripts/firstboot.sh @@ -2,7 +2,10 @@ echo "Confluent first boot is running" HOME=$(getent passwd $(whoami)|cut -d: -f 6) export HOME -
+( +exec >> /target/var/log/confluent/confluent-firstboot.log +exec 2>> /target/var/log/confluent/confluent-firstboot.log +chmod 600 /target/var/log/confluent/confluent-firstboot.log cp -a /etc/confluent/ssh/* /etc/ssh/ systemctl restart sshd rootpw=$(grep ^rootpassword: /etc/confluent/confluent.deploycfg |awk '{print $2}') @@ -18,7 +21,10 @@ done hostnamectl set-hostname $(grep ^NODENAME: /etc/confluent/confluent.info | awk '{print $2}') touch /etc/cloud/cloud-init.disabled source /etc/confluent/functions - +confluent_profile=$(grep ^profile: /etc/confluent/confluent.deploycfg|awk '{print $2}') +export confluent_mgr confluent_profile run_remote_parts firstboot.d run_remote_config firstboot.d curl --capath /etc/confluent/tls -f -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" -X POST -d "status: complete" https://$confluent_mgr/confluent-api/self/updatestatus +) & +tail --pid $! -n 0 -F /target/var/log/confluent/confluent-post.log > /dev/console diff --git a/confluent_osdeploy/ubuntu20.04/profiles/default/scripts/getinstalldisk b/confluent_osdeploy/ubuntu20.04/profiles/default/scripts/getinstalldisk index 522aba00..04c7708e 100644 --- a/confluent_osdeploy/ubuntu20.04/profiles/default/scripts/getinstalldisk +++ b/confluent_osdeploy/ubuntu20.04/profiles/default/scripts/getinstalldisk @@ -3,6 +3,8 @@ import os class DiskInfo(object): def __init__(self, devname): + if devname.startswith('nvme') and 'c' in devname: + raise Exception("Skipping multipath devname") self.name = devname self.wwn = None self.path = None diff --git a/confluent_osdeploy/ubuntu20.04/profiles/default/scripts/post.sh b/confluent_osdeploy/ubuntu20.04/profiles/default/scripts/post.sh index 7b970285..d9730889 100755 --- a/confluent_osdeploy/ubuntu20.04/profiles/default/scripts/post.sh +++ b/confluent_osdeploy/ubuntu20.04/profiles/default/scripts/post.sh @@ -8,7 +8,6 @@ chmod go-rwx /etc/confluent/* for i in /custom-installation/ssh/*.ca; do echo '@cert-authority *' $(cat $i) >> /target/etc/ssh/ssh_known_hosts done - cp -a /etc/ssh/ssh_host* /target/etc/confluent/ssh/ cp -a /etc/ssh/sshd_config.d/confluent.conf /target/etc/confluent/ssh/sshd_config.d/ sshconf=/target/etc/ssh/ssh_config @@ -19,10 +18,15 @@ echo 'Host *' >> $sshconf echo ' HostbasedAuthentication yes' >> $sshconf echo ' EnableSSHKeysign yes' >> $sshconf echo ' HostbasedKeyTypes *ed25519*' >> $sshconf - +cp /etc/confluent/functions /target/etc/confluent/functions +source /etc/confluent/functions +mkdir -p /target/var/log/confluent +cp /var/log/confluent/* /target/var/log/confluent/ +( +exec >> /target/var/log/confluent/confluent-post.log +exec 2>> /target/var/log/confluent/confluent-post.log +chmod 600 /target/var/log/confluent/confluent-post.log curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/firstboot.sh > /target/etc/confluent/firstboot.sh -curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/functions > /target/etc/confluent/functions -source /target/etc/confluent/functions chmod +x /target/etc/confluent/firstboot.sh cp /tmp/allnodes /target/root/.shosts cp /tmp/allnodes /target/etc/ssh/shosts.equiv @@ -56,6 +60,7 @@ cp /custom-installation/confluent/bin/apiclient /target/opt/confluent/bin mount -o bind /dev /target/dev mount -o bind /proc /target/proc mount -o bind /sys /target/sys +mount -o bind /sys/firmware/efi/efivars /target/sys/firmware/efi/efivars if [ 1 = $updategrub ]; then chroot /target update-grub fi @@ -83,6 +88,8 @@ chroot /target bash -c 
"source /etc/confluent/functions; run_remote_parts post.d source /target/etc/confluent/functions run_remote_config post +python3 /opt/confluent/bin/apiclient /confluent-api/self/updatestatus -d 'status: staged' umount /target/sys /target/dev /target/proc - +) & +tail --pid $! -n 0 -F /target/var/log/confluent/confluent-post.log > /dev/console diff --git a/confluent_osdeploy/ubuntu20.04/profiles/default/scripts/pre.sh b/confluent_osdeploy/ubuntu20.04/profiles/default/scripts/pre.sh index ddfe598b..5db222a7 100755 --- a/confluent_osdeploy/ubuntu20.04/profiles/default/scripts/pre.sh +++ b/confluent_osdeploy/ubuntu20.04/profiles/default/scripts/pre.sh @@ -1,5 +1,16 @@ #!/bin/bash deploycfg=/custom-installation/confluent/confluent.deploycfg +mkdir -p /var/log/confluent +mkdir -p /opt/confluent/bin +mkdir -p /etc/confluent +cp /custom-installation/confluent/confluent.info /custom-installation/confluent/confluent.apikey /etc/confluent/ +cat /custom-installation/tls/*.pem >> /etc/confluent/ca.pem +cp /custom-installation/confluent/bin/apiclient /opt/confluent/bin +cp $deploycfg /etc/confluent/ +( +exec >> /var/log/confluent/confluent-pre.log +exec 2>> /var/log/confluent/confluent-pre.log +chmod 600 /var/log/confluent/confluent-pre.log cryptboot=$(grep encryptboot: $deploycfg|sed -e 's/^encryptboot: //') if [ "$cryptboot" != "" ] && [ "$cryptboot" != "none" ] && [ "$cryptboot" != "null" ]; then @@ -23,7 +34,17 @@ echo HostbasedAuthentication yes >> /etc/ssh/sshd_config.d/confluent.conf echo HostbasedUsesNameFromPacketOnly yes >> /etc/ssh/sshd_config.d/confluent.conf echo IgnoreRhosts no >> /etc/ssh/sshd_config.d/confluent.conf systemctl restart sshd +mkdir -p /etc/confluent +export nodename confluent_profile confluent_mgr +curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/functions > /etc/confluent/functions +. /etc/confluent/functions +run_remote_parts pre.d curl -f -X POST -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $apikey" https://$confluent_mgr/confluent-api/self/nodelist > /tmp/allnodes -curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/getinstalldisk > /custom-installation/getinstalldisk -python3 /custom-installation/getinstalldisk +if [ ! -e /tmp/installdisk ]; then + curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/getinstalldisk > /custom-installation/getinstalldisk + python3 /custom-installation/getinstalldisk +fi sed -i s!%%INSTALLDISK%%!/dev/$(cat /tmp/installdisk)! /autoinstall.yaml +) & +tail --pid $! 
-n 0 -F /var/log/confluent/confluent-pre.log > /dev/console + diff --git a/confluent_osdeploy/ubuntu20.04/profiles/default/scripts/syncfileclient b/confluent_osdeploy/ubuntu20.04/profiles/default/scripts/syncfileclient index f7d4c0b4..5f2efc5e 100644 --- a/confluent_osdeploy/ubuntu20.04/profiles/default/scripts/syncfileclient +++ b/confluent_osdeploy/ubuntu20.04/profiles/default/scripts/syncfileclient @@ -1,4 +1,6 @@ #!/usr/bin/python3 +import random +import time import subprocess import importlib import tempfile @@ -7,6 +9,7 @@ import os import shutil import pwd import grp +import sys from importlib.machinery import SourceFileLoader try: apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module() @@ -227,9 +230,16 @@ def synchronize(): myips.append(addr) data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir, 'myips': myips}) status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data) + if status >= 300: + sys.stderr.write("Error starting syncfiles - {}:\n".format(status)) + sys.stderr.write(rsp.decode('utf8')) + sys.stderr.write('\n') + sys.stderr.flush() + return status if status == 202: lastrsp = '' while status != 204: + time.sleep(1+(2*random.random())) status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles') if not isinstance(rsp, str): rsp = rsp.decode('utf8') @@ -277,10 +287,21 @@ def synchronize(): os.chmod(fname, int(opts[fname][opt], 8)) if uid != -1 or gid != -1: os.chown(fname, uid, gid) + return status finally: shutil.rmtree(tmpdir) shutil.rmtree(appendoncedir) if __name__ == '__main__': - synchronize() + status = 202 + while status not in (204, 200): + try: + status = synchronize() + except Exception as e: + sys.stderr.write(str(e)) + sys.stderr.write('\n') + sys.stderr.flush() + status = 300 + if status not in (204, 200): + time.sleep((random.random()*3)+2) diff --git a/confluent_osdeploy/ubuntu22.04/initramfs/custom-installation/post.sh b/confluent_osdeploy/ubuntu22.04/initramfs/custom-installation/post.sh index 5bd43bc6..d9dc27b2 100755 --- a/confluent_osdeploy/ubuntu22.04/initramfs/custom-installation/post.sh +++ b/confluent_osdeploy/ubuntu22.04/initramfs/custom-installation/post.sh @@ -4,4 +4,5 @@ confluent_mgr=$(grep ^deploy_server $deploycfg|awk '{print $2}') confluent_profile=$(grep ^profile: $deploycfg|awk '{print $2}') export deploycfg confluent_mgr confluent_profile curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/post.sh > /tmp/post.sh -. /tmp/post.sh +bash /tmp/post.sh +true diff --git a/confluent_osdeploy/ubuntu22.04/initramfs/scripts/casper-bottom/99confluent b/confluent_osdeploy/ubuntu22.04/initramfs/scripts/casper-bottom/99confluent index e066714e..d629cf32 100755 --- a/confluent_osdeploy/ubuntu22.04/initramfs/scripts/casper-bottom/99confluent +++ b/confluent_osdeploy/ubuntu22.04/initramfs/scripts/casper-bottom/99confluent @@ -26,12 +26,14 @@ if [ -e /tmp/cnflnthmackeytmp ]; then chroot . curl -f -H "CONFLUENT_NODENAME: $NODENAME" -H "CONFLUENT_CRYPTHMAC: $(cat /root/$hmacfile)" -d @/tmp/cnflntcryptfile https://$MGR/confluent-api/self/registerapikey cp /root/$passfile /root/custom-installation/confluent/confluent.apikey DEVICE=$(cat /tmp/autodetectnic) + IP=done else chroot . 
custom-installation/confluent/bin/clortho $NODENAME $MGR > /root/custom-installation/confluent/confluent.apikey MGR=[$MGR] nic=$(grep ^MANAGER /custom-installation/confluent/confluent.info|grep fe80::|sed -e s/.*%//|head -n 1) nic=$(ip link |grep ^$nic:|awk '{print $2}') DEVICE=${nic%:} + IP=done fi if [ -z "$MGTIFACE" ]; then chroot . usr/bin/curl -f -H "CONFLUENT_NODENAME: $NODENAME" -H "CONFLUENT_APIKEY: $(cat /root//custom-installation/confluent/confluent.apikey)" https://${MGR}/confluent-api/self/deploycfg > $deploycfg diff --git a/confluent_osdeploy/ubuntu22.04/initramfs/scripts/init-premount/confluent b/confluent_osdeploy/ubuntu22.04/initramfs/scripts/init-premount/confluent index 03761f3a..8a7e3777 100755 --- a/confluent_osdeploy/ubuntu22.04/initramfs/scripts/init-premount/confluent +++ b/confluent_osdeploy/ubuntu22.04/initramfs/scripts/init-premount/confluent @@ -79,8 +79,12 @@ if [ ! -z "$cons" ]; then fi echo "Preparing to deploy $osprofile from $MGR" echo $osprofile > /custom-installation/confluent/osprofile -echo URL=http://${MGR}/confluent-public/os/$osprofile/distribution/install.iso >> /conf/param.conf -fcmdline="$(cat /custom-installation/confluent/cmdline.orig) url=http://${MGR}/confluent-public/os/$osprofile/distribution/install.iso" +. /etc/os-release +DIRECTISO=$(blkid -t TYPE=iso9660 |grep -Ei ' LABEL="Ubuntu-Server '$VERSION_ID) +if [ -z "$DIRECTISO" ]; then + echo URL=http://${MGR}/confluent-public/os/$osprofile/distribution/install.iso >> /conf/param.conf + fcmdline="$(cat /custom-installation/confluent/cmdline.orig) url=http://${MGR}/confluent-public/os/$osprofile/distribution/install.iso" +fi if [ ! -z "$cons" ]; then fcmdline="$fcmdline console=${cons#/dev/}" fi diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/initprofile.sh b/confluent_osdeploy/ubuntu22.04/profiles/default/initprofile.sh index 20e12471..cebcd41d 100644 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/initprofile.sh +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/initprofile.sh @@ -3,5 +3,12 @@ sed -i 's/label: ubuntu/label: Ubuntu/' $2/profile.yaml && \ ln -s $1/casper/vmlinuz $2/boot/kernel && \ ln -s $1/casper/initrd $2/boot/initramfs/distribution && \ mkdir -p $2/boot/efi/boot && \ -ln -s $1/EFI/boot/* $2/boot/efi/boot +if [ -d $1/EFI/boot/ ]; then + ln -s $1/EFI/boot/* $2/boot/efi/boot +elif [ -d $1/efi/boot/ ]; then + ln -s $1/efi/boot/* $2/boot/efi/boot +else + echo "Unrecognized boot contents in media" >&2 + exit 1 +fi diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/addcrypt b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/addcrypt new file mode 100644 index 00000000..750753c1 --- /dev/null +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/addcrypt @@ -0,0 +1,12 @@ +import yaml +import os + +ainst = {} +with open('/autoinstall.yaml', 'r') as allin: + ainst = yaml.safe_load(allin) + +ainst['storage']['layout']['password'] = os.environ['lukspass'] + +with open('/autoinstall.yaml', 'w') as allout: + yaml.safe_dump(ainst, allout) +
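The addcrypt script above rewrites the subiquity autoinstall document to inject the LUKS passphrase that pre.sh exports as lukspass. A minimal standalone sketch of that round-trip follows; the sample document and the /tmp path are hypothetical stand-ins, while the real script operates in place on /autoinstall.yaml:

    #!/usr/bin/python3
    # Sketch of the addcrypt round-trip: read an autoinstall document,
    # set the passphrase under storage -> layout -> password, write it
    # back. The sample document and path are illustrative only.
    import os
    import yaml

    def inject_lukspass(path, passphrase):
        with open(path) as allin:
            ainst = yaml.safe_load(allin)
        ainst['storage']['layout']['password'] = passphrase
        with open(path, 'w') as allout:
            yaml.safe_dump(ainst, allout)

    if __name__ == '__main__':
        sample = {'storage': {'layout': {'name': 'lvm'}}}
        with open('/tmp/autoinstall-sample.yaml', 'w') as allout:
            yaml.safe_dump(sample, allout)
        inject_lukspass('/tmp/autoinstall-sample.yaml',
                        os.environ.get('lukspass', 'example-only'))

This assumes the profile's autoinstall.yaml already carries a storage layout section; pre.sh accounts for that by verifying a password: line actually landed in /autoinstall.yaml and halting the install otherwise.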
diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/firstboot.sh b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/firstboot.sh index c0ba44ab..996bfffe 100755 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/firstboot.sh +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/firstboot.sh @@ -3,11 +3,11 @@ echo "Confluent first boot is running" HOME=$(getent passwd $(whoami)|cut -d: -f 6) export HOME ( -exec >> /target/var/log/confluent/confluent-firstboot.log -exec 2>> /target/var/log/confluent/confluent-firstboot.log -chmod 600 /target/var/log/confluent/confluent-firstboot.log +exec >> /var/log/confluent/confluent-firstboot.log +exec 2>> /var/log/confluent/confluent-firstboot.log +chmod 600 /var/log/confluent/confluent-firstboot.log cp -a /etc/confluent/ssh/* /etc/ssh/ -systemctl restart sshd +systemctl restart ssh rootpw=$(grep ^rootpassword: /etc/confluent/confluent.deploycfg |awk '{print $2}') if [ ! -z "$rootpw" -a "$rootpw" != "null" ]; then echo root:$rootpw | chpasswd -e @@ -27,4 +27,4 @@ run_remote_parts firstboot.d run_remote_config firstboot.d curl --capath /etc/confluent/tls -f -H "CONFLUENT_NODENAME: $nodename" -H "CONFLUENT_APIKEY: $confluent_apikey" -X POST -d "status: complete" https://$confluent_mgr/confluent-api/self/updatestatus ) & -tail --pid $! -n 0 -F /target/var/log/confluent/confluent-post.log > /dev/console +tail --pid $! -n 0 -F /var/log/confluent/confluent-post.log > /dev/console diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/getinstalldisk b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/getinstalldisk index 522aba00..04c7708e 100644 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/getinstalldisk +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/getinstalldisk @@ -3,6 +3,8 @@ import os class DiskInfo(object): def __init__(self, devname): + if devname.startswith('nvme') and 'c' in devname: + raise Exception("Skipping multipath devname") self.name = devname self.wwn = None self.path = None diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/mergetime b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/mergetime new file mode 100644 index 00000000..7edb2632 --- /dev/null +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/mergetime @@ -0,0 +1,26 @@ +#!/usr/bin/python3 +import yaml +import os + +ainst = {} +with open('/autoinstall.yaml', 'r') as allin: + ainst = yaml.safe_load(allin) + +tz = None +ntps = [] +with open('/etc/confluent/confluent.deploycfg', 'r') as confluentdeploycfg: + dcfg = yaml.safe_load(confluentdeploycfg) + tz = dcfg['timezone'] + ntps = dcfg.get('ntpservers', []) + +if ntps and not ainst.get('ntp', None): + ainst['ntp'] = {} + ainst['ntp']['enabled'] = True + ainst['ntp']['servers'] = ntps + +if tz and not ainst.get('timezone'): + ainst['timezone'] = tz + +with open('/autoinstall.yaml', 'w') as allout: + yaml.safe_dump(ainst, allout) +
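The mergetime script above copies the timezone and any NTP servers from confluent.deploycfg into the autoinstall document, deferring to values the profile already sets. The merge policy is small enough to show against plain dicts; the sample data here is invented for illustration:

    #!/usr/bin/python3
    # Sketch of the mergetime policy: deploycfg supplies timezone/ntp
    # defaults only where the autoinstall document is silent.
    def merge_time_settings(ainst, dcfg):
        ntps = dcfg.get('ntpservers', [])
        if ntps and not ainst.get('ntp'):
            ainst['ntp'] = {'enabled': True, 'servers': ntps}
        tz = dcfg.get('timezone')
        if tz and not ainst.get('timezone'):
            ainst['timezone'] = tz
        return ainst

    # Invented sample: deploycfg fills both fields on an empty document
    print(merge_time_settings({}, {'timezone': 'UTC',
                                   'ntpservers': ['ntp1.example.com']}))

One nuance preserved from the script: an explicit timezone or ntp section in the profile's autoinstall.yaml always wins over the deployment server's values.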
diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh index 773bf8ad..a86695ca 100755 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/post.sh @@ -60,9 +60,12 @@ cp /custom-installation/confluent/bin/apiclient /target/opt/confluent/bin mount -o bind /dev /target/dev mount -o bind /proc /target/proc mount -o bind /sys /target/sys +mount -o bind /run /target/run +mount -o bind /sys/firmware/efi/efivars /target/sys/firmware/efi/efivars if [ 1 = $updategrub ]; then chroot /target update-grub fi + echo "Port 22" >> /etc/ssh/sshd_config echo "Port 2222" >> /etc/ssh/sshd_config echo "Match LocalPort 22" >> /etc/ssh/sshd_config @@ -87,8 +90,36 @@ chroot /target bash -c "source /etc/confluent/functions; run_remote_parts post.d source /target/etc/confluent/functions run_remote_config post + +if [ -f /etc/confluent_lukspass ]; then + numdevs=$(lsblk -lo name,uuid|grep $(awk '{print $2}' < /target/etc/crypttab |sed -e s/UUID=//)|wc -l) + if [ 0$numdevs -ne 1 ]; then + wall "Unable to identify the LUKS device, halting install" + while :; do sleep 86400; done + fi + CRYPTTAB_SOURCE=$(awk '{print $2}' /target/etc/crypttab) + . /target/usr/lib/cryptsetup/functions + crypttab_resolve_source + + if [ ! -e $CRYPTTAB_SOURCE ]; then + wall "Unable to find $CRYPTTAB_SOURCE, halting install" + while :; do sleep 86400; done + fi + cp /etc/confluent_lukspass /target/etc/confluent/luks.key + chmod 000 /target/etc/confluent/luks.key + lukspass=$(cat /etc/confluent_lukspass) + chroot /target apt install libtss2-rc0 + PASSWORD=$lukspass chroot /target systemd-cryptenroll --tpm2-device=auto --tpm2-pcrs="" $CRYPTTAB_SOURCE + fetch_remote systemdecrypt + mv systemdecrypt /target/etc/initramfs-tools/scripts/local-top/systemdecrypt + fetch_remote systemdecrypt-hook + mv systemdecrypt-hook /target/etc/initramfs-tools/hooks/systemdecrypt + chmod 755 /target/etc/initramfs-tools/scripts/local-top/systemdecrypt /target/etc/initramfs-tools/hooks/systemdecrypt + chroot /target update-initramfs -u +fi python3 /opt/confluent/bin/apiclient /confluent-api/self/updatestatus -d 'status: staged' -umount /target/sys /target/dev /target/proc + +umount /target/sys /target/dev /target/proc /target/run ) & tail --pid $! -n 0 -F /target/var/log/confluent/confluent-post.log > /dev/console diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh index 4ff1878e..ad55120a 100755 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/pre.sh @@ -13,11 +13,6 @@ exec 2>> /var/log/confluent/confluent-pre.log chmod 600 /var/log/confluent/confluent-pre.log cryptboot=$(grep encryptboot: $deploycfg|sed -e 's/^encryptboot: //') -if [ "$cryptboot" != "" ] && [ "$cryptboot" != "none" ] && [ "$cryptboot" != "null" ]; then - echo "****Encrypted boot requested, but not implemented for this OS, halting install" > /dev/console - [ -f '/tmp/autoconsdev' ] && (echo "****Encryptod boot requested, but not implemented for this OS,halting install" >> $(cat /tmp/autoconsdev)) - while :; do sleep 86400; done -fi cat /custom-installation/ssh/*pubkey > /root/.ssh/authorized_keys @@ -35,7 +30,7 @@ echo HostbasedUsesNameFromPacketOnly yes >> /etc/ssh/sshd_config.d/confluent.con echo IgnoreRhosts no >> /etc/ssh/sshd_config.d/confluent.conf systemctl restart sshd mkdir -p /etc/confluent -export confluent_profile confluent_mgr +export nodename confluent_profile confluent_mgr curl -f https://$confluent_mgr/confluent-public/os/$confluent_profile/scripts/functions > /etc/confluent/functions . /etc/confluent/functions run_remote_parts pre.d @@ -45,6 +40,24 @@ if [ ! -e /tmp/installdisk ]; then python3 /custom-installation/getinstalldisk fi sed -i s!%%INSTALLDISK%%!/dev/$(cat /tmp/installdisk)! /autoinstall.yaml +run_remote_python mergetime +if [ "$cryptboot" != "" ] && [ "$cryptboot" != "none" ] && [ "$cryptboot" != "null" ]; then + lukspass=$(python3 /opt/confluent/bin/apiclient /confluent-api/self/profileprivate/pending/luks.key 2> /dev/null) + if [ -z "$lukspass" ]; then + lukspass=$(head -c 66 < /dev/urandom |base64 -w0) + fi + export lukspass + run_remote_python addcrypt + if !
grep 'password:' /autoinstall.yaml > /dev/null; then + echo "****Encrypted boot requested, but the user-data does not have a hook to enable, halting install" > /dev/console + [ -f '/tmp/autoconsdev' ] && (echo "****Encrypted boot requested, but the user-data does not have a hook to enable, halting install" >> $(cat /tmp/autoconsdev)) + while :; do sleep 86400; done + fi + sed -i s!%%CRYPTPASS%%!$lukspass! /autoinstall.yaml + sed -i s!'#CRYPTBOOT'!! /autoinstall.yaml + echo -n $lukspass > /etc/confluent_lukspass + chmod 000 /etc/confluent_lukspass +fi ) & tail --pid $! -n 0 -F /var/log/confluent/confluent-pre.log > /dev/console diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/syncfileclient b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/syncfileclient index f7d4c0b4..5f2efc5e 100644 --- a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/syncfileclient +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/syncfileclient @@ -1,4 +1,6 @@ #!/usr/bin/python3 +import random +import time import subprocess import importlib import tempfile @@ -7,6 +9,7 @@ import os import shutil import pwd import grp +import sys from importlib.machinery import SourceFileLoader try: apiclient = SourceFileLoader('apiclient', '/opt/confluent/bin/apiclient').load_module() @@ -227,9 +230,16 @@ def synchronize(): myips.append(addr) data = json.dumps({'merge': tmpdir, 'appendonce': appendoncedir, 'myips': myips}) status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles', data) + if status >= 300: + sys.stderr.write("Error starting syncfiles - {}:\n".format(status)) + sys.stderr.write(rsp.decode('utf8')) + sys.stderr.write('\n') + sys.stderr.flush() + return status if status == 202: lastrsp = '' while status != 204: + time.sleep(1+(2*random.random())) status, rsp = ac.grab_url_with_status('/confluent-api/self/remotesyncfiles') if not isinstance(rsp, str): rsp = rsp.decode('utf8') @@ -277,10 +287,21 @@ def synchronize(): os.chmod(fname, int(opts[fname][opt], 8)) if uid != -1 or gid != -1: os.chown(fname, uid, gid) + return status finally: shutil.rmtree(tmpdir) shutil.rmtree(appendoncedir) if __name__ == '__main__': - synchronize() + status = 202 + while status not in (204, 200): + try: + status = synchronize() + except Exception as e: + sys.stderr.write(str(e)) + sys.stderr.write('\n') + sys.stderr.flush() + status = 300 + if status not in (204, 200): + time.sleep((random.random()*3)+2)
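The syncfileclient change, repeated identically for each of these profiles, turns a single synchronize() call into a jittered polling loop: 202 means the server is still working, 204 (or 200) means done, and anything 300 or higher is reported and retried after a randomized sleep so large numbers of nodes do not poll the deployment server in lockstep. The shape of that loop, distilled with a stub standing in for apiclient's grab_url_with_status:

    #!/usr/bin/python3
    # Distilled sketch of the jittered polling loop; fetch_status is a
    # stand-in for the confluent API client. Status handling mirrors
    # the script: 202 pending, 200/204 complete.
    import random
    import time

    def poll_until_done(fetch_status, max_attempts=30):
        for _ in range(max_attempts):
            status = fetch_status()
            if status in (200, 204):
                return status  # server reports completion
            # pending or error: sleep with jitter before asking again
            time.sleep(1 + 2 * random.random())
        raise TimeoutError('syncfiles never completed')

    responses = iter([202, 202, 204])  # stub: pending twice, then done
    print(poll_until_done(lambda: next(responses)))

The actual script differs in one respect: it loops indefinitely rather than giving up after a fixed number of attempts, since a node mid-deployment has nothing better to do than keep trying.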
diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/systemdecrypt b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/systemdecrypt new file mode 100644 index 00000000..6f0cbaed --- /dev/null +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/systemdecrypt @@ -0,0 +1,17 @@ +#!/bin/sh +case $1 in +prereqs) + echo + exit 0 + ;; +esac + +systemdecryptnow() { +. /usr/lib/cryptsetup/functions +local CRYPTTAB_SOURCE=$(awk '{print $2}' /systemdecrypt/crypttab) +local CRYPTTAB_NAME=$(awk '{print $1}' /systemdecrypt/crypttab) +crypttab_resolve_source +/lib/systemd/systemd-cryptsetup attach "${CRYPTTAB_NAME}" "${CRYPTTAB_SOURCE}" none tpm2-device=auto +} + +systemdecryptnow diff --git a/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/systemdecrypt-hook b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/systemdecrypt-hook new file mode 100644 index 00000000..ee602c7c --- /dev/null +++ b/confluent_osdeploy/ubuntu22.04/profiles/default/scripts/systemdecrypt-hook @@ -0,0 +1,26 @@ +#!/bin/sh +case "$1" in + prereqs) + echo + exit 0 + ;; +esac + +. /usr/share/initramfs-tools/hook-functions +mkdir -p $DESTDIR/systemdecrypt +copy_exec /lib/systemd/systemd-cryptsetup /lib/systemd +for i in /lib/x86_64-linux-gnu/libtss2* +do + copy_exec ${i} /lib/x86_64-linux-gnu +done +if [ -f /lib/x86_64-linux-gnu/cryptsetup/libcryptsetup-token-systemd-tpm2.so ]; then + mkdir -p $DESTDIR/lib/x86_64-linux-gnu/cryptsetup + copy_exec /lib/x86_64-linux-gnu/cryptsetup/libcryptsetup-token-systemd-tpm2.so /lib/x86_64-linux-gnu/cryptsetup +fi +mkdir -p $DESTDIR/scripts/local-top + +echo /scripts/local-top/systemdecrypt >> $DESTDIR/scripts/local-top/ORDER + +if [ -f $DESTDIR/cryptroot/crypttab ]; then + mv $DESTDIR/cryptroot/crypttab $DESTDIR/systemdecrypt/crypttab +fi diff --git a/confluent_osdeploy/ubuntu24.04 b/confluent_osdeploy/ubuntu24.04 new file mode 120000 index 00000000..13759564 --- /dev/null +++ b/confluent_osdeploy/ubuntu24.04 @@ -0,0 +1 @@ +ubuntu22.04 \ No newline at end of file diff --git a/confluent_osdeploy/ubuntu24.04-diskless b/confluent_osdeploy/ubuntu24.04-diskless new file mode 120000 index 00000000..00822b05 --- /dev/null +++ b/confluent_osdeploy/ubuntu24.04-diskless @@ -0,0 +1 @@ +ubuntu20.04-diskless \ No newline at end of file diff --git a/confluent_osdeploy/utils/copernicus.c b/confluent_osdeploy/utils/copernicus.c index 9c8ace60..3b92044c 100644 --- a/confluent_osdeploy/utils/copernicus.c +++ b/confluent_osdeploy/utils/copernicus.c @@ -31,6 +31,8 @@ int add_uuid(char* destination, int maxsize) { strncpy(destination, "/uuid=", maxsize); uuidsize = read(uuidf, destination + 6, maxsize - 6); close(uuidf); + if (uuidsize < 0) { return 0; } + if (uuidsize > 524288) { return 0; } if (destination[uuidsize + 5] == '\n') { destination[uuidsize + 5 ] = 0; } @@ -45,6 +47,8 @@ int add_confluent_uuid(char* destination, int maxsize) { strncpy(destination, "/confluentuuid=", maxsize); uuidsize = read(uuidf, destination + 15, maxsize - 15); close(uuidf); + if (uuidsize < 0) { return 0; } + if (uuidsize > 524288) { return 0; } if (destination[uuidsize + 14] == '\n') { destination[uuidsize + 14] = 0; } diff --git a/confluent_server/COPYRIGHT b/confluent_server/COPYRIGHT index 6bcd9986..05d7bac2 100644 --- a/confluent_server/COPYRIGHT +++ b/confluent_server/COPYRIGHT @@ -1,21 +1,102 @@ -Name: confluent-client Project: https://hpc.lenovo.com/users/ Source: https://github.com/lenovo/confluent +Upstream-Name: confluent-server + +All files of the confluent-server software are distributed under the terms of an Apache-2.0 license as indicated below: Files: * -Copyright: 2014-2019 Lenovo +Copyright: 2014-2023 Lenovo License: Apache-2.0 -File: client.py: +File: client.py + sockapi.py + redfish.py + attributes.py + httpapi.py + configmanager.py Copyright: 2014 IBM Corporation 2015-2019 Lenovo License: Apache-2.0 File: sortutil.py + log.py Copyright: 2014 IBM Corporation 2015-2016 Lenovo License: Apache-2.0 +File: util.py + noderange.py + main.py + exceptions.py +Copyright: 2014 IBM Corporation + 2015-2017 Lenovo +License: Apache-2.0 + +File: ipmi.py + plugin.py + core.py + consoleserver.py +Copyright: 2014 IBM Corporation + 2015-2018 Lenovo +License: Apache-2.0 + File: tlv.py + shellmodule.py + conf.py + auth.py Copyright: 2014 IBM Corporation License: Apache-2.0 + + +License: Apache-2.0 +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions.
+ +"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. + +"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of this License; and +You must cause any modified files to carry prominent notices stating that You changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. +You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. + +6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS \ No newline at end of file diff --git a/confluent_server/bin/confluent_selfcheck b/confluent_server/bin/confluent_selfcheck index 1b504e95..f1de6c71 100755 --- a/confluent_server/bin/confluent_selfcheck +++ b/confluent_server/bin/confluent_selfcheck @@ -15,12 +15,18 @@ import confluent.sshutil as sshutil import confluent.certutil as certutil import confluent.client as client import confluent.config.configmanager as configmanager +import confluent.netutil as netutil import eventlet.green.subprocess as subprocess import tempfile import shutil import eventlet.green.socket as socket import eventlet import greenlet +import pwd +import signal +import confluent.collective.manager as collective +import confluent.noderange as noderange + def fprint(txt): sys.stdout.write(txt) @@ -108,6 +114,8 @@ def nics_missing_ipv6(): iname, state = comps[:2] if iname == b'lo': continue + if iname == b'virbr0': + continue addrs = comps[2:] hasv6 = False hasv4 = False @@ -156,6 +164,7 @@ def lookup_node(node): if __name__ == '__main__': ap = argparse.ArgumentParser(description='Run configuration checks for a system running confluent service') ap.add_argument('-n', '--node', help='A node name to run node specific checks against') + ap.add_argument('-a', '--automation', help='Do checks against a deployed node for automation and syncfiles function', action='store_true') args, extra = ap.parse_known_args(sys.argv) if len(extra) > 1: ap.print_help() @@ -216,6 +225,7 @@ if __name__ == '__main__': print('OK') except subprocess.CalledProcessError: emprint('Failed to load confluent automation key, syncfiles and profile ansible plays will not work (Example resolution: osdeploy initialize -a)') + os.kill(int(sshutil.agent_pid), signal.SIGTERM) fprint('Checking for blocked insecure boot: ') if insecure_boot_attempts(): emprint('Some nodes are attempting network boot using PXE or HTTP boot, but the node is not configured to allow this (Example resolution: nodegroupattrib everything deployment.useinsecureprotocols=firmware)') @@ -244,13 +254,16 @@ if __name__ == '__main__': allok = False uuidok = True # not really, but suppress the spurious error dnsdomain = rsp.get('dns.domain', {}).get('value', '') - if ',' in dnsdomain or ' ' in dnsdomain: + if dnsdomain and (',' in dnsdomain or ' ' in dnsdomain): allok = False emprint(f'{args.node} has a dns.domain that appears to be a search instead of singular domain') uuidok = True # not really, but suppress the spurious error uuid = rsp.get('id.uuid', {}).get('value', None) if uuid: uuidok = True + if 'collective.managercandidates' in rsp: + # Check if current node in candidates + pass if 'deployment.useinsecureprotocols' in rsp: insec = rsp.get('deployment.useinsecureprotocols', {}).get('value', None) if insec != 'firmware': @@ -269,9 +282,51 @@ if __name__ == '__main__': switch_value = rsp[key].get('value',None) if switch_value and switch_value not in valid_nodes: emprint(f'{switch_value} is not a valid node name (as referenced by attribute "{key}" of node {args.node}).') + cfg = configmanager.ConfigManager(None) + cfd = cfg.get_node_attributes( + args.node, ('deployment.*', 'collective.managercandidates')) + profile = cfd.get(args.node, {}).get( + 'deployment.pendingprofile', {}).get('value', None) + if not profile: + emprint( + f'{args.node} is not currently set to deploy any ' + 'profile, network boot attempts will be ignored') + candmgrs = cfd.get(args.node, {}).get( + 'collective.managercandidates', {}).get('value', None) + if candmgrs: + try: + 
candmgrs = noderange.NodeRange(candmgrs, cfg).nodes + except Exception: # fallback to unverified noderange + candmgrs = noderange.NodeRange(candmgrs).nodes + if collective.get_myname() not in candmgrs: + emprint(f'{args.node} has deployment restricted to ' + 'certain collective managers excluding the ' + 'system running the selfcheck') + print(f"Checking network configuration for {args.node}") + bootablev4nics = [] + bootablev6nics = [] + targsships = [] + for nic in glob.glob("/sys/class/net/*/ifindex"): + idx = int(open(nic, "r").read()) + nicname = nic.split('/')[-2] + ncfg = netutil.get_nic_config(cfg, args.node, ifidx=idx) + if ncfg['ipv4_address']: + targsships.append(ncfg['ipv4_address']) + if ncfg['ipv4_address'] or ncfg['ipv4_method'] == 'dhcp': + bootablev4nics.append(nicname) + if ncfg['ipv6_address']: + targsships.append(ncfg['ipv6_address']) + bootablev6nics.append(nicname) + if bootablev4nics: + print("{} appears to have network configuration suitable for IPv4 deployment via: {}".format(args.node, ",".join(bootablev4nics))) + elif bootablev6nics: + print('{} appears to have networking configuration suitable for IPv6 deployment via: {}'.format(args.node, ",".join(bootablev6nics))) + else: + emprint(f"{args.node} may not have any viable IP network configuration (check name resolution (DNS or hosts file) " + "and/or net.*ipv4_address, and verify that the deployment server addresses and subnet mask/prefix length are accurate)") if not uuidok and not macok: allok = False - emprint(f'{args.node} does not have a uuid or mac address defined in id.uuid or net.*hwaddr, deployment will not work') + emprint(f'{args.node} does not have a uuid or mac address defined in id.uuid or net.*hwaddr, deployment will not work (Example resolution: nodeinventory {args.node} -s)') if allok: print(f'No issues detected with attributes of {args.node}') fprint("Checking name resolution: ") @@ -291,6 +346,34 @@ if __name__ == '__main__': emprint('Name resolution failed for node, it is normally a good idea for the node name to resolve to an IP') if result: print("OK") + if args.automation: + print(f'Checking confluent automation access to {args.node}...') + child = os.fork() + if child > 0: + pid, extcode = os.waitpid(child, 0) + else: + sshutil.ready_keys = {} + sshutil.agent_pid = None + cuser = pwd.getpwnam('confluent') + os.setgid(cuser.pw_gid) + os.setuid(cuser.pw_uid) + sshutil.prep_ssh_key('/etc/confluent/ssh/automation') + for targ in targsships: + srun = subprocess.run( + ['ssh', '-Tn', '-o', 'BatchMode=yes', '-l', 'root', + '-o', 'StrictHostKeyChecking=yes', targ, 'true'], + stdin=subprocess.DEVNULL, stderr=subprocess.PIPE) + if srun.returncode == 0: + print(f'Confluent automation access to {targ} seems OK') + else: + if b'Host key verification failed' in srun.stderr: + emprint(f'Confluent ssh unable to verify host key for {targ}, check /etc/ssh/ssh_known_hosts. 
(Example resolution: osdeploy initialize -k)') + elif b'ermission denied' in srun.stderr: + emprint(f'Confluent user unable to ssh in to {targ}, check /root/.ssh/authorized_keys on the target system versus /etc/confluent/ssh/automation.pub (Example resolution: osdeploy initialize -a)') + else: + emprint('Unknown error attempting confluent automation ssh:') + sys.stderr.buffer.write(srun.stderr) + os.kill(int(sshutil.agent_pid), signal.SIGTERM) else: print("Skipping node checks, no node specified (Example: confluent_selfcheck -n n1)") # possible checks: diff --git a/confluent_server/bin/confluentdbutil b/confluent_server/bin/confluentdbutil index 25a5acf8..e74c2ab4 100755 --- a/confluent_server/bin/confluentdbutil +++ b/confluent_server/bin/confluentdbutil @@ -1,7 +1,7 @@ -#!/usr/bin/python2 +#!/usr/bin/python3 # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2017 Lenovo +# Copyright 2017,2024 Lenovo # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -30,7 +30,7 @@ import confluent.config.conf as conf import confluent.main as main argparser = optparse.OptionParser( - usage="Usage: %prog [options] [dump|restore] [path]") + usage="Usage: %prog [options] [dump|restore|merge] [path]") argparser.add_option('-p', '--password', help='Password to use to protect/unlock a protected dump') argparser.add_option('-i', '--interactivepassword', help='Prompt for password', @@ -51,13 +51,13 @@ argparser.add_option('-s', '--skipkeys', action='store_true', 'data is needed. keys do not change and as such ' 'they do not require incremental backup') (options, args) = argparser.parse_args() -if len(args) != 2 or args[0] not in ('dump', 'restore'): +if len(args) != 2 or args[0] not in ('dump', 'restore', 'merge'): argparser.print_help() sys.exit(1) dumpdir = args[1] -if args[0] == 'restore': +if args[0] in ('restore', 'merge'): pid = main.is_running() if pid is not None: print("Confluent is running, must shut down to restore db") @@ -69,9 +69,22 @@ if args[0] == 'restore': if options.interactivepassword: password = getpass.getpass('Enter password to restore backup: ') try: - cfm.init(True) - cfm.statelessmode = True - cfm.restore_db_from_directory(dumpdir, password) + stateless = args[0] == 'restore' + cfm.init(stateless) + cfm.statelessmode = stateless + skipped = {'nodes': [], 'nodegroups': []} + cfm.restore_db_from_directory( + dumpdir, password, + merge="skip" if args[0] == 'merge' else False, skipped=skipped) + if skipped['nodes']: + skippedn = ','.join(skipped['nodes']) + print('The following nodes were skipped during merge: ' + '{}'.format(skippedn)) + if skipped['nodegroups']: + skippedn = ','.join(skipped['nodegroups']) + print('The following node groups were skipped during merge: ' + '{}'.format(skippedn)) + cfm.statelessmode = False cfm.ConfigManager.wait_for_sync(True) if owner != 0: diff --git a/confluent_server/bin/osdeploy b/confluent_server/bin/osdeploy index ed39e78c..0283553c 100644 --- a/confluent_server/bin/osdeploy +++ b/confluent_server/bin/osdeploy @@ -1,4 +1,4 @@ -#!/usr/bin/python2 +#!/usr/bin/python3 __author__ = 'jjohnson2,bfinley' @@ -49,8 +49,11 @@ def main(args): wiz.add_argument('-p', help='Copy in TFTP contents required for PXE support', action='store_true') wiz.add_argument('-i', help='Interactively prompt for behaviors', action='store_true') wiz.add_argument('-l', help='Set up local management node to allow login from managed nodes', action='store_true') + osip = 
sp.add_parser('importcheck', help='Check import of an OS image from an ISO image') + osip.add_argument('imagefile', help='File to use for source of importing') osip = sp.add_parser('import', help='Import an OS image from an ISO image') osip.add_argument('imagefile', help='File to use for source of importing') + osip.add_argument('-n', help='Specify a custom distribution name') upb = sp.add_parser( 'updateboot', help='Push profile.yaml of the named profile data into boot assets as appropriate') @@ -63,7 +66,9 @@ if cmdset.command == 'list': return oslist() if cmdset.command == 'import': - return osimport(cmdset.imagefile) + return osimport(cmdset.imagefile, custname=cmdset.n) + if cmdset.command == 'importcheck': + return osimport(cmdset.imagefile, checkonly=True) if cmdset.command == 'initialize': return initialize(cmdset) if cmdset.command == 'updateboot': @@ -72,6 +77,12 @@ return rebase(cmdset.profile) ap.print_help() +def symlinkp(src, trg): + try: + os.symlink(src, trg) + except Exception as e: + if e.errno != 17: + raise
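The symlinkp helper just added makes osdeploy initialize re-runnable: where os.symlink would raise EEXIST on a second invocation, the helper treats an already-present link as success. The same idea written against errno constants for readability (the paths here are illustrative only):

    #!/usr/bin/python3
    # Idempotent symlink sketch: a second run over the same target is a
    # no-op instead of an error. errno.EEXIST is the named form of the
    # literal 17 used in the patch.
    import errno
    import os

    def symlinkp(src, trg):
        try:
            os.symlink(src, trg)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise

    symlinkp('/etc/hostname', '/tmp/hostname-link')
    symlinkp('/etc/hostname', '/tmp/hostname-link')  # second call: no-op

The copytree guard in the hunk that follows serves the same goal by a different route, skipping the copy entirely when a prior run already populated the genesis profile directory.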
def initialize_genesis(): if not os.path.exists('/opt/confluent/genesis/x86_64/boot/kernel'): @@ -89,30 +100,33 @@ def initialize_genesis(): return retval[1] retcode = 0 try: + util.mkdirp('/var/lib/confluent', 0o755) if hasconfluentuser: + os.chown('/var/lib/confluent', hasconfluentuser.pw_uid, -1) os.setgid(hasconfluentuser.pw_gid) os.setuid(hasconfluentuser.pw_uid) os.umask(0o22) - os.makedirs('/var/lib/confluent/public/os/genesis-x86_64/boot/efi/boot', 0o755) - os.makedirs('/var/lib/confluent/public/os/genesis-x86_64/boot/initramfs', 0o755) - os.symlink('/opt/confluent/genesis/x86_64/boot/efi/boot/BOOTX64.EFI', + util.mkdirp('/var/lib/confluent/public/os/genesis-x86_64/boot/efi/boot', 0o755) + util.mkdirp('/var/lib/confluent/public/os/genesis-x86_64/boot/initramfs', 0o755) + symlinkp('/opt/confluent/genesis/x86_64/boot/efi/boot/BOOTX64.EFI', '/var/lib/confluent/public/os/genesis-x86_64/boot/efi/boot/BOOTX64.EFI') - os.symlink('/opt/confluent/genesis/x86_64/boot/efi/boot/grubx64.efi', + symlinkp('/opt/confluent/genesis/x86_64/boot/efi/boot/grubx64.efi', '/var/lib/confluent/public/os/genesis-x86_64/boot/efi/boot/grubx64.efi') - os.symlink('/opt/confluent/genesis/x86_64/boot/initramfs/distribution', + symlinkp('/opt/confluent/genesis/x86_64/boot/initramfs/distribution', '/var/lib/confluent/public/os/genesis-x86_64/boot/initramfs/distribution') - os.symlink('/var/lib/confluent/public/site/initramfs.cpio', + symlinkp('/var/lib/confluent/public/site/initramfs.cpio', '/var/lib/confluent/public/os/genesis-x86_64/boot/initramfs/site.cpio') - os.symlink('/opt/confluent/lib/osdeploy/genesis/initramfs/addons.cpio', + symlinkp('/opt/confluent/lib/osdeploy/genesis/initramfs/addons.cpio', '/var/lib/confluent/public/os/genesis-x86_64/boot/initramfs/addons.cpio') - os.symlink('/opt/confluent/genesis/x86_64/boot/kernel', + symlinkp('/opt/confluent/genesis/x86_64/boot/kernel', '/var/lib/confluent/public/os/genesis-x86_64/boot/kernel') - shutil.copytree('/opt/confluent/lib/osdeploy/genesis/profiles/default/ansible/', - '/var/lib/confluent/public/os/genesis-x86_64/ansible/') - shutil.copytree('/opt/confluent/lib/osdeploy/genesis/profiles/default/scripts/', - '/var/lib/confluent/public/os/genesis-x86_64/scripts/') - shutil.copyfile('/opt/confluent/lib/osdeploy/genesis/profiles/default/profile.yaml', - '/var/lib/confluent/public/os/genesis-x86_64/profile.yaml') + if not os.path.exists('/var/lib/confluent/public/os/genesis-x86_64/ansible/'): + shutil.copytree('/opt/confluent/lib/osdeploy/genesis/profiles/default/ansible/', + '/var/lib/confluent/public/os/genesis-x86_64/ansible/') + shutil.copytree('/opt/confluent/lib/osdeploy/genesis/profiles/default/scripts/', + '/var/lib/confluent/public/os/genesis-x86_64/scripts/') + shutil.copyfile('/opt/confluent/lib/osdeploy/genesis/profiles/default/profile.yaml', + '/var/lib/confluent/public/os/genesis-x86_64/profile.yaml') except Exception as e: sys.stderr.write(str(e) + '\n') retcode = 1 @@ -311,14 +325,14 @@ try: sshutil.initialize_ca() except sshutil.AlreadyExists: - emprint('Skipping generation of SSH CA, already present and would likely be more problematic to regenerate than to reuse (if absolutely sure you want to discard old CA, then delete /etc/confluent/ssh/ca*)') + emprint('Skipping generation of SSH CA, already present and would likely be more problematic to regenerate than to reuse (if absolutely sure you want to discard old CA, then delete /etc/confluent/ssh/ca* and restart confluent)') if cmdset.a: didsomething = True init_confluent_myname() try: sshutil.initialize_root_key(True, True) except sshutil.AlreadyExists: - emprint('Skipping generation of new automation key, already present and regeneration usually causes more problems. (If absolutely certain, delete /etc/confluent/ssh/automation*)') + emprint('Skipping generation of new automation key, already present and regeneration usually causes more problems. (If absolutely certain, delete /etc/confluent/ssh/automation* and restart confluent)') if cmdset.p: install_tftp_content() if cmdset.l: @@ -373,9 +387,14 @@ for rsp in c.read('/uuid'): uuid = rsp.get('uuid', {}).get('value', None) if uuid: - with open('confluent_uuid', 'w') as uuidout: - uuidout.write(uuid) - uuidout.write('\n') + oum = os.umask(0o11) + try: + with open('confluent_uuid', 'w') as uuidout: + uuidout.write(uuid) + uuidout.write('\n') + os.chmod('confluent_uuid', 0o644) + finally: + os.umask(oum) totar.append('confluent_uuid') topack.append('confluent_uuid') if os.path.exists('ssh'): @@ -403,7 +422,17 @@ if res: sys.stderr.write('Error occurred while packing site initramfs') sys.exit(1) - os.rename(tmpname, '/var/lib/confluent/public/site/initramfs.cpio') + oum = os.umask(0o22) + try: + os.rename(tmpname, '/var/lib/confluent/public/site/initramfs.cpio') + os.chmod('/var/lib/confluent/public/site/initramfs.cpio', 0o644) + finally: + os.umask(oum) + oum = os.umask(0o22) + try: + os.chmod('/var/lib/confluent/public/site/initramfs.cpio', 0o644) + finally: + os.umask(oum) if cmdset.g: updateboot('genesis-x86_64') if totar: @@ -411,6 +440,11 @@ tarcmd = ['tar', '-czf', tmptarname] + totar subprocess.check_call(tarcmd) os.rename(tmptarname, '/var/lib/confluent/public/site/initramfs.tgz') + oum = os.umask(0o22) + try: + os.chmod('/var/lib/confluent/public/site/initramfs.tgz', 0o644) + finally: + os.umask(oum) os.chdir(opath) print('Site initramfs content packed successfully') @@ -421,6 +455,9 @@ def updateboot(profilename): + if not os.path.exists('/var/lib/confluent/public/site/initramfs.cpio'): + emprint('Must generate site content first (TLS (-t) and/or SSH (-s))') + return 1 c = client.Command() for rsp in c.update('/deployment/profiles/{0}'.format(profilename), {'updateboot': 1}): @@ -464,7 +501,7 @@ print("") -def osimport(imagefile): +def osimport(imagefile, checkonly=False, custname=None): c = client.Command() imagefile =
os.path.abspath(imagefile) if c.unixdomain: @@ -475,11 +512,33 @@ def osimport(imagefile): pass importing = False shortname = None - for rsp in c.create('/deployment/importing/', {'filename': imagefile}): + apipath = '/deployment/importing/' + if checkonly: + apipath = '/deployment/fingerprint/' + apiargs = {'filename': imagefile} + if custname: + apiargs['custname'] = custname + for rsp in c.create(apipath, apiargs): if 'target' in rsp: importing = True shortname = rsp['name'] print('Importing from {0} to {1}'.format(imagefile, rsp['target'])) + elif 'targetpath' in rsp: + tpath = rsp.get('targetpath', None) + tname = rsp.get('name', None) + oscat = rsp.get('oscategory', None) + if tpath: + print('Detected target directory: ' + tpath) + if tname: + print('Detected distribution name: ' + tname) + if oscat: + print('Detected OS category: ' + oscat) + for err in rsp.get('errors', []): + sys.stderr.write('Error: ' + err + '\n') + + elif 'error' in rsp: + sys.stderr.write(rsp['error'] + '\n') + sys.exit(rsp.get('errorcode', 1)) else: print(repr(rsp)) try: diff --git a/confluent_server/builddeb b/confluent_server/builddeb index f71bfce4..3a983694 100755 --- a/confluent_server/builddeb +++ b/confluent_server/builddeb @@ -17,7 +17,9 @@ cd /tmp/confluent/$PKGNAME if [ -x ./makeman ]; then ./makeman fi -./makesetup +sed -e 's/~/./' ./makesetup > ./makesetup.deb +chmod +x ./makesetup.deb +./makesetup.deb VERSION=`cat VERSION` cat > setup.cfg << EOF [install] @@ -35,8 +37,10 @@ cd deb_dist/!(*.orig)/ if [ "$OPKGNAME" = "confluent-server" ]; then if grep wheezy /etc/os-release; then sed -i 's/^\(Depends:.*\)/\1, python-confluent-client, python-lxml, python-eficompressor, python-pycryptodomex, python-dateutil, python-pyopenssl, python-msgpack/' debian/control + elif grep jammy /etc/os-release; then + sed -i 's/^\(Depends:.*\)/\1, confluent-client, python3-lxml, python3-eficompressor, python3-pycryptodome, python3-websocket, python3-msgpack, python3-eventlet, python3-pyparsing, python3-pyghmi(>=1.5.71), python3-paramiko, python3-pysnmp4, python3-libarchive-c, confluent-vtbufferd, python3-netifaces, python3-yaml, python3-dateutil/' debian/control else - sed -i 's/^\(Depends:.*\)/\1, confluent-client, python3-lxml, python3-eficompressor, python3-pycryptodome, python3-websocket, python3-msgpack, python3-eventlet, python3-pyparsing, python3-pyghmi, python3-paramiko, python3-pysnmp4, python3-libarchive-c, confluent-vtbufferd/' debian/control + sed -i 's/^\(Depends:.*\)/\1, confluent-client, python3-lxml, python3-eficompressor, python3-pycryptodome, python3-websocket, python3-msgpack, python3-eventlet, python3-pyparsing, python3-pyghmi(>=1.5.71), python3-paramiko, python3-pysnmp4, python3-libarchive-c, confluent-vtbufferd, python3-netifaces, python3-yaml, python3-dateutil, python3-pyasyncore/' debian/control fi if grep wheezy /etc/os-release; then echo 'confluent_client python-confluent-client' >> debian/pydist-overrides @@ -72,7 +76,7 @@ else rm -rf $PKGNAME.egg-info dist setup.py rm -rf $(find deb_dist -mindepth 1 -maxdepth 1 -type d) if [ ! 
-z "$1" ]; then - mv deb_dist/* $1/ + mv deb_dist/*.deb $1/ fi fi exit 0 diff --git a/confluent_server/confluent/auth.py b/confluent_server/confluent/auth.py index fd07a133..fb82af24 100644 --- a/confluent_server/confluent/auth.py +++ b/confluent_server/confluent/auth.py @@ -58,7 +58,7 @@ _allowedbyrole = { '/nodes/', '/node*/media/uploads/', '/node*/inventory/firmware/updates/*', - '/node*/suppport/servicedata*', + '/node*/support/servicedata*', '/node*/attributes/expression', '/nodes/*/console/session*', '/nodes/*/shell/sessions*', diff --git a/confluent_server/confluent/certutil.py b/confluent_server/confluent/certutil.py index dffaf85e..4ac67165 100644 --- a/confluent_server/confluent/certutil.py +++ b/confluent_server/confluent/certutil.py @@ -76,7 +76,7 @@ def get_certificate_paths(): continue kploc = check_apache_config(os.path.join(currpath, fname)) - if keypath and kploc[0]: + if keypath and kploc[0] and keypath != kploc[0]: return None, None # Ambiguous... if kploc[0]: keypath, certpath = kploc @@ -95,27 +95,29 @@ def assure_tls_ca(): os.makedirs(os.path.dirname(fname)) except OSError as e: if e.errno != 17: + os.seteuid(ouid) raise + try: + shutil.copy2('/etc/confluent/tls/cacert.pem', fname) + hv, _ = util.run( + ['openssl', 'x509', '-in', '/etc/confluent/tls/cacert.pem', '-hash', '-noout']) + if not isinstance(hv, str): + hv = hv.decode('utf8') + hv = hv.strip() + hashname = '/var/lib/confluent/public/site/tls/{0}.0'.format(hv) + certname = '{0}.pem'.format(collective.get_myname()) + for currname in os.listdir('/var/lib/confluent/public/site/tls/'): + currname = os.path.join('/var/lib/confluent/public/site/tls/', currname) + if currname.endswith('.0'): + try: + realname = os.readlink(currname) + if realname == certname: + os.unlink(currname) + except OSError: + pass + os.symlink(certname, hashname) finally: os.seteuid(ouid) - shutil.copy2('/etc/confluent/tls/cacert.pem', fname) - hv, _ = util.run( - ['openssl', 'x509', '-in', '/etc/confluent/tls/cacert.pem', '-hash', '-noout']) - if not isinstance(hv, str): - hv = hv.decode('utf8') - hv = hv.strip() - hashname = '/var/lib/confluent/public/site/tls/{0}.0'.format(hv) - certname = '{0}.pem'.format(collective.get_myname()) - for currname in os.listdir('/var/lib/confluent/public/site/tls/'): - currname = os.path.join('/var/lib/confluent/public/site/tls/', currname) - if currname.endswith('.0'): - try: - realname = os.readlink(currname) - if realname == certname: - os.unlink(currname) - except OSError: - pass - os.symlink(certname, hashname) def substitute_cfg(setting, key, val, newval, cfgfile, line): if key.strip() == setting: @@ -204,7 +206,7 @@ def create_simple_ca(keyout, certout): finally: os.remove(tmpconfig) -def create_certificate(keyout=None, certout=None): +def create_certificate(keyout=None, certout=None, csrout=None): if not keyout: keyout, certout = get_certificate_paths() if not keyout: @@ -212,9 +214,10 @@ def create_certificate(keyout=None, certout=None): assure_tls_ca() shortname = socket.gethostname().split('.')[0] longname = shortname # socket.getfqdn() - subprocess.check_call( - ['openssl', 'ecparam', '-name', 'secp384r1', '-genkey', '-out', - keyout]) + if not csrout: + subprocess.check_call( + ['openssl', 'ecparam', '-name', 'secp384r1', '-genkey', '-out', + keyout]) san = ['IP:{0}'.format(x) for x in get_ip_addresses()] # It is incorrect to put IP addresses as DNS type. 
However # there exist non-compliant clients that fail with them as IP @@ -227,21 +230,34 @@ os.close(tmphdl) tmphdl, extconfig = tempfile.mkstemp() os.close(tmphdl) - tmphdl, csrout = tempfile.mkstemp() - os.close(tmphdl) + needcsr = False + if csrout is None: + needcsr = True + tmphdl, csrout = tempfile.mkstemp() + os.close(tmphdl) shutil.copy2(sslcfg, tmpconfig) - serialnum = '0x' + ''.join(['{:02x}'.format(x) for x in bytearray(os.urandom(20))]) try: - with open(tmpconfig, 'a') as cfgfile: - cfgfile.write('\n[SAN]\nsubjectAltName={0}'.format(san)) - with open(extconfig, 'a') as cfgfile: - cfgfile.write('\nbasicConstraints=CA:false\nsubjectAltName={0}'.format(san)) - subprocess.check_call([ - 'openssl', 'req', '-new', '-key', keyout, '-out', csrout, '-subj', - '/CN={0}'.format(longname), - '-extensions', 'SAN', '-config', tmpconfig - ]) + if needcsr: + with open(tmpconfig, 'a') as cfgfile: + cfgfile.write('\n[SAN]\nsubjectAltName={0}'.format(san)) + with open(extconfig, 'a') as cfgfile: + cfgfile.write('\nbasicConstraints=CA:false\nsubjectAltName={0}'.format(san)) + subprocess.check_call([ + 'openssl', 'req', '-new', '-key', keyout, '-out', csrout, '-subj', + '/CN={0}'.format(longname), + '-extensions', 'SAN', '-config', tmpconfig + ]) + else: + # when used manually, allow the SAN of the provided csr to stand + # an explicit subj/SAN argument may be added later, in which case the copy would be skipped + with open(tmpconfig, 'a') as cfgfile: + cfgfile.write('\ncopy_extensions=copy\n') + with open(extconfig, 'a') as cfgfile: + cfgfile.write('\nbasicConstraints=CA:false\n') if os.path.exists('/etc/confluent/tls/cakey.pem'): + # simple style CA in effect, make a random serial number and + # hope for the best, and accept inability to backdate the cert + serialnum = '0x' + ''.join(['{:02x}'.format(x) for x in bytearray(os.urandom(20))]) subprocess.check_call([ 'openssl', 'x509', '-req', '-in', csrout, '-CA', '/etc/confluent/tls/cacert.pem', @@ -250,20 +266,40 @@ '-extfile', extconfig ]) else: + # we moved to a 'proper' CA, mainly for access to backdating + # the start of certs for finicky system clocks + # this also provides a harder guarantee of serial uniqueness, though + # that is of no practical consequence (a 160 bit random value is as + # good as guaranteed unique) + # the downside is that certificate generation is serialized + cacfgfile = '/etc/confluent/tls/ca/openssl.cfg' + if needcsr: + tmphdl, tmpcafile = tempfile.mkstemp() + shutil.copy2(cacfgfile, tmpcafile) + os.close(tmphdl) + cacfgfile = tmpcafile + # with realcalock: # if we put it in server, we must lock it subprocess.check_call([ 'openssl', 'ca', '-config', cacfgfile, '-in', csrout, '-out', certout, '-batch', '-notext', '-startdate', '19700101010101Z', '-enddate', '21000101010101Z', '-extfile', extconfig ]) finally: os.remove(tmpconfig) - os.remove(csrout) - os.remove(extconfig) + if needcsr: + os.remove(csrout) + os.remove(extconfig) if __name__ == '__main__': + import sys outdir = os.getcwd() keyout = os.path.join(outdir, 'key.pem') - certout = os.path.join(outdir, 'cert.pem') - create_certificate(keyout, certout) + certout = os.path.join(outdir, (sys.argv[2] if len(sys.argv) > 2 else '') + 'cert.pem') + csrout = sys.argv[1] if len(sys.argv) > 1 else None + create_certificate(keyout, certout, csrout) diff --git a/confluent_server/confluent/config/attributes.py
b/confluent_server/confluent/config/attributes.py index 101ee03d..f926c962 100644 --- a/confluent_server/confluent/config/attributes.py +++ b/confluent_server/confluent/config/attributes.py @@ -469,9 +469,13 @@ node = { 'net.interface_names': { 'description': 'Interface name or comma delimited list of names to match for this interface. It is generally recommended ' 'to leave this blank unless needing to set up interfaces that are not on a common subnet with a confluent server, ' - 'as confluent servers provide autodetection for matching the correct network definition to an interface.' + 'as confluent servers provide autodetection for matching the correct network definition to an interface. ' 'This would be the default name per the deployed OS and can be a comma delimited list to denote members of ' - 'a team' + 'a team or a single interface for VLAN/PKEY connections.' + }, + 'net.vlan_id': { + 'description': 'Ethernet VLAN or InfiniBand PKEY to use for this connection. ' + 'Specify the parent device using net.interface_names.' }, 'net.ipv4_address': { 'description': 'When configuring static, use this address. If ' diff --git a/confluent_server/confluent/config/configmanager.py b/confluent_server/confluent/config/configmanager.py index 9419e7fe..7702b97d 100644 --- a/confluent_server/confluent/config/configmanager.py +++ b/confluent_server/confluent/config/configmanager.py @@ -252,10 +252,12 @@ def _rpc_master_rename_nodegroups(tenant, renamemap): def _rpc_master_clear_node_attributes(tenant, nodes, attributes): - ConfigManager(tenant).clear_node_attributes(nodes, attributes) + warnings = [] + ConfigManager(tenant).clear_node_attributes(nodes, attributes, warnings) + return warnings -def _rpc_clear_node_attributes(tenant, nodes, attributes): +def _rpc_clear_node_attributes(tenant, nodes, attributes): # master has to do the warnings ConfigManager(tenant)._true_clear_node_attributes(nodes, attributes) @@ -348,9 +350,9 @@ def exec_on_leader(function, *args): rpclen = len(rpcpayload) cfgleader.sendall(struct.pack('!Q', rpclen)) cfgleader.sendall(rpcpayload) - _pendingchangesets[xid].wait() + retv = _pendingchangesets[xid].wait() del _pendingchangesets[xid] - return + return retv def exec_on_followers(fnname, *args): @@ -714,8 +716,9 @@ def relay_slaved_requests(name, listener): exc = None if not (rpc['function'].startswith('_rpc_') or rpc['function'].endswith('_collective_member')): raise Exception('Unsupported function {0} called'.format(rpc['function'])) + retv = None try: - globals()[rpc['function']](*rpc['args']) + retv = globals()[rpc['function']](*rpc['args']) except ValueError as ve: exc = ['ValueError', str(ve)] except Exception as e: @@ -723,7 +726,7 @@ def relay_slaved_requests(name, listener): exc = ['Exception', str(e)] if 'xid' in rpc: res = _push_rpc(listener, msgpack.packb({'xid': rpc['xid'], - 'exc': exc}, use_bin_type=False)) + 'exc': exc, 'ret': retv}, use_bin_type=False)) if not res: break try: @@ -929,7 +932,7 @@ def follow_channel(channel): exc = Exception(excstr) _pendingchangesets[rpc['xid']].send_exception(exc) else: - _pendingchangesets[rpc['xid']].send() + _pendingchangesets[rpc['xid']].send(rpc.get('ret', None)) if 'quorum' in rpc: _hasquorum = rpc['quorum'] res = _push_rpc(channel, b'') # use null as ACK @@ -1089,6 +1092,11 @@ class _ExpressionFormat(string.Formatter): self._nodename = nodename self._numbers = None + def _vformat(self, format_string, args, kwargs, used_args, recursion_depth, + auto_arg_index=False): + return super()._vformat(format_string, args, 
kwargs, used_args, + recursion_depth, auto_arg_index) + def get_field(self, field_name, args, kwargs): return field_name, field_name @@ -1895,7 +1903,7 @@ class ConfigManager(object): def add_group_attributes(self, attribmap): self.set_group_attributes(attribmap, autocreate=True) - def set_group_attributes(self, attribmap, autocreate=False): + def set_group_attributes(self, attribmap, autocreate=False, merge="replace", keydata=None, skipped=None): for group in attribmap: curr = attribmap[group] for attrib in curr: @@ -1916,11 +1924,11 @@ if cfgstreams: exec_on_followers('_rpc_set_group_attributes', self.tenant, attribmap, autocreate) - self._true_set_group_attributes(attribmap, autocreate) + self._true_set_group_attributes(attribmap, autocreate, merge=merge, keydata=keydata, skipped=skipped) - def _true_set_group_attributes(self, attribmap, autocreate=False): + def _true_set_group_attributes(self, attribmap, autocreate=False, merge="replace", keydata=None, skipped=None): changeset = {} - for group in attribmap: + for group in list(attribmap): if group == '': raise ValueError('"{0}" is not a valid group name'.format( group)) @@ -1933,6 +1941,11 @@ group)) if not autocreate and group not in self._cfgstore['nodegroups']: raise ValueError("{0} group does not exist".format(group)) + if merge == 'skip' and group in self._cfgstore['nodegroups']: + if skipped is not None: + skipped.append(group) + del attribmap[group] + continue for attr in list(attribmap[group]): # first do a pass to normalize out any aliased attribute names if attr in _attraliases: @@ -2007,6 +2020,9 @@ newdict = {'value': attribmap[group][attr]} else: newdict = attribmap[group][attr] + if keydata and attr.startswith('secret.') and 'cryptvalue' in newdict: + newdict['value'] = decrypt_value(newdict['cryptvalue'], keydata['cryptkey'], keydata['integritykey']) + del newdict['cryptvalue'] if 'value' in newdict and attr.startswith("secret."): newdict['cryptvalue'] = crypt_value(newdict['value']) del newdict['value'] @@ -2197,16 +2213,19 @@ self._notif_attribwatchers(changeset) self._bg_sync_to_file() - def clear_node_attributes(self, nodes, attributes): + def clear_node_attributes(self, nodes, attributes, warnings=None): if cfgleader: - return exec_on_leader('_rpc_master_clear_node_attributes', + mywarnings = exec_on_leader('_rpc_master_clear_node_attributes', self.tenant, nodes, attributes) + if mywarnings and warnings is not None: + warnings.extend(mywarnings) + return if cfgstreams: exec_on_followers('_rpc_clear_node_attributes', self.tenant, nodes, attributes) - self._true_clear_node_attributes(nodes, attributes) + self._true_clear_node_attributes(nodes, attributes, warnings) - def _true_clear_node_attributes(self, nodes, attributes): + def _true_clear_node_attributes(self, nodes, attributes, warnings=None): # accumulate all changes into a changeset and push in one go changeset = {} realattributes = [] @@ -2229,8 +2248,17 @@ # delete it and check for inheritance to backfill data del nodek[attrib] self._do_inheritance(nodek, attrib, node, changeset) + if warnings is not None: + if attrib in nodek: + warnings.append('The attribute "{}" was defined specifically for the node; after clearing, it now has a value inherited from the group "{}"'.format(attrib, nodek[attrib]['inheritedfrom'])) _addchange(changeset, node, attrib) _mark_dirtykey('nodes', node, self.tenant) + elif attrib in nodek: + if
warnings is not None: + warnings.append('The attribute "{0}" is inherited from group "{1}", leaving the inherited value alone (use "{0}=" with no value to explicitly blank the value if desired)'.format(attrib, nodek[attrib]['inheritedfrom'])) + else: + if warnings is not None: + warnings.append('Attribute "{}" is either already cleared, or does not match a defined attribute (if referencing an attribute group, try a wildcard)'.format(attrib)) if ('_expressionkeys' in nodek and attrib in nodek['_expressionkeys']): recalcexpressions = True @@ -2329,7 +2357,7 @@ - def set_node_attributes(self, attribmap, autocreate=False): + def set_node_attributes(self, attribmap, autocreate=False, merge="replace", keydata=None, skipped=None): for node in attribmap: curr = attribmap[node] for attrib in curr: @@ -2350,9 +2378,9 @@ if cfgstreams: exec_on_followers('_rpc_set_node_attributes', self.tenant, attribmap, autocreate) - self._true_set_node_attributes(attribmap, autocreate) + self._true_set_node_attributes(attribmap, autocreate, merge, keydata, skipped) - def _true_set_node_attributes(self, attribmap, autocreate): + def _true_set_node_attributes(self, attribmap, autocreate, merge="replace", keydata=None, skipped=None): # TODO(jbjohnso): multi mgr support, here if we have peers, # pickle the arguments and fire them off in eventlet # flows to peers, all should have the same result @@ -2360,7 +2388,7 @@ changeset = {} # first do a sanity check of the input upfront # this mitigates risk of arguments being partially applied - for node in attribmap: + for node in list(attribmap): node = confluent.util.stringify(node) if node == '': raise ValueError('"{0}" is not a valid node name'.format(node)) @@ -2373,6 +2401,11 @@ '"{0}" is not a valid node name'.format(node)) if autocreate is False and node not in self._cfgstore['nodes']: raise ValueError("node {0} does not exist".format(node)) + if merge == "skip" and node in self._cfgstore['nodes']: + del attribmap[node] + if skipped is not None: + skipped.append(node) + continue if 'groups' not in attribmap[node] and node not in self._cfgstore['nodes']: attribmap[node]['groups'] = [] for attrname in list(attribmap[node]): @@ -2443,6 +2476,9 @@ # add check here, skip None attributes if newdict is None: continue + if keydata and attrname.startswith('secret.') and 'cryptvalue' in newdict: + newdict['value'] = decrypt_value(newdict['cryptvalue'], keydata['cryptkey'], keydata['integritykey']) + del newdict['cryptvalue'] if 'value' in newdict and attrname.startswith("secret."): newdict['cryptvalue'] = crypt_value(newdict['value']) del newdict['value'] @@ -2483,19 +2519,21 @@ self._bg_sync_to_file() #TODO: wait for synchronization to succeed/fail??)
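+ # Note on the new parameters threaded through the set/clear paths here: merge="skip" leaves any node or group that already exists untouched and records its name in the caller-provided skipped list, while keydata carries the crypt/integrity keys parsed from a dump so secret.* values can be decrypted from the dump and re-encrypted under the local master key. + # A minimal sketch of a merge-style restore (hypothetical invocation; the path and password are illustrative): + # skipped = {'nodes': [], 'nodegroups': []} + # restore_db_from_directory('/var/tmp/confluentdump', password, merge='skip', skipped=skipped) + # print('Skipped existing nodes:', skipped['nodes'])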
- def _load_from_json(self, jsondata, sync=True): + def _load_from_json(self, jsondata, sync=True, merge=False, keydata=None, skipped=None): self.inrestore = True try: - self._load_from_json_backend(jsondata, sync=True) + self._load_from_json_backend(jsondata, sync=True, merge=merge, keydata=keydata, skipped=skipped) finally: self.inrestore = False - def _load_from_json_backend(self, jsondata, sync=True): + def _load_from_json_backend(self, jsondata, sync=True, merge=False, keydata=None, skipped=None): """Load fresh configuration data from jsondata :param jsondata: String of jsondata :return: """ + if not skipped: + skipped = {'nodes': None, 'nodegroups': None} dumpdata = json.loads(jsondata) tmpconfig = {} for confarea in _config_areas: @@ -2543,20 +2581,27 @@ pass # Now we have to iterate through each fixed up element, using the # set attribute to flesh out inheritance and expressions - _cfgstore['main']['idmap'] = {} + if (not merge) or _cfgstore.get('main', {}).get('idmap', None) is None: + _cfgstore['main']['idmap'] = {} + attribmerge = merge if merge else "replace" for confarea in _config_areas: - self._cfgstore[confarea] = {} + if not merge or confarea not in self._cfgstore: + self._cfgstore[confarea] = {} if confarea not in tmpconfig: continue if confarea == 'nodes': - self.set_node_attributes(tmpconfig[confarea], True) + self.set_node_attributes(tmpconfig[confarea], True, merge=attribmerge, keydata=keydata, skipped=skipped['nodes']) elif confarea == 'nodegroups': - self.set_group_attributes(tmpconfig[confarea], True) + self.set_group_attributes(tmpconfig[confarea], True, merge=attribmerge, keydata=keydata, skipped=skipped['nodegroups']) elif confarea == 'usergroups': + if merge: + continue for usergroup in tmpconfig[confarea]: role = tmpconfig[confarea][usergroup].get('role', 'Administrator') self.create_usergroup(usergroup, role=role) elif confarea == 'users': + if merge: + continue for user in tmpconfig[confarea]: ucfg = tmpconfig[confarea][user] uid = ucfg.get('id', None) @@ -2627,7 +2672,7 @@ dumpdata[confarea][element][attribute]['cryptvalue'] = '!'.join(cryptval) elif isinstance(dumpdata[confarea][element][attribute], set): dumpdata[confarea][element][attribute] = \ - list(dumpdata[confarea][element][attribute]) + confluent.util.natural_sort(list(dumpdata[confarea][element][attribute])) return json.dumps( dumpdata, sort_keys=True, indent=4, separators=(',', ': ')) @@ -2856,7 +2901,7 @@ def _restore_keys(jsond, password, newpassword=None, sync=True): newpassword = keyfile.read() set_global('master_privacy_key', _format_key(cryptkey, password=newpassword), sync) - if integritykey: + if integritykey: set_global('master_integrity_key', _format_key(integritykey, password=newpassword), sync) _masterkey = cryptkey @@ -2891,35 +2936,46 @@ return keydata -def restore_db_from_directory(location, password): +def restore_db_from_directory(location, password, merge=False, skipped=None): + kdd = None try: with open(os.path.join(location, 'keys.json'), 'r') as cfgfile: keydata = cfgfile.read() - json.loads(keydata) - _restore_keys(keydata, password) + kdd = json.loads(keydata) + if merge: + if 'cryptkey' in kdd: + kdd['cryptkey'] = _parse_key(kdd['cryptkey'], password) + if 'integritykey' in kdd: + kdd['integritykey'] = _parse_key(kdd['integritykey'], password) + else: + kdd['integritykey'] = None # GCM + else: + kdd = None + _restore_keys(keydata, password) except IOError as e: if e.errno ==
2: raise Exception("Cannot restore without keys, this may be a " "redacted dump") - try: - moreglobals = json.load(open(os.path.join(location, 'globals.json'))) - for globvar in moreglobals: - set_global(globvar, moreglobals[globvar]) - except IOError as e: - if e.errno != 2: - raise - try: - collective = json.load(open(os.path.join(location, 'collective.json'))) - _cfgstore['collective'] = {} - for coll in collective: - add_collective_member(coll, collective[coll]['address'], - collective[coll]['fingerprint']) - except IOError as e: - if e.errno != 2: - raise + if not merge: + try: + moreglobals = json.load(open(os.path.join(location, 'globals.json'))) + for globvar in moreglobals: + set_global(globvar, moreglobals[globvar]) + except IOError as e: + if e.errno != 2: + raise + try: + collective = json.load(open(os.path.join(location, 'collective.json'))) + _cfgstore['collective'] = {} + for coll in collective: + add_collective_member(coll, collective[coll]['address'], + collective[coll]['fingerprint']) + except IOError as e: + if e.errno != 2: + raise with open(os.path.join(location, 'main.json'), 'r') as cfgfile: cfgdata = cfgfile.read() - ConfigManager(tenant=None)._load_from_json(cfgdata) + ConfigManager(tenant=None)._load_from_json(cfgdata, merge=merge, keydata=kdd, skipped=skipped) ConfigManager.wait_for_sync(True) diff --git a/confluent_server/confluent/consoleserver.py b/confluent_server/confluent/consoleserver.py index 37274792..aa05b9b7 100644 --- a/confluent_server/confluent/consoleserver.py +++ b/confluent_server/confluent/consoleserver.py @@ -49,7 +49,6 @@ _handled_consoles = {} _tracelog = None _bufferdaemon = None -_bufferlock = None try: range = xrange @@ -62,39 +61,38 @@ def chunk_output(output, n): yield output[i:i + n] def get_buffer_output(nodename): - out = _bufferdaemon.stdin - instream = _bufferdaemon.stdout + out = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + out.setsockopt(socket.SOL_SOCKET, socket.SO_PASSCRED, 1) + out.connect("\x00confluent-vtbuffer") if not isinstance(nodename, bytes): nodename = nodename.encode('utf8') outdata = bytearray() - with _bufferlock: - out.write(struct.pack('I', len(nodename))) - out.write(nodename) - out.flush() - select.select((instream,), (), (), 30) - while not outdata or outdata[-1]: - try: - chunk = os.read(instream.fileno(), 128) - except IOError: - chunk = None - if chunk: - outdata.extend(chunk) - else: - select.select((instream,), (), (), 0) - return bytes(outdata[:-1]) + out.send(struct.pack('I', len(nodename))) + out.send(nodename) + select.select((out,), (), (), 30) + while not outdata or outdata[-1]: + try: + chunk = os.read(out.fileno(), 128) + except IOError: + chunk = None + if chunk: + outdata.extend(chunk) + else: + select.select((out,), (), (), 0) + return bytes(outdata[:-1]) def send_output(nodename, output): if not isinstance(nodename, bytes): nodename = nodename.encode('utf8') - with _bufferlock: - _bufferdaemon.stdin.write(struct.pack('I', len(nodename) | (1 << 29))) - _bufferdaemon.stdin.write(nodename) - _bufferdaemon.stdin.flush() - for chunk in chunk_output(output, 8192): - _bufferdaemon.stdin.write(struct.pack('I', len(chunk) | (2 << 29))) - _bufferdaemon.stdin.write(chunk) - _bufferdaemon.stdin.flush() + out = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + out.setsockopt(socket.SOL_SOCKET, socket.SO_PASSCRED, 1) + out.connect("\x00confluent-vtbuffer") + out.send(struct.pack('I', len(nodename) | (1 << 29))) + out.send(nodename) + for chunk in chunk_output(output, 8192): + 
out.send(struct.pack('I', len(chunk) | (2 << 29))) + out.send(chunk) def _utf8_normalize(data, decoder): # first we give the stateful decoder a crack at the byte stream, @@ -175,6 +173,9 @@ class ConsoleHandler(object): self.connectstate = 'connecting' eventlet.spawn(self._connect) + def resize(self, width, height): + return None + def _get_retry_time(self): clustsize = len(self.cfgmgr._cfgstore['nodes']) self._retrytime = self._retrytime * 2 + 1 @@ -600,15 +601,10 @@ def _start_tenant_sessions(cfm): def initialize(): global _tracelog global _bufferdaemon - global _bufferlock - _bufferlock = semaphore.Semaphore() _tracelog = log.Logger('trace') _bufferdaemon = subprocess.Popen( - ['/opt/confluent/bin/vtbufferd'], bufsize=0, stdin=subprocess.PIPE, - stdout=subprocess.PIPE) - fl = fcntl.fcntl(_bufferdaemon.stdout.fileno(), fcntl.F_GETFL) - fcntl.fcntl(_bufferdaemon.stdout.fileno(), - fcntl.F_SETFL, fl | os.O_NONBLOCK) + ['/opt/confluent/bin/vtbufferd', 'confluent-vtbuffer'], bufsize=0, stdin=subprocess.DEVNULL, + stdout=subprocess.DEVNULL) def start_console_sessions(): configmodule.hook_new_configmanagers(_start_tenant_sessions) @@ -622,6 +618,8 @@ def connect_node(node, configmanager, username=None, direct=True, width=80, myname = collective.get_myname() if myc and myc != collective.get_myname() and direct: minfo = configmodule.get_collective_member(myc) + if not minfo: + raise Exception('Unable to get collective member for {}'.format(node)) return ProxyConsole(node, minfo, myname, configmanager, username, width, height) consk = (node, configmanager.tenant) diff --git a/confluent_server/confluent/core.py b/confluent_server/confluent/core.py index ec62c57d..6c55f417 100644 --- a/confluent_server/confluent/core.py +++ b/confluent_server/confluent/core.py @@ -44,6 +44,7 @@ import confluent.discovery.core as disco import confluent.interface.console as console import confluent.exceptions as exc import confluent.messages as msg +import confluent.mountmanager as mountmanager import confluent.networking.macmap as macmap import confluent.noderange as noderange import confluent.osimage as osimage @@ -73,7 +74,7 @@ import uuid import yaml pluginmap = {} -dispatch_plugins = (b'ipmi', u'ipmi', b'redfish', u'redfish', b'tsmsol', u'tsmsol', b'geist', u'geist', b'deltapdu', u'deltapdu', b'eatonpdu', u'eatonpdu', b'affluent', u'affluent', b'cnos', u'cnos') +dispatch_plugins = (b'ipmi', u'ipmi', b'redfish', u'redfish', b'tsmsol', u'tsmsol', b'geist', u'geist', b'deltapdu', u'deltapdu', b'eatonpdu', u'eatonpdu', b'affluent', u'affluent', b'cnos', u'cnos', b'enos', u'enos') PluginCollection = plugin.PluginCollection @@ -161,16 +162,25 @@ def _merge_dict(original, custom): rootcollections = ['deployment/', 'discovery/', 'events/', 'networking/', - 'noderange/', 'nodes/', 'nodegroups/', 'storage/', 'usergroups/' , + 'noderange/', 'nodes/', 'nodegroups/', 'storage/', 'usergroups/', 'users/', 'uuid', 'version', 'staging/'] + class PluginRoute(object): def __init__(self, routedict): self.routeinfo = routedict +def handle_storage(configmanager, inputdata, pathcomponents, operation): + if len(pathcomponents) == 1: + yield msg.ChildCollection('remote/') + return + if pathcomponents[1] == 'remote': + for rsp in mountmanager.handle_request(configmanager, inputdata, pathcomponents[2:], operation): + yield rsp + def handle_deployment(configmanager, inputdata, pathcomponents, operation): if len(pathcomponents) == 1: @@ -193,8 +203,19 @@ def handle_deployment(configmanager, inputdata, pathcomponents, for prof in 
osimage.list_profiles(): yield msg.ChildCollection(prof + '/') return - if len(pathcomponents) == 3: - profname = pathcomponents[-1] + if len(pathcomponents) >= 3: + profname = pathcomponents[2] + if len(pathcomponents) == 4: + if operation == 'retrieve': + if len(pathcomponents) == 4 and pathcomponents[-1] == 'info': + with open('/var/lib/confluent/public/os/{}/profile.yaml'.format(profname)) as profyaml: + profinfo = yaml.safe_load(profyaml) + profinfo['name'] = profname + yield msg.KeyValueData(profinfo) + return + elif len(pathcomponents) == 3: + if operation == 'retrieve': + yield msg.ChildCollection('info') if operation == 'update': if 'updateboot' in inputdata: osimage.update_boot(profname) @@ -210,6 +231,17 @@ def handle_deployment(configmanager, inputdata, pathcomponents, for cust in customized: yield msg.KeyValueData({'customized': cust}) return + if pathcomponents[1] == 'fingerprint': + if operation == 'create': + importer = osimage.MediaImporter(inputdata['filename'], configmanager, checkonly=True) + medinfo = { + 'targetpath': importer.targpath, + 'name': importer.osname, + 'oscategory': importer.oscategory, + 'errors': importer.errors, + } + yield msg.KeyValueData(medinfo) + return if pathcomponents[1] == 'importing': if len(pathcomponents) == 2 or not pathcomponents[-1]: if operation == 'retrieve': @@ -217,8 +249,12 @@ def handle_deployment(configmanager, inputdata, pathcomponents, yield imp return elif operation == 'create': - importer = osimage.MediaImporter(inputdata['filename'], - configmanager) + if inputdata.get('custname', None): + importer = osimage.MediaImporter(inputdata['filename'], + configmanager, inputdata['custname']) + else: + importer = osimage.MediaImporter(inputdata['filename'], + configmanager) yield msg.KeyValueData({'target': importer.targpath, 'name': importer.importkey}) return @@ -324,6 +360,10 @@ def _init_core(): 'pluginattrs': ['hardwaremanagement.method'], 'default': 'ipmi', }), + 'extra_advanced': PluginRoute({ + 'pluginattrs': ['hardwaremanagement.method'], + 'default': 'ipmi', + }), }, }, 'storage': { @@ -392,6 +432,7 @@ def _init_core(): 'pluginattrs': ['hardwaremanagement.method'], 'default': 'ipmi', }), + 'ikvm': PluginRoute({'handler': 'ikvm'}), }, 'description': PluginRoute({ 'pluginattrs': ['hardwaremanagement.method'], @@ -483,6 +524,20 @@ def _init_core(): 'pluginattrs': ['hardwaremanagement.method'], 'default': 'ipmi', }), + 'normalized': { + 'inlet_temp': PluginRoute({ + 'pluginattrs': ['hardwaremanagement.method'], + 'default': 'ipmi', + }), + 'average_cpu_temp': PluginRoute({ + 'pluginattrs': ['hardwaremanagement.method'], + 'default': 'ipmi', + }), + 'total_power': PluginRoute({ + 'pluginattrs': ['hardwaremanagement.method'], + 'default': 'ipmi', + }), + }, 'energy': PluginCollection({ 'pluginattrs': ['hardwaremanagement.method'], 'default': 'ipmi', @@ -1314,6 +1369,9 @@ def handle_path(path, operation, configmanager, inputdata=None, autostrip=True): elif pathcomponents[0] == 'deployment': return handle_deployment(configmanager, inputdata, pathcomponents, operation) + elif pathcomponents[0] == 'storage': + return handle_storage(configmanager, inputdata, pathcomponents, + operation) elif pathcomponents[0] == 'nodegroups': return handle_nodegroup_request(configmanager, inputdata, pathcomponents, diff --git a/confluent_server/confluent/credserver.py b/confluent_server/confluent/credserver.py index c569bc4d..390179f8 100644 --- a/confluent_server/confluent/credserver.py +++ b/confluent_server/confluent/credserver.py @@ -127,14 +127,15 
@@ class CredServer(object): if hmacval != hmac.new(hmackey, etok, hashlib.sha256).digest(): client.close() return - cfgupdate = {nodename: {'crypted.selfapikey': {'hashvalue': echotoken}, 'deployment.sealedapikey': '', 'deployment.apiarmed': ''}} - if hmackey and apiarmed != 'continuous': - self.cfm.clear_node_attributes([nodename], ['secret.selfapiarmtoken']) - if apiarmed == 'continuous': - del cfgupdate[nodename]['deployment.apiarmed'] + cfgupdate = {nodename: {'crypted.selfapikey': {'hashvalue': echotoken}}} self.cfm.set_node_attributes(cfgupdate) client.recv(2) # drain end of message client.send(b'\x05\x00') # report success + if hmackey and apiarmed != 'continuous': + self.cfm.clear_node_attributes([nodename], ['secret.selfapiarmtoken']) + if apiarmed != 'continuous': + tokclear = {nodename: {'deployment.sealedapikey': '', 'deployment.apiarmed': ''}} + self.cfm.set_node_attributes(tokclear) finally: client.close() diff --git a/confluent_server/confluent/discovery/core.py b/confluent_server/confluent/discovery/core.py index dfb50b9f..fd302f8b 100644 --- a/confluent_server/confluent/discovery/core.py +++ b/confluent_server/confluent/discovery/core.py @@ -74,6 +74,8 @@ import confluent.discovery.handlers.tsm as tsm import confluent.discovery.handlers.pxe as pxeh import confluent.discovery.handlers.smm as smm import confluent.discovery.handlers.xcc as xcc +import confluent.discovery.handlers.xcc3 as xcc3 +import confluent.discovery.handlers.megarac as megarac import confluent.exceptions as exc import confluent.log as log import confluent.messages as msg @@ -113,6 +115,8 @@ nodehandlers = { 'service:lenovo-smm': smm, 'service:lenovo-smm2': smm, 'lenovo-xcc': xcc, + 'lenovo-xcc3': xcc3, + 'megarac-bmc': megarac, 'service:management-hardware.IBM:integrated-management-module2': imm, 'pxe-client': pxeh, 'onie-switch': None, @@ -132,6 +136,8 @@ servicenames = { 'service:lenovo-smm2': 'lenovo-smm2', 'affluent-switch': 'affluent-switch', 'lenovo-xcc': 'lenovo-xcc', + 'lenovo-xcc3': 'lenovo-xcc3', + 'megarac-bmc': 'megarac-bmc', #'openbmc': 'openbmc', 'service:management-hardware.IBM:integrated-management-module2': 'lenovo-imm2', 'service:io-device.Lenovo:management-module': 'lenovo-switch', @@ -147,6 +153,8 @@ servicebyname = { 'lenovo-smm2': 'service:lenovo-smm2', 'affluent-switch': 'affluent-switch', 'lenovo-xcc': 'lenovo-xcc', + 'lenovo-xcc3': 'lenovo-xcc3', + 'megarac-bmc': 'megarac-bmc', 'lenovo-imm2': 'service:management-hardware.IBM:integrated-management-module2', 'lenovo-switch': 'service:io-device.Lenovo:management-module', 'thinkagile-storage': 'service:thinkagile-storagebmc', @@ -453,7 +461,7 @@ def iterate_addrs(addrs, countonly=False): yield 1 return yield addrs - + def _parameterize_path(pathcomponents): listrequested = False childcoll = True @@ -542,7 +550,7 @@ def handle_api_request(configmanager, inputdata, operation, pathcomponents): if len(pathcomponents) > 2: raise Exception('TODO') currsubs = get_subscriptions() - return [msg.ChildCollection(x) for x in currsubs] + return [msg.ChildCollection(x) for x in currsubs] elif operation == 'retrieve': return handle_read_api_request(pathcomponents) elif (operation in ('update', 'create') and @@ -1703,3 +1711,4 @@ if __name__ == '__main__': start_detection() while True: eventlet.sleep(30) + diff --git a/confluent_server/confluent/discovery/handlers/megarac.py b/confluent_server/confluent/discovery/handlers/megarac.py new file mode 100644 index 00000000..d7d8786a --- /dev/null +++ 
b/confluent_server/confluent/discovery/handlers/megarac.py @@ -0,0 +1,51 @@ +# Copyright 2024 Lenovo +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import confluent.discovery.handlers.redfishbmc as redfishbmc +import eventlet.support.greendns + + +getaddrinfo = eventlet.support.greendns.getaddrinfo + + +class NodeHandler(redfishbmc.NodeHandler): + + def get_firmware_default_account_info(self): + return ('admin', 'admin') + + +def remote_nodecfg(nodename, cfm): + cfg = cfm.get_node_attributes( + nodename, 'hardwaremanagement.manager') + ipaddr = cfg.get(nodename, {}).get('hardwaremanagement.manager', {}).get( + 'value', None) + ipaddr = ipaddr.split('/', 1)[0] + ipaddr = getaddrinfo(ipaddr, 0)[0][-1] + if not ipaddr: + raise Exception('Cannot remote configure a system without known ' + 'address') + info = {'addresses': [ipaddr]} + nh = NodeHandler(info, cfm) + nh.config(nodename) + + +if __name__ == '__main__': + import confluent.config.configmanager as cfm + c = cfm.ConfigManager(None) + import sys + info = {'addresses': [[sys.argv[1]]]} + print(repr(info)) + testr = NodeHandler(info, c) + testr.config(sys.argv[2]) + diff --git a/confluent_server/confluent/discovery/handlers/redfishbmc.py b/confluent_server/confluent/discovery/handlers/redfishbmc.py new file mode 100644 index 00000000..7cf3f3d1 --- /dev/null +++ b/confluent_server/confluent/discovery/handlers/redfishbmc.py @@ -0,0 +1,321 @@ +# Copyright 2024 Lenovo +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
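+# This module is a generic Redfish BMC onboarding handler; vendor handlers (xcc3, megarac) subclass it and override get_firmware_default_account_info() to supply their factory credentials. +# _get_wc() probes the credential states a freshly discovered BMC may be in (factory defaults, a forced PasswordChangeRequired flow, or credentials already set by confluent) and returns an authenticated web client; config() then renames and re-passwords the account, optionally re-enables IPMI, and applies static network settings. +# A minimal sketch of a vendor subclass (the default credentials shown are hypothetical): +# class NodeHandler(redfishbmc.NodeHandler): +# devname = 'BMC' +# def get_firmware_default_account_info(self): +# return ('root', '0penBmc')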
+ +import confluent.discovery.handlers.generic as generic +import confluent.exceptions as exc +import confluent.netutil as netutil +import confluent.util as util +import eventlet +import eventlet.support.greendns +import json +try: + from urllib import urlencode +except ImportError: + from urllib.parse import urlencode + +getaddrinfo = eventlet.support.greendns.getaddrinfo + +webclient = eventlet.import_patched('pyghmi.util.webclient') + +def get_host_interface_urls(wc, mginfo): + returls = [] + hifurl = mginfo.get('HostInterfaces', {}).get('@odata.id', None) + if not hifurl: + return None + hifinfo = wc.grab_json_response(hifurl) + hifurls = hifinfo.get('Members', []) + for hifurl in hifurls: + hifurl = hifurl['@odata.id'] + hifinfo = wc.grab_json_response(hifurl) + acturl = hifinfo.get('ManagerEthernetInterface', {}).get('@odata.id', None) + if acturl: + returls.append(acturl) + return returls + + +class NodeHandler(generic.NodeHandler): + devname = 'BMC' + + def __init__(self, info, configmanager): + self.trieddefault = None + self.targuser = None + self.curruser = None + self.currpass = None + self.targpass = None + self.nodename = None + self.csrftok = None + self.channel = None + self.atdefault = True + self._srvroot = None + self._mgrinfo = None + super(NodeHandler, self).__init__(info, configmanager) + + def srvroot(self, wc): + if not self._srvroot: + srvroot, status = wc.grab_json_response_with_status('/redfish/v1/') + if status == 200: + self._srvroot = srvroot + return self._srvroot + + def mgrinfo(self, wc): + if not self._mgrinfo: + mgrs = self.srvroot(wc)['Managers']['@odata.id'] + rsp = wc.grab_json_response(mgrs) + if len(rsp['Members']) != 1: + raise Exception("Can not handle multiple Managers") + mgrurl = rsp['Members'][0]['@odata.id'] + self._mgrinfo = wc.grab_json_response(mgrurl) + return self._mgrinfo + + + def get_firmware_default_account_info(self): + raise Exception('This must be subclassed') + + def scan(self): + c = webclient.SecureHTTPConnection(self.ipaddr, 443, verifycallback=self.validate_cert) + i = c.grab_json_response('/redfish/v1/') + uuid = i.get('UUID', None) + if uuid: + self.info['uuid'] = uuid.lower() + + def validate_cert(self, certificate): + # broadly speaking, merely checks consistency moment to moment, + # but if https_cert gets stricter, this check means something + fprint = util.get_fingerprint(self.https_cert) + return util.cert_matches(fprint, certificate) + + def enable_ipmi(self, wc): + npu = self.mgrinfo(wc).get( + 'NetworkProtocol', {}).get('@odata.id', None) + if not npu: + raise Exception('Cannot enable IPMI, no NetworkProtocol on BMC') + npi = wc.grab_json_response(npu) + if not npi.get('IPMI', {}).get('ProtocolEnabled'): + wc.set_header('If-Match', '*') + wc.grab_json_response_with_status( + npu, {'IPMI': {'ProtocolEnabled': True}}, method='PATCH') + acctinfo = wc.grab_json_response_with_status( + self.target_account_url(wc)) + acctinfo = acctinfo[0] + actypes = acctinfo['AccountTypes'] + candidates = acctinfo['AccountTypes@Redfish.AllowableValues'] + if 'IPMI' not in actypes and 'IPMI' in candidates: + actypes.append('IPMI') + acctupd = { + 'AccountTypes': actypes, + 'Password': self.currpass, + } + rsp = wc.grab_json_response_with_status( + self.target_account_url(wc), acctupd, method='PATCH') + + def _get_wc(self): + defuser, defpass = self.get_firmware_default_account_info() + wc = webclient.SecureHTTPConnection(self.ipaddr, 443, verifycallback=self.validate_cert) + wc.set_basic_credentials(defuser, defpass) + 
wc.set_header('Content-Type', 'application/json') + wc.set_header('Accept', 'application/json') + authmode = 0 + if not self.trieddefault: + rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers') + if status == 403: + self.trieddefault = True + chgurl = None + rsp = json.loads(rsp) + currerr = rsp.get('error', {}) + ecode = currerr.get('code', None) + if ecode and ecode.endswith('PasswordChangeRequired'): + for einfo in currerr.get('@Message.ExtendedInfo', []): + if einfo.get('MessageId', None).endswith('PasswordChangeRequired'): + for msgarg in einfo.get('MessageArgs'): + chgurl = msgarg + break + if chgurl: + if self.targpass == defpass: + raise Exception("Must specify a non-default password to onboard this BMC") + wc.set_header('If-Match', '*') + cpr = wc.grab_json_response_with_status(chgurl, {'Password': self.targpass}, method='PATCH') + if cpr[1] >= 200 and cpr[1] < 300: + self.curruser = defuser + self.currpass = self.targpass + wc.set_basic_credentials(self.curruser, self.currpass) + _, status = wc.grab_json_response_with_status('/redfish/v1/Managers') + tries = 10 + while status >= 300 and tries: + eventlet.sleep(1) + _, status = wc.grab_json_response_with_status('/redfish/v1/Managers') + return wc + + if status > 400: + self.trieddefault = True + if status == 401: + wc.set_basic_credentials(defuser, self.targpass) + rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers') + if status == 200: # Default user still, but targpass + self.currpass = self.targpass + self.curruser = defuser + return wc + elif self.targuser != defuser: + wc.set_basic_credentials(self.targuser, self.targpass) + rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers') + if status != 200: + raise Exception("Target BMC does not recognize the firmware default credentials or the confluent stored credential") + else: + self.curruser = defuser + self.currpass = defpass + return wc + if self.curruser: + wc.set_basic_credentials(self.curruser, self.currpass) + rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers') + if status != 200: + return None + return wc + wc.set_basic_credentials(self.targuser, self.targpass) + rsp, status = wc.grab_json_response_with_status('/redfish/v1/Managers') + if status != 200: + return None + self.curruser = self.targuser + self.currpass = self.targpass + return wc + + def target_account_url(self, wc): + asrv = self.srvroot(wc).get('AccountService', {}).get('@odata.id') + rsp, status = wc.grab_json_response_with_status(asrv) + accts = rsp.get('Accounts', {}).get('@odata.id') + rsp, status = wc.grab_json_response_with_status(accts) + accts = rsp.get('Members', []) + for accturl in accts: + accturl = accturl.get('@odata.id', '') + if accturl: + rsp, status = wc.grab_json_response_with_status(accturl) + if rsp.get('UserName', None) == self.curruser: + targaccturl = accturl + break + else: + raise Exception("Unable to identify Account URL to modify on this BMC") + return targaccturl + + def config(self, nodename): + mgrs = None + self.nodename = nodename + creds = self.configmanager.get_node_attributes( + nodename, ['secret.hardwaremanagementuser', + 'secret.hardwaremanagementpassword', + 'hardwaremanagement.manager', + 'hardwaremanagement.method', + 'console.method'], + True) + cd = creds.get(nodename, {}) + defuser, defpass = self.get_firmware_default_account_info() + user, passwd, _ = self.get_node_credentials( + nodename, creds, defuser, defpass) + user = util.stringify(user) + passwd = util.stringify(passwd) + self.targuser = user +
self.targpass = passwd + wc = self._get_wc() + curruserinfo = {} + authupdate = {} + wc.set_header('Content-Type', 'application/json') + if user != self.curruser: + authupdate['UserName'] = user + if passwd != self.currpass: + authupdate['Password'] = passwd + if authupdate: + targaccturl = self.target_account_url(wc) + rsp, status = wc.grab_json_response_with_status(targaccturl, authupdate, method='PATCH') + if status >= 300: + raise Exception("Failed attempting to update credentials on BMC") + self.curruser = user + self.currpass = passwd + wc.set_basic_credentials(user, passwd) + _, status = wc.grab_json_response_with_status('/redfish/v1/Managers') + tries = 10 + while tries and status >= 300: + tries -= 1 + eventlet.sleep(1.0) + _, status = wc.grab_json_response_with_status( + '/redfish/v1/Managers') + if (cd.get('hardwaremanagement.method', {}).get('value', 'ipmi') != 'redfish' + or cd.get('console.method', {}).get('value', None) == 'ipmi'): + self.enable_ipmi(wc) + if ('hardwaremanagement.manager' in cd and + cd['hardwaremanagement.manager']['value'] and + not cd['hardwaremanagement.manager']['value'].startswith( + 'fe80::')): + newip = cd['hardwaremanagement.manager']['value'] + newip = newip.split('/', 1)[0] + newipinfo = getaddrinfo(newip, 0)[0] + newip = newipinfo[-1][0] + if ':' in newip: + raise exc.NotImplementedException('IPv6 remote config TODO') + hifurls = get_host_interface_urls(wc, self.mgrinfo(wc)) + mgtnicinfo = self.mgrinfo(wc)['EthernetInterfaces']['@odata.id'] + mgtnicinfo = wc.grab_json_response(mgtnicinfo) + mgtnics = [x['@odata.id'] for x in mgtnicinfo.get('Members', [])] + actualnics = [] + for candnic in mgtnics: + if candnic in hifurls: + continue + actualnics.append(candnic) + if len(actualnics) != 1: + raise Exception("Multi-interface BMCs are not supported currently") + currnet = wc.grab_json_response(actualnics[0]) + netconfig = netutil.get_nic_config(self.configmanager, nodename, ip=newip) + newconfig = { + "Address": newip, + "SubnetMask": netutil.cidr_to_mask(netconfig['prefix']), + } + newgw = netconfig['ipv4_gateway'] + if newgw: + newconfig['Gateway'] = newgw + else: + newconfig['Gateway'] = newip # required property, set to self just to have a value + for net in currnet.get("IPv4Addresses", []): + if net["Address"] == newip and net["SubnetMask"] == newconfig['SubnetMask'] and (not newgw or net.get("Gateway", None) == newgw): + break + else: + wc.set_header('If-Match', '*') + rsp, status = wc.grab_json_response_with_status(actualnics[0], { + 'DHCPv4': {'DHCPEnabled': False}, + 'IPv4StaticAddresses': [newconfig]}, method='PATCH') + elif self.ipaddr.startswith('fe80::'): + self.configmanager.set_node_attributes( + {nodename: {'hardwaremanagement.manager': self.ipaddr}}) + else: + raise exc.TargetEndpointUnreachable( + 'hardwaremanagement.manager must be set to desired address (No IPv6 Link Local detected)') + + +def remote_nodecfg(nodename, cfm): + cfg = cfm.get_node_attributes( + nodename, 'hardwaremanagement.manager') + ipaddr = cfg.get(nodename, {}).get('hardwaremanagement.manager', {}).get( + 'value', None) + ipaddr = ipaddr.split('/', 1)[0] + ipaddr = getaddrinfo(ipaddr, 0)[0][-1] + if not ipaddr: + raise Exception('Cannot remote configure a system without known ' + 'address') + info = {'addresses': [ipaddr]} + nh = NodeHandler(info, cfm) + nh.config(nodename) + +if __name__ == '__main__': + import confluent.config.configmanager as cfm + c = cfm.ConfigManager(None) + import sys + info = {'addresses': [[sys.argv[1]]] } + print(repr(info)) + testr =
NodeHandler(info, c) + testr.config(sys.argv[2]) diff --git a/confluent_server/confluent/discovery/handlers/xcc.py b/confluent_server/confluent/discovery/handlers/xcc.py index 23f31fb0..ef009b6d 100644 --- a/confluent_server/confluent/discovery/handlers/xcc.py +++ b/confluent_server/confluent/discovery/handlers/xcc.py @@ -247,6 +247,10 @@ class NodeHandler(immhandler.NodeHandler): if rsp.status == 200: pwdchanged = True password = newpassword + wc.set_header('Authorization', 'Bearer ' + rspdata['access_token']) + if '_csrf_token' in wc.cookies: + wc.set_header('X-XSRF-TOKEN', wc.cookies['_csrf_token']) + wc.grab_json_response_with_status('/api/providers/logout') else: if rspdata.get('locktime', 0) > 0: raise LockedUserException( @@ -280,6 +284,7 @@ class NodeHandler(immhandler.NodeHandler): rsp.read() if rsp.status != 200: return (None, None) + wc.grab_json_response_with_status('/api/providers/logout') self._currcreds = (username, newpassword) wc.set_basic_credentials(username, newpassword) pwdchanged = True @@ -403,6 +408,34 @@ class NodeHandler(immhandler.NodeHandler): if user['users_user_name'] == '': return user['users_user_id'] + def create_tmp_account(self, wc): + rsp, status = wc.grab_json_response_with_status('/redfish/v1/AccountService/Accounts') + if status != 200: + raise Exception("Unable to list current accounts") + usednames = set([]) + tmpnam = '6pmu0ezczzcp' + tpass = base64.b64encode(os.urandom(9)).decode() + 'Iw47$' + ntpass = base64.b64encode(os.urandom(9)).decode() + 'Iw47$' + for acct in rsp.get("Members", []): + url = acct.get("@odata.id", None) + if url: + uinfo = wc.grab_json_response(url) + usednames.add(uinfo.get('UserName', None)) + if tmpnam in usednames: + raise Exception("Tmp account already exists") + rsp, status = wc.grab_json_response_with_status( + '/redfish/v1/AccountService/Accounts', + {'UserName': tmpnam, 'Password': tpass, 'RoleId': 'Administrator'}) + if status >= 300: + raise Exception("Failure creating tmp account: " + repr(rsp)) + tmpurl = rsp['@odata.id'] + wc.set_basic_credentials(tmpnam, tpass) + rsp, status = wc.grab_json_response_with_status( + tmpurl, {'Password': ntpass}, method='PATCH') + wc.set_basic_credentials(tmpnam, ntpass) + return tmpurl + + def _setup_xcc_account(self, username, passwd, wc): userinfo = wc.grab_json_response('/api/dataset/imm_users') uid = None @@ -434,18 +467,35 @@ class NodeHandler(immhandler.NodeHandler): '/api/function', {'USER_UserModify': '{0},{1},,1,4,0,0,0,0,,8,,,'.format(uid, username)}) if status == 200 and rsp.get('return', 0) == 13: + wc.grab_json_response('/api/providers/logout') wc.set_basic_credentials(self._currcreds[0], self._currcreds[1]) status = 503 + tries = 2 + tmpaccount = None while status != 200: + tries -= 1 rsp, status = wc.grab_json_response_with_status( '/redfish/v1/AccountService/Accounts/{0}'.format(uid), {'UserName': username}, method='PATCH') if status != 200: rsp = json.loads(rsp) - if rsp.get('error', {}).get('code', 'Unknown') in ('Base.1.8.GeneralError', 'Base.1.12.GeneralError'): - eventlet.sleep(10) + if rsp.get('error', {}).get('code', 'Unknown') in ('Base.1.8.GeneralError', 'Base.1.12.GeneralError', 'Base.1.14.GeneralError'): + if tries: + eventlet.sleep(4) + elif tmpaccount: + wc.grab_json_response_with_status(tmpaccount, method='DELETE') + raise Exception('Failed renaming main account') + else: + tmpaccount = self.create_tmp_account(wc) + tries = 8 else: break + if tmpaccount: + wc.set_basic_credentials(username, passwd) + wc.grab_json_response_with_status(tmpaccount, 
method='DELETE') + self.tmppasswd = None + self._currcreds = (username, passwd) + return self.tmppasswd = None wc.grab_json_response('/api/providers/logout') self._currcreds = (username, passwd) @@ -596,7 +646,10 @@ statargs['ENET_IPv4GatewayIPAddr'] = netconfig['ipv4_gateway'] elif not netutil.address_is_local(newip): raise exc.InvalidArgumentException('Will not remotely configure a device with no gateway') - wc.grab_json_response('/api/dataset', statargs) + netset, status = wc.grab_json_response_with_status('/api/dataset', statargs) + + elif self.ipaddr.startswith('fe80::'): self.configmanager.set_node_attributes( {nodename: {'hardwaremanagement.manager': self.ipaddr}}) @@ -627,8 +680,9 @@ def remote_nodecfg(nodename, cfm): ipaddr = ipaddr.split('/', 1)[0] ipaddr = getaddrinfo(ipaddr, 0)[0][-1] if not ipaddr: - raise Excecption('Cannot remote configure a system without known ' + raise Exception('Cannot remote configure a system without known ' 'address') info = {'addresses': [ipaddr]} nh = NodeHandler(info, cfm) nh.config(nodename) + diff --git a/confluent_server/confluent/discovery/handlers/xcc3.py b/confluent_server/confluent/discovery/handlers/xcc3.py new file mode 100644 index 00000000..050186e9 --- /dev/null +++ b/confluent_server/confluent/discovery/handlers/xcc3.py @@ -0,0 +1,104 @@ +# Copyright 2024 Lenovo +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
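+# Discovery handler for BMCs advertising the 'lenovo-xcc3' service. It reuses the redfishbmc onboarding logic, supplying the XCC firmware default account (USERID/PASSW0RD), and adds a scan() that folds discovery attributes (room-id, rack-id, lowest-u, location, enclosure machine type/serial, dense-chassis slot) into self.info, normalizing 32-character flat '-uuid' values into hyphenated lowercase form.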
+ +import confluent.discovery.handlers.redfishbmc as redfishbmc +import eventlet.support.greendns +import confluent.util as util + +webclient = eventlet.import_patched('pyghmi.util.webclient') + + + +getaddrinfo = eventlet.support.greendns.getaddrinfo + + +class NodeHandler(redfishbmc.NodeHandler): + devname = 'XCC' + + def get_firmware_default_account_info(self): + return ('USERID', 'PASSW0RD') + + def scan(self): + ip, port = self.get_web_port_and_ip() + c = webclient.SecureHTTPConnection(ip, port, + verifycallback=self.validate_cert) + c.set_header('Accept', 'application/json') + i = c.grab_json_response('/api/providers/logoninfo') + modelname = i.get('items', [{}])[0].get('machine_name', None) + if modelname: + self.info['modelname'] = modelname + for attrname in list(self.info.get('attributes', {})): + val = self.info['attributes'][attrname] + if '-uuid' == attrname[-5:] and len(val) == 32: + val = val.lower() + self.info['attributes'][attrname] = '-'.join([val[:8], val[8:12], val[12:16], val[16:20], val[20:]]) + attrs = self.info.get('attributes', {}) + room = attrs.get('room-id', None) + if room: + self.info['room'] = room + rack = attrs.get('rack-id', None) + if rack: + self.info['rack'] = rack + name = attrs.get('name', None) + if name: + self.info['hostname'] = name + unumber = attrs.get('lowest-u', None) + if unumber: + self.info['u'] = unumber + location = attrs.get('location', None) + if location: + self.info['location'] = location + mtm = attrs.get('enclosure-machinetype-model', None) + if mtm: + self.info['modelnumber'] = mtm.strip() + sn = attrs.get('enclosure-serial-number', None) + if sn: + self.info['serialnumber'] = sn.strip() + if attrs.get('enclosure-form-factor', None) == 'dense-computing': + encuuid = attrs.get('chassis-uuid', None) + if encuuid: + # 'chassis-uuid' ends in '-uuid', so the loop above has already normalized it to hyphenated lowercase form + self.info['enclosure.uuid'] = encuuid + slot = int(attrs.get('slot', 0)) + if slot != 0: + self.info['enclosure.bay'] = slot + + def validate_cert(self, certificate): + fprint = util.get_fingerprint(self.https_cert) + return util.cert_matches(fprint, certificate) + + +def remote_nodecfg(nodename, cfm): + cfg = cfm.get_node_attributes( + nodename, 'hardwaremanagement.manager') + ipaddr = cfg.get(nodename, {}).get('hardwaremanagement.manager', {}).get( + 'value', None) + ipaddr = ipaddr.split('/', 1)[0] + ipaddr = getaddrinfo(ipaddr, 0)[0][-1] + if not ipaddr: + raise Exception('Cannot remote configure a system without known ' + 'address') + info = {'addresses': [ipaddr]} + nh = NodeHandler(info, cfm) + nh.config(nodename) + + +if __name__ == '__main__': + import confluent.config.configmanager as cfm + c = cfm.ConfigManager(None) + import sys + info = {'addresses': [[sys.argv[1]]]} + print(repr(info)) + testr = NodeHandler(info, c) + testr.config(sys.argv[2]) + diff --git a/confluent_server/confluent/discovery/protocols/pxe.py b/confluent_server/confluent/discovery/protocols/pxe.py index a9a07963..64e64c79 100644 --- a/confluent_server/confluent/discovery/protocols/pxe.py +++ b/confluent_server/confluent/discovery/protocols/pxe.py @@ -315,9 +315,9 @@ def proxydhcp(handler, nodeguess): optidx = rqv.tobytes().index(b'\x63\x82\x53\x63') + 4 except ValueError: continue - hwlen = rq[2] - opts, disco = opts_to_dict(rq, optidx, 3) - disco['hwaddr'] = ':'.join(['{0:02x}'.format(x) for x in rq[28:28+hwlen]]) + hwlen = rqv[2] + opts, disco = opts_to_dict(rqv, optidx, 3) + disco['hwaddr'] = ':'.join(['{0:02x}'.format(x) for x in rqv[28:28+hwlen]]) node = None if disco.get('hwaddr', None) in macmap: node =
macmap[disco['hwaddr']] @@ -346,7 +346,7 @@ def proxydhcp(handler, nodeguess): profile = None if not myipn: myipn = socket.inet_aton(recv) - profile = get_deployment_profile(node, cfg) + profile, stgprofile = get_deployment_profile(node, cfg) if profile: log.log({ 'info': 'Offering proxyDHCP boot from {0} to {1} ({2})'.format(recv, node, client[0])}) @@ -356,7 +356,7 @@ def proxydhcp(handler, nodeguess): continue if opts.get(77, None) == b'iPXE': if not profile: - profile = get_deployment_profile(node, cfg) + profile, stgprofile = get_deployment_profile(node, cfg) if not profile: log.log({'info': 'No pending profile for {0}, skipping proxyDHCP reply'.format(node)}) continue @@ -385,8 +385,9 @@ def proxydhcp(handler, nodeguess): rpv[268:280] = b'\x3c\x09PXEClient\xff' net4011.sendto(rpv[:281], client) except Exception as e: - tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event, - event=log.Events.stacktrace) + log.logtrace() + # tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event, + # event=log.Events.stacktrace) def start_proxydhcp(handler, nodeguess=None): @@ -453,13 +454,14 @@ def snoop(handler, protocol=None, nodeguess=None): # with try/except if i < 64: continue - _, level, typ = struct.unpack('QII', cmsgarr[:16]) - if level == socket.IPPROTO_IP and typ == IP_PKTINFO: - idx, recv = struct.unpack('II', cmsgarr[16:24]) - recv = ipfromint(recv) - rqv = memoryview(rawbuffer)[:i] if rawbuffer[0] == 1: # Boot request - process_dhcp4req(handler, nodeguess, cfg, net4, idx, recv, rqv) + _, level, typ = struct.unpack('QII', cmsgarr[:16]) + if level == socket.IPPROTO_IP and typ == IP_PKTINFO: + idx, recv = struct.unpack('II', cmsgarr[16:24]) + recv = ipfromint(recv) + rqv = memoryview(rawbuffer)[:i] + client = (ipfromint(clientaddr.sin_addr.s_addr), socket.htons(clientaddr.sin_port)) + process_dhcp4req(handler, nodeguess, cfg, net4, idx, recv, rqv, client) elif netc == net6: recv = 'ff02::1:2' pkt, addr = netc.recvfrom(2048) @@ -476,6 +478,10 @@ def snoop(handler, protocol=None, nodeguess=None): tracelog.log(traceback.format_exc(), ltype=log.DataTypes.event, event=log.Events.stacktrace) + +_mac_to_uuidmap = {} + + def process_dhcp6req(handler, rqv, addr, net, cfg, nodeguess): ip = addr[0] req, disco = v6opts_to_dict(bytearray(rqv[4:])) @@ -501,7 +507,7 @@ def process_dhcp6req(handler, rqv, addr, net, cfg, nodeguess): handler(info) consider_discover(info, req, net, cfg, None, nodeguess, addr) -def process_dhcp4req(handler, nodeguess, cfg, net4, idx, recv, rqv): +def process_dhcp4req(handler, nodeguess, cfg, net4, idx, recv, rqv, client): rq = bytearray(rqv) addrlen = rq[2] if addrlen > 16 or addrlen == 0: @@ -531,7 +537,12 @@ def process_dhcp4req(handler, nodeguess, cfg, net4, idx, recv, rqv): # We will fill out service to have something to byte into, # but the nature of the beast is that we do not have peers, # so that will not be present for a pxe snoop - info = {'hwaddr': netaddr, 'uuid': disco['uuid'], + theuuid = disco['uuid'] + if theuuid: + _mac_to_uuidmap[netaddr] = theuuid + elif netaddr in _mac_to_uuidmap: + theuuid = _mac_to_uuidmap[netaddr] + info = {'hwaddr': netaddr, 'uuid': theuuid, 'architecture': disco['arch'], 'netinfo': {'ifidx': idx, 'recvip': recv, 'txid': txid}, 'services': ('pxe-client',)} @@ -539,7 +550,7 @@ def process_dhcp4req(handler, nodeguess, cfg, net4, idx, recv, rqv): and time.time() > ignoredisco.get(netaddr, 0) + 90): ignoredisco[netaddr] = time.time() handler(info) - consider_discover(info, rqinfo, net4, cfg, rqv, nodeguess) + 
consider_discover(info, rqinfo, net4, cfg, rqv, nodeguess, requestor=client) @@ -583,26 +594,34 @@ def get_deployment_profile(node, cfg, cfd=None): if not cfd: cfd = cfg.get_node_attributes(node, ('deployment.*', 'collective.managercandidates')) profile = cfd.get(node, {}).get('deployment.pendingprofile', {}).get('value', None) - if not profile: - return None - candmgrs = cfd.get(node, {}).get('collective.managercandidates', {}).get('value', None) - if candmgrs: - candmgrs = noderange.NodeRange(candmgrs, cfg).nodes - if collective.get_myname() not in candmgrs: - return None - return profile + stgprofile = cfd.get(node, {}).get('deployment.stagedprofile', {}).get('value', None) + if profile or stgprofile: + candmgrs = cfd.get(node, {}).get('collective.managercandidates', {}).get('value', None) + if candmgrs: + try: + candmgrs = noderange.NodeRange(candmgrs, cfg).nodes + except Exception: # fallback to unverified noderange + candmgrs = noderange.NodeRange(candmgrs).nodes + if collective.get_myname() not in candmgrs: + return None, None + return profile, stgprofile staticassigns = {} myipbypeer = {} -def check_reply(node, info, packet, sock, cfg, reqview, addr): - httpboot = info['architecture'] == 'uefi-httpboot' +def check_reply(node, info, packet, sock, cfg, reqview, addr, requestor): + if not requestor: + requestor = ('0.0.0.0', None) + if requestor[0] == '0.0.0.0' and not info.get('uuid', None): + return # ignore DHCP from local non-PXE segment + httpboot = info.get('architecture', None) == 'uefi-httpboot' cfd = cfg.get_node_attributes(node, ('deployment.*', 'collective.managercandidates')) - profile = get_deployment_profile(node, cfg, cfd) - if not profile: + profile, stgprofile = get_deployment_profile(node, cfg, cfd) + if ((not profile) + and (requestor[0] == '0.0.0.0' or not stgprofile)): if time.time() > ignoremacs.get(info['hwaddr'], 0) + 90: ignoremacs[info['hwaddr']] = time.time() log.log({'info': 'Ignoring boot attempt by {0} no deployment profile specified (uuid {1}, hwaddr {2})'.format( - node, info['uuid'], info['hwaddr'] + node, info.get('uuid', 'NA'), info['hwaddr'] )}) return if addr: @@ -611,7 +630,7 @@ def check_reply(node, info, packet, sock, cfg, reqview, addr): return return reply_dhcp6(node, addr, cfg, packet, cfd, profile, sock) else: - return reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile) + return reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile, sock, requestor) def reply_dhcp6(node, addr, cfg, packet, cfd, profile, sock): myaddrs = netutil.get_my_addresses(addr[-1], socket.AF_INET6) @@ -648,14 +667,16 @@ def reply_dhcp6(node, addr, cfg, packet, cfd, profile, sock): ipass[4:16] = b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x18' ipass[16:32] = socket.inet_pton(socket.AF_INET6, ipv6addr) ipass[32:40] = b'\x00\x00\x00\x78\x00\x00\x01\x2c' - elif (not packet['vci']) or not packet['vci'].startswith('HTTPClient:Arch:'): - return # do not send ip-less replies to anything but HTTPClient specifically - #1 msgtype - #3 txid - #22 - server ident - #len(packet[1]) + 4 - client ident - #len(ipass) + 4 or 0 - #len(url) + 4 + elif (not packet['vci']) or not packet['vci'].startswith( + 'HTTPClient:Arch:'): + # do not send ip-less replies to anything but HTTPClient specifically + return + # 1 msgtype + # 3 txid + # 22 - server ident + # len(packet[1]) + 4 - client ident + # len(ipass) + 4 or 0 + # len(url) + 4 replylen = 50 + len(bootfile) + len(packet[1]) + 4 if len(ipass): replylen += len(ipass) @@ -695,26 +716,31 @@ def 
get_my_duid(): return _myuuid -def reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile): +def reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile, sock=None, requestor=None): replen = 275 # default is going to be 286 # while myipn is describing presumed destination, it's really # vague in the face of aliases, need to convert to ifidx and evaluate # aliases for best match to guess - + isboot = True + if requestor is None: + requestor = ('0.0.0.0', None) + if info.get('architecture', None) is None: + isboot = False rqtype = packet[53][0] - insecuremode = cfd.get(node, {}).get('deployment.useinsecureprotocols', - {}).get('value', 'never') - if not insecuremode: - insecuremode = 'never' - if insecuremode == 'never' and not httpboot: - if rqtype == 1 and info['architecture']: - log.log( - {'info': 'Boot attempt by {0} detected in insecure mode, but ' - 'insecure mode is disabled. Set the attribute ' - '`deployment.useinsecureprotocols` to `firmware` or ' - '`always` to enable support, or use UEFI HTTP boot ' - 'with HTTPS.'.format(node)}) - return + if isboot: + insecuremode = cfd.get(node, {}).get('deployment.useinsecureprotocols', + {}).get('value', 'never') + if not insecuremode: + insecuremode = 'never' + if insecuremode == 'never' and not httpboot: + if rqtype == 1 and info.get('architecture', None): + log.log( + {'info': 'Boot attempt by {0} detected in insecure mode, but ' + 'insecure mode is disabled. Set the attribute ' + '`deployment.useinsecureprotocols` to `firmware` or ' + '`always` to enable support, or use UEFI HTTP boot ' + 'with HTTPS.'.format(node)}) + return reply = bytearray(512) repview = memoryview(reply) repview[:20] = iphdr @@ -725,9 +751,16 @@ def reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile): repview[1:10] = reqview[1:10] # duplicate txid, hwlen, and others repview[10:11] = b'\x80' # always set broadcast repview[28:44] = reqview[28:44] # copy chaddr field + relayip = reqview[24:28].tobytes() + if (not isboot) and relayip == b'\x00\x00\x00\x00': + # Ignore local DHCP packets if it isn't a firmware request + return + relayipa = None + if relayip != b'\x00\x00\x00\x00': + relayipa = socket.inet_ntoa(relayip) gateway = None netmask = None - niccfg = netutil.get_nic_config(cfg, node, ifidx=info['netinfo']['ifidx']) + niccfg = netutil.get_nic_config(cfg, node, ifidx=info['netinfo']['ifidx'], relayipn=relayip) nicerr = niccfg.get('error_msg', False) if nicerr: log.log({'error': nicerr}) @@ -751,7 +784,7 @@ def reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile): gateway = None netmask = (2**32 - 1) ^ (2**(32 - netmask) - 1) netmask = struct.pack('!I', netmask) - elif (not packet['vci']) or not (packet['vci'].startswith('HTTPClient:Arch:') or packet['vci'].startswith('PXEClient')): + elif (not packet.get('vci', None)) or not (packet['vci'].startswith('HTTPClient:Arch:') or packet['vci'].startswith('PXEClient')): return # do not send ip-less replies to anything but netboot specifically myipn = niccfg['deploy_server'] if not myipn: @@ -771,10 +804,19 @@ def reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile): node, profile, len(bootfile) - 127)}) return repview[108:108 + len(bootfile)] = bootfile + elif info.get('architecture', None) == 'uefi-aarch64' and packet.get(77, None) == b'iPXE': + if not profile: + profile, stgprofile = get_deployment_profile(node, cfg) + if not profile: + log.log({'info': 'No pending profile for {0}, skipping proxyDHCP reply'.format(node)}) + return + bootfile 
= 'http://{0}/confluent-public/os/{1}/boot.ipxe'.format(myipn, profile).encode('utf8') + repview[108:108 + len(bootfile)] = bootfile myip = myipn myipn = socket.inet_aton(myipn) orepview[12:16] = myipn repview[20:24] = myipn + repview[24:28] = relayip repview[236:240] = b'\x63\x82\x53\x63' repview[240:242] = b'\x35\x01' if rqtype == 1: # if discover, then offer @@ -785,17 +827,19 @@ def reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile): repview[245:249] = myipn repview[249:255] = b'\x33\x04\x00\x00\x00\xf0' # fixed short lease time repview[255:257] = b'\x61\x11' - repview[257:274] = packet[97] + if packet.get(97, None) is not None: + repview[257:274] = packet[97] # Note that sending PXEClient kicks off the proxyDHCP procedure, ignoring # boot filename and such in the DHCP packet # we will simply always do it to provide the boot payload in a consistent # matter to both dhcp-elsewhere and fixed ip clients - if info['architecture'] == 'uefi-httpboot': - repview[replen - 1:replen + 11] = b'\x3c\x0aHTTPClient' - replen += 12 - else: - repview[replen - 1:replen + 10] = b'\x3c\x09PXEClient' - replen += 11 + if isboot: + if info.get('architecture', None) == 'uefi-httpboot': + repview[replen - 1:replen + 11] = b'\x3c\x0aHTTPClient' + replen += 12 + else: + repview[replen - 1:replen + 10] = b'\x3c\x09PXEClient' + replen += 11 hwlen = bytearray(reqview[2:3].tobytes())[0] fulladdr = repview[28:28+hwlen].tobytes() myipbypeer[fulladdr] = myipn @@ -812,6 +856,14 @@ def reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile): repview[replen - 1:replen + 1] = b'\x03\x04' repview[replen + 1:replen + 5] = gateway replen += 6 + elif relayip != b'\x00\x00\x00\x00' and clipn: + log.log({'error': 'Relay DHCP offer to {} will fail due to missing gateway information'.format(node)}) + if 82 in packet: + reloptionslen = len(packet[82]) + reloptionshdr = struct.pack('BB', 82, reloptionslen) + repview[replen - 1:replen + 1] = reloptionshdr + repview[replen + 1:replen + reloptionslen + 1] = packet[82] + replen += 2 + reloptionslen repview[replen - 1:replen] = b'\xff' # end of options, should always be last byte repview = memoryview(reply) pktlen = struct.pack('!H', replen + 28) # ip+udp = 28 @@ -835,9 +887,19 @@ def reply_dhcp4(node, info, packet, cfg, reqview, httpboot, cfd, profile): ipinfo = 'with static address {0}'.format(niccfg['ipv4_address']) else: ipinfo = 'without address, served from {0}'.format(myip) - log.log({ - 'info': 'Offering {0} boot {1} to {2}'.format(boottype, ipinfo, node)}) - send_raw_packet(repview, replen + 28, reqview, info) + if relayipa: + ipinfo += ' (relayed to {} via {})'.format(relayipa, requestor[0]) + if isboot: + log.log({ + 'info': 'Offering {0} boot {1} to {2}'.format(boottype, ipinfo, node)}) + else: + log.log({ + 'info': 'Offering DHCP {} to {}'.format(ipinfo, node)}) + if relayip != b'\x00\x00\x00\x00': + sock.sendto(repview[28:28 + replen], requestor) + else: + send_raw_packet(repview, replen + 28, reqview, info) + def send_raw_packet(repview, replen, reqview, info): ifidx = info['netinfo']['ifidx'] @@ -862,9 +924,10 @@ def send_raw_packet(repview, replen, reqview, info): sendto(tsock.fileno(), pkt, replen, 0, ctypes.byref(targ), ctypes.sizeof(targ)) -def ack_request(pkt, rq, info): +def ack_request(pkt, rq, info, sock=None, requestor=None): hwlen = bytearray(rq[2:3].tobytes())[0] hwaddr = rq[28:28+hwlen].tobytes() + relayip = rq[24:28].tobytes() myipn = myipbypeer.get(hwaddr, None) if not myipn or pkt.get(54, None) != myipn: return @@ -883,15 
+946,20 @@ def ack_request(pkt, rq, info): repview[12:len(rply)].tobytes()) datasum = ~datasum & 0xffff repview[26:28] = struct.pack('!H', datasum) - send_raw_packet(repview, len(rply), rq, info) + if relayip != b'\x00\x00\x00\x00': + sock.sendto(repview[28:], requestor) + else: + send_raw_packet(repview, len(rply), rq, info) -def consider_discover(info, packet, sock, cfg, reqview, nodeguess, addr=None): - if info.get('hwaddr', None) in macmap and info.get('uuid', None): - check_reply(macmap[info['hwaddr']], info, packet, sock, cfg, reqview, addr) +def consider_discover(info, packet, sock, cfg, reqview, nodeguess, addr=None, requestor=None): + if packet.get(53, None) == b'\x03': + ack_request(packet, reqview, info, sock, requestor) + elif info.get('hwaddr', None) in macmap: # and info.get('uuid', None): + check_reply(macmap[info['hwaddr']], info, packet, sock, cfg, reqview, addr, requestor) elif info.get('uuid', None) in uuidmap: - check_reply(uuidmap[info['uuid']], info, packet, sock, cfg, reqview, addr) + check_reply(uuidmap[info['uuid']], info, packet, sock, cfg, reqview, addr, requestor) elif packet.get(53, None) == b'\x03': - ack_request(packet, reqview, info) + ack_request(packet, reqview, info, sock, requestor) elif info.get('uuid', None) and info.get('hwaddr', None): if time.time() > ignoremacs.get(info['hwaddr'], 0) + 90: ignoremacs[info['hwaddr']] = time.time() diff --git a/confluent_server/confluent/discovery/protocols/slp.py b/confluent_server/confluent/discovery/protocols/slp.py index e42c1577..3ca7cd01 100644 --- a/confluent_server/confluent/discovery/protocols/slp.py +++ b/confluent_server/confluent/discovery/protocols/slp.py @@ -246,11 +246,11 @@ def _find_srvtype(net, net4, srvtype, addresses, xid): try: net4.sendto(data, ('239.255.255.253', 427)) except socket.error as se: - # On occasion, multicasting may be disabled - # tolerate this scenario and move on - if se.errno != 101: - raise - net4.sendto(data, (bcast, 427)) + pass + try: + net4.sendto(data, (bcast, 427)) + except socket.error as se: + pass def _grab_rsps(socks, rsps, interval, xidmap, deferrals): @@ -411,7 +411,7 @@ def query_srvtypes(target): parsed = _parse_slp_header(rs) if parsed: payload = parsed['payload'] - if payload[:2] != '\x00\x00': + if payload[:2] != b'\x00\x00': return stypelen = struct.unpack('!H', bytes(payload[2:4]))[0] stypes = payload[4:4+stypelen].decode('utf-8') @@ -471,10 +471,13 @@ def snoop(handler, protocol=None): # socket in use can occur when aliased ipv4 are encountered net.bind(('', 427)) net4.bind(('', 427)) - + newmacs = set([]) + known_peers = set([]) + peerbymacaddress = {} + deferpeers = [] while True: try: - newmacs = set([]) + newmacs.clear() r, _, _ = select.select((net, net4), (), (), 60) # clear known_peers and peerbymacaddress # to avoid stale info getting in... 
@@ -482,14 +485,16 @@ # addresses that come close together # calling code needs to understand deeper context, as snoop # will now yield dupe info over time - known_peers = set([]) - peerbymacaddress = {} - deferpeers = [] + known_peers.clear() + peerbymacaddress.clear() + deferpeers.clear() while r and len(deferpeers) < 256: for s in r: (rsp, peer) = s.recvfrom(9000) if peer in known_peers: continue + if peer in deferpeers: + continue mac = neighutil.get_hwaddr(peer[0]) if not mac: probepeer = (peer[0], struct.unpack('H', os.urandom(2))[0] | 1025) + peer[2:] diff --git a/confluent_server/confluent/discovery/protocols/ssdp.py b/confluent_server/confluent/discovery/protocols/ssdp.py index 3c1edc74..5c27473b 100644 --- a/confluent_server/confluent/discovery/protocols/ssdp.py +++ b/confluent_server/confluent/discovery/protocols/ssdp.py @@ -60,6 +60,7 @@ def active_scan(handler, protocol=None): known_peers = set([]) for scanned in scan(['urn:dmtf-org:service:redfish-rest:1', 'urn::service:affluent']): for addr in scanned['addresses']: + addr = addr[0:1] + addr[2:] if addr in known_peers: break hwaddr = neighutil.get_hwaddr(addr[0]) @@ -79,13 +80,20 @@ def scan(services, target=None): def _process_snoop(peer, rsp, mac, known_peers, newmacs, peerbymacaddress, byehandler, machandlers, handler): - if mac in peerbymacaddress and peer not in peerbymacaddress[mac]['addresses']: - peerbymacaddress[mac]['addresses'].append(peer) + if mac in peerbymacaddress: + normpeer = peer[0:1] + peer[2:] + for currpeer in peerbymacaddress[mac]['addresses']: + currnormpeer = currpeer[0:1] + currpeer[2:] + if currnormpeer == normpeer: + break + else: + peerbymacaddress[mac]['addresses'].append(peer) else: peerdata = { 'hwaddr': mac, 'addresses': [peer], } + targurl = None for headline in rsp[1:]: if not headline: continue @@ -105,13 +113,21 @@ def _process_snoop(peer, rsp, mac, known_peers, newmacs, peerbymacaddress, byeha if not value.endswith('/redfish/v1/'): return elif header == 'LOCATION': - if not value.endswith('/DeviceDescription.json'): + if '/eth' in value and value.endswith('.xml'): + targurl = '/redfish/v1/' + targtype = 'megarac-bmc' + continue # MegaRAC redfish + elif value.endswith('/DeviceDescription.json'): + targurl = '/DeviceDescription.json' + targtype = 'lenovo-xcc' + continue + else: return - if handler: - eventlet.spawn_n(check_fish_handler, handler, peerdata, known_peers, newmacs, peerbymacaddress, machandlers, mac, peer) + if handler and targurl: + eventlet.spawn_n(check_fish_handler, handler, peerdata, known_peers, newmacs, peerbymacaddress, machandlers, mac, peer, targurl, targtype) -def check_fish_handler(handler, peerdata, known_peers, newmacs, peerbymacaddress, machandlers, mac, peer): - retdata = check_fish(('/DeviceDescription.json', peerdata)) +def check_fish_handler(handler, peerdata, known_peers, newmacs, peerbymacaddress, machandlers, mac, peer, targurl, targtype): + retdata = check_fish((targurl, peerdata, targtype)) if retdata: known_peers.add(peer) newmacs.add(mac) @@ -164,11 +180,14 @@ def snoop(handler, byehandler=None, 
protocol=None, uuidlookup=None): net4.bind(('', 1900)) net6.bind(('', 1900)) peerbymacaddress = {} + newmacs = set([]) + deferrednotifies = [] + machandlers = {} while True: try: - newmacs = set([]) - deferrednotifies = [] - machandlers = {} + newmacs.clear() + deferrednotifies.clear() + machandlers.clear() r = select.select((net4, net6), (), (), 60) if r: r = r[0] @@ -251,7 +270,10 @@ def snoop(handler, byehandler=None, protocol=None, uuidlookup=None): break candmgrs = cfd.get(node, {}).get('collective.managercandidates', {}).get('value', None) if candmgrs: - candmgrs = noderange.NodeRange(candmgrs, cfg).nodes + try: + candmgrs = noderange.NodeRange(candmgrs, cfg).nodes + except Exception: + candmgrs = noderange.NodeRange(candmgrs).nodes if collective.get_myname() not in candmgrs: break currtime = time.time() @@ -322,7 +344,7 @@ def _find_service(service, target): host = '[{0}]'.format(host) msg = smsg.format(host, service) if not isinstance(msg, bytes): - msg = msg.encode('utf8') + msg = msg.encode('utf8') net6.sendto(msg, addr[4]) else: net4.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) @@ -410,7 +432,11 @@ def _find_service(service, target): if '/redfish/v1/' not in peerdata[nid].get('urls', ()) and '/redfish/v1' not in peerdata[nid].get('urls', ()): continue if '/DeviceDescription.json' in peerdata[nid]['urls']: - pooltargs.append(('/DeviceDescription.json', peerdata[nid])) + pooltargs.append(('/DeviceDescription.json', peerdata[nid], 'lenovo-xcc')) + else: + for targurl in peerdata[nid]['urls']: + if '/eth' in targurl and targurl.endswith('.xml'): + pooltargs.append(('/redfish/v1/', peerdata[nid], 'megarac-bmc')) # For now, don't interrogate generic redfish bmcs # This is due to a need to deduplicate from some supported SLP # targets (IMM, TSM, others) @@ -425,21 +451,32 @@ def _find_service(service, target): def check_fish(urldata, port=443, verifycallback=None): if not verifycallback: verifycallback = lambda x: True - url, data = urldata + try: + url, data, targtype = urldata + except ValueError: + url, data = urldata + targtype = 'service:redfish-bmc' try: wc = webclient.SecureHTTPConnection(_get_svrip(data), port, verifycallback=verifycallback, timeout=1.5) - peerinfo = wc.grab_json_response(url) + peerinfo = wc.grab_json_response(url, headers={'Accept': 'application/json'}) except socket.error: return None if url == '/DeviceDescription.json': + if not peerinfo: + return None try: peerinfo = peerinfo[0] + except KeyError: + peerinfo['xcc-variant'] = '3' + except IndexError: + return None + try: myuuid = peerinfo['node-uuid'].lower() if '-' not in myuuid: myuuid = '-'.join([myuuid[:8], myuuid[8:12], myuuid[12:16], myuuid[16:20], myuuid[20:]]) data['uuid'] = myuuid data['attributes'] = peerinfo - data['services'] = ['lenovo-xcc'] + data['services'] = ['lenovo-xcc'] if 'xcc-variant' not in peerinfo else ['lenovo-xcc' + peerinfo['xcc-variant']] return data except (IndexError, KeyError): return None @@ -447,7 +484,7 @@ def check_fish(urldata, port=443, verifycallback=None): peerinfo = wc.grab_json_response('/redfish/v1/') if url == '/redfish/v1/': if 'UUID' in peerinfo: - data['services'] = ['service:redfish-bmc'] + data['services'] = [targtype] data['uuid'] = peerinfo['UUID'].lower() return data return None @@ -466,7 +503,12 @@ def _parse_ssdp(peer, rsp, peerdata): if code == b'200': if nid in peerdata: peerdatum = peerdata[nid] - if peer not in peerdatum['addresses']: + normpeer = peer[0:1] + peer[2:] + for currpeer in peerdatum['addresses']: + currnormpeer = currpeer[0:1] + currpeer[2:] + if currnormpeer == normpeer: + break + else: peerdatum['addresses'].append(peer) else: peerdatum = { @@ -501,5 +543,7 @@ if __name__ == '__main__': def printit(rsp): - print(repr(rsp)) + pass # print(repr(rsp)) active_scan(printit) + + diff --git a/confluent_server/confluent/firmwaremanager.py b/confluent_server/confluent/firmwaremanager.py index a7713943..eb5d4c86 100644 
--- a/confluent_server/confluent/firmwaremanager.py +++ b/confluent_server/confluent/firmwaremanager.py @@ -53,7 +53,7 @@ def execupdate(handler, filename, updateobj, type, owner, node, datfile): return if type == 'ffdc' and os.path.isdir(filename): filename += '/' + node - if 'type' == 'ffdc': + if type == 'ffdc': errstr = False if os.path.exists(filename): errstr = '{0} already exists on {1}, cannot overwrite'.format( diff --git a/confluent_server/confluent/httpapi.py b/confluent_server/confluent/httpapi.py index baf40606..f5073d34 100644 --- a/confluent_server/confluent/httpapi.py +++ b/confluent_server/confluent/httpapi.py @@ -72,6 +72,20 @@ opmap = { } + +def get_user_for_session(sessionid, sessiontok): + if not isinstance(sessionid, str): + sessionid = sessionid.decode() + if not isinstance(sessiontok, str): + sessiontok = sessiontok.decode() + if not sessiontok or not sessionid: + raise Exception("invalid session id or token") + if sessiontok != httpsessions.get(sessionid, {}).get('csrftoken', None): + raise Exception("Invalid csrf token for session") + user = httpsessions[sessionid]['name'] + if not isinstance(user, str): + user = user.decode() + return user + def group_creation_resources(): yield confluent.messages.Attributes( kv={'name': None}, desc="Name of the group").html() + '<br>
' @@ -175,6 +189,8 @@ qstring = None if qstring: for qpair in qstring.split('&'): + if '=' not in qpair: + continue qkey, qvalue = qpair.split('=') qdict[qkey] = qvalue if reqbody is not None: @@ -618,7 +634,6 @@ def resourcehandler(env, start_response): yield '500 - ' + str(e) return - def resourcehandler_backend(env, start_response): """Function to handle new wsgi requests """ @@ -669,7 +684,11 @@ def resourcehandler_backend(env, start_response): if 'CONTENT_LENGTH' in env and int(env['CONTENT_LENGTH']) > 0: reqbody = env['wsgi.input'].read(int(env['CONTENT_LENGTH'])) reqtype = env['CONTENT_TYPE'] - operation = opmap[env['REQUEST_METHOD']] + operation = opmap.get(env['REQUEST_METHOD'], None) + if not operation: + start_response('400 Bad Method', headers) + yield '' + return querydict = _get_query_dict(env, reqbody, reqtype) if operation != 'retrieve' and 'restexplorerop' in querydict: operation = querydict['restexplorerop'] @@ -728,7 +747,13 @@ def resourcehandler_backend(env, start_response): elif (env['PATH_INFO'].endswith('/forward/web') and env['PATH_INFO'].startswith('/nodes/')): prefix, _, _ = env['PATH_INFO'].partition('/forward/web') - _, _, nodename = prefix.rpartition('/') + #_, _, nodename = prefix.rpartition('/') + default = False + if 'default' in env['PATH_INFO']: + default = True + _, _, nodename, _ = prefix.split('/') + else: + _, _, nodename = prefix.rpartition('/') hm = cfgmgr.get_node_attributes(nodename, 'hardwaremanagement.manager') targip = hm.get(nodename, {}).get( 'hardwaremanagement.manager', {}).get('value', None) @@ -737,6 +762,29 @@ def resourcehandler_backend(env, start_response): yield 'No hardwaremanagement.manager defined for node' return targip = targip.split('/', 1)[0] + if default: + try: + ip_info = socket.getaddrinfo(targip, 0, 0, socket.SOCK_STREAM) + except socket.gaierror: + start_response('404 Not Found', headers) + yield 'hardwaremanagement.manager definition could not be resolved' + return + # iterate over the results rather than assuming order, in case the address family ordering ever changes + for i in range(len(ip_info)): + if ip_info[i][0] == socket.AF_INET: + url = 'https://{0}/'.format(ip_info[i][-1][0]) + start_response('302 Found', [('Location', url)]) + yield 'Our princess is in another castle!' + return + elif ip_info[i][0] == socket.AF_INET6: + url = 'https://[{0}]/'.format(ip_info[i][-1][0]) + if url.startswith('https://[fe80'): + start_response('405 Method Not Allowed', headers) + yield 'link local ipv6 address cannot be used in browser' + return + start_response('302 Found', [('Location', url)]) + yield 'Our princess is in another castle!' 
+ return funport = forwarder.get_port(targip, env['HTTP_X_FORWARDED_FOR'], authorized['sessionid']) host = env['HTTP_X_FORWARDED_HOST'] @@ -1123,7 +1171,7 @@ def serve(bind_host, bind_port): pass # we gave it our best shot there try: eventlet.wsgi.server(sock, resourcehandler, log=False, log_output=False, - debug=False, socket_timeout=60) + debug=False, socket_timeout=60, keepalive=False) except TypeError: # Older eventlet in place, skip arguments it does not understand eventlet.wsgi.server(sock, resourcehandler, log=False, debug=False) diff --git a/confluent_server/confluent/main.py b/confluent_server/confluent/main.py index b49d8f56..b0e3508a 100644 --- a/confluent_server/confluent/main.py +++ b/confluent_server/confluent/main.py @@ -220,16 +220,20 @@ def setlimits(): def assure_ownership(path): try: if os.getuid() != os.stat(path).st_uid: - sys.stderr.write('{} is not owned by confluent user, change ownership\n'.format(path)) + if os.getuid() == 0: + sys.stderr.write('Attempting to run as root, when non-root usage is detected\n') + else: + sys.stderr.write('{} is not owned by confluent user, change ownership\n'.format(path)) sys.exit(1) except OSError as e: if e.errno == 13: - sys.stderr.write('{} is not owned by confluent user, change ownership\n'.format(path)) + if os.getuid() == 0: + sys.stderr.write('Attempting to run as root, when non-root usage is detected\n') + else: + sys.stderr.write('{} is not owned by confluent user, change ownership\n'.format(path)) sys.exit(1) def sanity_check(): - if os.getuid() == 0: - return True assure_ownership('/etc/confluent') assure_ownership('/etc/confluent/cfg') for filename in glob.glob('/etc/confluent/cfg/*'): diff --git a/confluent_server/confluent/messages.py b/confluent_server/confluent/messages.py index 99e757d3..70b2ca21 100644 --- a/confluent_server/confluent/messages.py +++ b/confluent_server/confluent/messages.py @@ -262,10 +262,10 @@ class Generic(ConfluentMessage): def json(self): return json.dumps(self.data) - + def raw(self): return self.data - + def html(self): return json.dumps(self.data) @@ -344,10 +344,10 @@ class ConfluentResourceCount(ConfluentMessage): self.myargs = [count] self.desc = 'Resource Count' self.kvpairs = {'count': count} - + def strip_node(self, node): pass - + class CreatedResource(ConfluentMessage): notnode = True readonly = True @@ -569,6 +569,8 @@ def get_input_message(path, operation, inputdata, nodes=None, multinode=False, return InputLicense(path, nodes, inputdata, configmanager) elif path == ['deployment', 'ident_image']: return InputIdentImage(path, nodes, inputdata) + elif path == ['console', 'ikvm']: + return InputIkvmParams(path, nodes, inputdata) elif inputdata: raise exc.InvalidArgumentException( 'No known input handler for request') @@ -948,6 +950,9 @@ class InputIdentImage(ConfluentInputMessage): keyname = 'ident_image' valid_values = ['create'] +class InputIkvmParams(ConfluentInputMessage): + keyname = 'method' + valid_values = ['unix', 'wss'] class InputIdentifyMessage(ConfluentInputMessage): valid_values = set([ diff --git a/confluent_server/confluent/mountmanager.py b/confluent_server/confluent/mountmanager.py new file mode 100644 index 00000000..36f654d2 --- /dev/null +++ b/confluent_server/confluent/mountmanager.py @@ -0,0 +1,79 @@ + +import eventlet +import confluent.messages as msg +import confluent.exceptions as exc +import struct +import eventlet.green.socket as socket +import eventlet.green.subprocess as subprocess +import os +mountsbyuser = {} +_browserfsd = None + +def 
assure_browserfs(): + global _browserfsd + if _browserfsd is None: + os.makedirs('/var/run/confluent/browserfs/mount', exist_ok=True) + _browserfsd = subprocess.Popen( + ['/opt/confluent/bin/browserfs', + '-c', '/var/run/confluent/browserfs/control', + '-s', '127.0.0.1:4006', + # browserfs supports unix domain websocket, however apache reverse proxy is dicey that way in some versions + '-w', '/var/run/confluent/browserfs/mount']) + while not os.path.exists('/var/run/confluent/browserfs/control'): + eventlet.sleep(0.5) + + +def handle_request(configmanager, inputdata, pathcomponents, operation): + curruser = configmanager.current_user + if len(pathcomponents) == 0: + mounts = mountsbyuser.get(curruser, []) + if operation == 'retrieve': + for mount in mounts: + yield msg.ChildCollection(mount['index']) + elif operation == 'create': + if 'name' not in inputdata: + raise exc.InvalidArgumentException('Required parameter "name" is missing') + usedidx = set([]) + for mount in mounts: + usedidx.add(mount['index']) + curridx = 1 + while curridx in usedidx: + curridx += 1 + currmount = requestmount(curruser, inputdata['name']) + currmount['index'] = curridx + if curruser not in mountsbyuser: + mountsbyuser[curruser] = [] + mountsbyuser[curruser].append(currmount) + yield msg.KeyValueData({ + 'path': currmount['path'], + 'fullpath': '/var/run/confluent/browserfs/mount/{}'.format(currmount['path']), + 'authtoken': currmount['authtoken'] + }) + +def requestmount(subdir, filename): + assure_browserfs() + a = socket.socket(socket.AF_UNIX) + a.connect('/var/run/confluent/browserfs/control') + subname = subdir.encode() + a.send(struct.pack('!II', 1, len(subname))) + a.send(subname) + fname = filename.encode() + a.send(struct.pack('!I', len(fname))) + a.send(fname) + rsp = a.recv(4) + retcode = struct.unpack('!I', rsp)[0] + if retcode != 0: + raise Exception("Bad return code") + rsp = a.recv(4) + nlen = struct.unpack('!I', rsp)[0] + idstr = a.recv(nlen).decode('utf8') + rsp = a.recv(4) + nlen = struct.unpack('!I', rsp)[0] + authtok = a.recv(nlen).decode('utf8') + thismount = { + 'id': idstr, + 'path': '{}/{}/{}'.format(idstr, subdir, filename), + 'authtoken': authtok + } + return thismount + diff --git a/confluent_server/confluent/netutil.py b/confluent_server/confluent/netutil.py index 37e8d198..e5384f5d 100644 --- a/confluent_server/confluent/netutil.py +++ b/confluent_server/confluent/netutil.py @@ -25,6 +25,9 @@ import eventlet.support.greendns import os getaddrinfo = eventlet.support.greendns.getaddrinfo +eventlet.support.greendns.resolver.clear() +eventlet.support.greendns.resolver._resolver.lifetime = 1 + def msg_align(len): return (len + 3) & ~3 @@ -190,6 +193,9 @@ class NetManager(object): iname = attribs.get('interface_names', None) if iname: myattribs['interface_names'] = iname + vlanid = attribs.get('vlan_id', None) + if vlanid: + myattribs['vlan_id'] = vlanid teammod = attribs.get('team_mode', None) if teammod: myattribs['team_mode'] = teammod @@ -317,7 +323,7 @@ def get_full_net_config(configmanager, node, serverip=None): if val is None: continue if attrib.startswith('net.'): - attrib = attrib.replace('net.', '').rsplit('.', 1) + attrib = attrib.replace('net.', '', 1).rsplit('.', 1) if len(attrib) == 1: iface = None attrib = attrib[0] @@ -333,11 +339,13 @@ def get_full_net_config(configmanager, node, serverip=None): myaddrs = get_addresses_by_serverip(serverip) nm = NetManager(myaddrs, node, configmanager) defaultnic = {} + ppool = eventlet.greenpool.GreenPool(64) if None in attribs: - 
nm.process_attribs(None, attribs[None]) + ppool.spawn(nm.process_attribs, None, attribs[None]) del attribs[None] for netname in sorted(attribs): - nm.process_attribs(netname, attribs[netname]) + ppool.spawn(nm.process_attribs, netname, attribs[netname]) + ppool.waitall() retattrs = {} if None in nm.myattribs: retattrs['default'] = nm.myattribs[None] @@ -400,7 +408,8 @@ def noneify(cfgdata): # the ip as reported by recvmsg to match the subnet of that net.* interface # if switch and port available, that should match. def get_nic_config(configmanager, node, ip=None, mac=None, ifidx=None, - serverip=None): + serverip=None, relayipn=b'\x00\x00\x00\x00', + clientip=None): """Fetch network configuration parameters for a nic For a given node and interface, find and retrieve the pertinent network @@ -421,6 +430,28 @@ def get_nic_config(configmanager, node, ip=None, mac=None, ifidx=None, #TODO(jjohnson2): ip address, prefix length, mac address, # join a bond/bridge, vlan configs, etc. # also other nic criteria, physical location, driver and index... + clientfam = None + clientipn = None + serverfam = None + serveripn = None + llaipn = socket.inet_pton(socket.AF_INET6, 'fe80::') + if serverip is not None: + if '.' in serverip: + serverfam = socket.AF_INET + elif ':' in serverip: + serverfam = socket.AF_INET6 + if serverfam: + serveripn = socket.inet_pton(serverfam, serverip) + if clientip is not None: + if '%' in clientip: + # link local, don't even bother + clientfam = None + elif '.' in clientip: + clientfam = socket.AF_INET + elif ':' in clientip: + clientfam = socket.AF_INET6 + if clientfam: + clientipn = socket.inet_pton(clientfam, clientip) nodenetattribs = configmanager.get_node_attributes( node, 'net*').get(node, {}) cfgbyname = {} @@ -458,9 +489,22 @@ def get_nic_config(configmanager, node, ip=None, mac=None, ifidx=None, cfgdata['ipv4_broken'] = True if v6broken: cfgdata['ipv6_broken'] = True + isremote = False if serverip is not None: dhcprequested = False myaddrs = get_addresses_by_serverip(serverip) + if serverfam == socket.AF_INET6 and ipn_on_same_subnet(serverfam, serveripn, llaipn, 64): + isremote = False + elif clientfam: + for myaddr in myaddrs: + # we may have received over a local vlan, wrong aliased subnet + # so have to check for *any* potential matches + fam, svrip, prefix = myaddr[:3] + if fam == clientfam: + if ipn_on_same_subnet(fam, clientipn, svrip, prefix): + break + else: + isremote = True genericmethod = 'static' ipbynodename = None ip6bynodename = None @@ -481,6 +525,10 @@ def get_nic_config(configmanager, node, ip=None, mac=None, ifidx=None, bestsrvbyfam = {} for myaddr in myaddrs: fam, svrip, prefix = myaddr[:3] + if fam == socket.AF_INET and relayipn != b'\x00\x00\x00\x00': + bootsvrip = relayipn + else: + bootsvrip = svrip candsrvs.append((fam, svrip, prefix)) if fam == socket.AF_INET: nver = '4' @@ -500,14 +548,17 @@ def get_nic_config(configmanager, node, ip=None, mac=None, ifidx=None, candip = cfgbyname[candidate].get('ipv{}_address'.format(nver), None) if candip and '/' in candip: candip, candprefix = candip.split('/') - if int(candprefix) != prefix: + if fam == socket.AF_INET and relayipn != b'\x00\x00\x00\x00': + prefix = int(candprefix) + if (not isremote) and int(candprefix) != prefix: continue candgw = cfgbyname[candidate].get('ipv{}_gateway'.format(nver), None) if candip: try: for inf in socket.getaddrinfo(candip, 0, fam, socket.SOCK_STREAM): candipn = socket.inet_pton(fam, inf[-1][0]) - if ipn_on_same_subnet(fam, svrip, candipn, prefix): + if ((isremote 
and ipn_on_same_subnet(fam, clientipn, candipn, int(candprefix))) + or ipn_on_same_subnet(fam, bootsvrip, candipn, prefix)): bestsrvbyfam[fam] = svrip cfgdata['ipv{}_address'.format(nver)] = candip cfgdata['ipv{}_method'.format(nver)] = ipmethod @@ -525,7 +576,7 @@ def get_nic_config(configmanager, node, ip=None, mac=None, ifidx=None, elif candgw: for inf in socket.getaddrinfo(candgw, 0, fam, socket.SOCK_STREAM): candgwn = socket.inet_pton(fam, inf[-1][0]) - if ipn_on_same_subnet(fam, svrip, candgwn, prefix): + if ipn_on_same_subnet(fam, bootsvrip, candgwn, prefix): candgws.append((fam, candgwn, prefix)) if foundaddr: return noneify(cfgdata) diff --git a/confluent_server/confluent/networking/lldp.py b/confluent_server/confluent/networking/lldp.py index e1fd8d4e..e181d46f 100644 --- a/confluent_server/confluent/networking/lldp.py +++ b/confluent_server/confluent/networking/lldp.py @@ -381,9 +381,10 @@ def list_info(parms, requestedparameter): break else: candidate = info[requestedparameter] - candidate = candidate.strip() - if candidate != '': - results.add(_api_sanitize_string(candidate)) + if candidate: + candidate = candidate.strip() + if candidate != '': + results.add(_api_sanitize_string(candidate)) return [msg.ChildCollection(x + suffix) for x in util.natural_sort(results)] def _handle_neighbor_query(pathcomponents, configmanager): diff --git a/confluent_server/confluent/noderange.py b/confluent_server/confluent/noderange.py index b59bf7c6..7657292c 100644 --- a/confluent_server/confluent/noderange.py +++ b/confluent_server/confluent/noderange.py @@ -55,6 +55,145 @@ def humanify_nodename(nodename): return [int(text) if text.isdigit() else text.lower() for text in re.split(numregex, nodename)] +def unnumber_nodename(nodename): + # stub out numbers + chunked = ["{}" if text.isdigit() else text.lower() + for text in re.split(numregex, nodename)] + return chunked + +def getnumbers_nodename(nodename): + return [x for x in re.split(numregex, nodename) if x.isdigit()] + + +class Bracketer(object): + __slots__ = ['sequences', 'count', 'nametmpl', 'diffn', 'tokens', 'numlens'] + + def __init__(self, nodename): + self.sequences = [] + self.numlens = [] + realnodename = nodename + if ':' in nodename: + realnodename = nodename.split(':', 1)[0] + self.count = len(getnumbers_nodename(realnodename)) + self.nametmpl = unnumber_nodename(realnodename) + for n in range(self.count): + self.sequences.append(None) + self.numlens.append([0, 0]) + self.diffn = None + self.tokens = [] + self.extend(nodename) + if self.count == 0: + self.tokens = [nodename] + + def extend(self, nodeorseq): + # can only differentiate a single number + endname = None + endnums = None + if ':' in nodeorseq: + nodename, endname = nodeorseq.split(':', 1) + else: + nodename = nodeorseq + txtnums = getnumbers_nodename(nodename) + nums = [int(x) for x in txtnums] + for n in range(self.count): + # First pass to see if we have exactly one different number + padto = len(txtnums[n]) + needpad = (padto != len('{}'.format(nums[n]))) + if self.sequences[n] is None: + # We initialize to text pieces, 'currstart', and 'prev' number + self.sequences[n] = [[], nums[n], nums[n]] + self.numlens[n] = [len(txtnums[n]), len(txtnums[n])] + elif self.sequences[n][2] == nums[n] and self.numlens[n][1] == padto: + continue # new nodename has no new number, keep going + else: # if self.sequences[n][2] != nums[n] or : + if self.diffn is not None and (n != self.diffn or + (padto < self.numlens[n][1]) or + (needpad and padto != self.numlens[n][1])): + 
self.flush_current() + self.sequences[n] = [[], nums[n], nums[n]] + self.numlens[n] = [padto, padto] + self.diffn = n + for n in range(self.count): + padto = len(txtnums[n]) + needpad = (padto != len('{}'.format(nums[n]))) + if self.sequences[n] is None: + # We initialize to text pieces, 'currstart', and 'prev' number + self.sequences[n] = [[], nums[n], nums[n]] + self.numlens[n] = [len(txtnums[n]), len(txtnums[n])] + elif self.sequences[n][2] == nums[n] and self.numlens[n][1] == padto: + continue # new nodename has no new number, keep going + else: # if self.sequences[n][2] != nums[n] or : + if self.diffn is not None and (n != self.diffn or + (padto < self.numlens[n][1]) or + (needpad and padto != self.numlens[n][1])): + self.flush_current() + self.sequences[n] = [[], nums[n], nums[n]] + self.numlens[n] = [padto, padto] + self.diffn = None + else: + self.diffn = n + if self.sequences[n][2] == (nums[n] - 1): + self.sequences[n][2] = nums[n] + self.numlens[n][1] = padto + elif self.sequences[n][2] < (nums[n] - 1): + if self.sequences[n][2] != self.sequences[n][1]: + fmtstr = '{{:0{}d}}:{{:0{}d}}'.format(*self.numlens[n]) + self.sequences[n][0].append(fmtstr.format(self.sequences[n][1], self.sequences[n][2])) + else: + fmtstr = '{{:0{}d}}'.format(self.numlens[n][0]) + self.sequences[n][0].append(fmtstr.format(self.sequences[n][1])) + self.sequences[n][1] = nums[n] + self.numlens[n][0] = padto + self.sequences[n][2] = nums[n] + self.numlens[n][1] = padto + + def flush_current(self): + txtfields = [] + if self.sequences and self.sequences[0] is not None: + for n in range(self.count): + if self.sequences[n][1] == self.sequences[n][2]: + fmtstr = '{{:0{}d}}'.format(self.numlens[n][0]) + self.sequences[n][0].append(fmtstr.format(self.sequences[n][1])) + else: + fmtstr = '{{:0{}d}}:{{:0{}d}}'.format(*self.numlens[n]) + self.sequences[n][0].append(fmtstr.format(self.sequences[n][1], self.sequences[n][2])) + txtfield = ','.join(self.sequences[n][0]) + if txtfield.isdigit(): + txtfields.append(txtfield) + else: + txtfields.append('[{}]'.format(txtfield)) + self.tokens.append(''.join(self.nametmpl).format(*txtfields)) + self.sequences = [] + for n in range(self.count): + self.sequences.append(None) + + @property + def range(self): + if self.sequences: + self.flush_current() + return ','.join(self.tokens) + + +def group_elements(elems): + """ Take the specified elements and chunk them according to text similarity + """ + prev = None + currchunk = [] + chunked_elems = [currchunk] + for elem in elems: + elemtxt = unnumber_nodename(elem) + if not prev: + prev = elemtxt + currchunk.append(elem) + continue + if prev == elemtxt: + currchunk.append(elem) + else: + currchunk = [elem] + chunked_elems.append(currchunk) + prev = elemtxt + return chunked_elems + class ReverseNodeRange(object): """Abbreviate a set of nodes to a shorter noderange representation @@ -71,7 +210,8 @@ @property def noderange(self): subsetgroups = [] - for group in self.cfm.get_groups(sizesort=True): + allgroups = self.cfm.get_groups(sizesort=True) + for group in allgroups: if lastnoderange: for nr in lastnoderange: if lastnoderange[nr] - self.nodes:
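For orientation: Bracketer folds numerically adjacent names into bracketed ranges by tracking, for each numeric field, the accumulated text pieces plus the start and previous number of the current run (the 'currstart' and 'prev' noted in the comments above). A minimal usage sketch, mirroring the test cases in the __main__ block added later in this diff (assuming confluent.noderange is importable):

    from confluent.noderange import Bracketer

    b = Bracketer('r3u4')  # seed with the first name; numeric fields are detected automatically
    for name in ('r3u5', 'r3u6', 'r3u9'):
        b.extend(name)  # consecutive numbers extend a run, a gap starts a new comma-separated piece
    print(b.range)  # expected 'r3u[4:6,9]', per the test cases below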
noderange that are not in the nodes + # nodes that do not exist at all (we need a noderange modification + # that returns non existing nodes) + ranges = [] + try: + subsetgroups.sort(key=humanify_nodename) + groupchunks = group_elements(subsetgroups) + for gc in groupchunks: + if not gc: + continue + bracketer = Bracketer(gc[0]) + for chnk in gc[1:]: + bracketer.extend(chnk) + ranges.append(bracketer.range) + except Exception: + subsetgroups.sort() + ranges.extend(subsetgroups) + try: + nodes = sorted(self.nodes, key=humanify_nodename) + nodechunks = group_elements(nodes) + for nc in nodechunks: + if not nc: + continue + bracketer = Bracketer(nc[0]) + for chnk in nc[1:]: + bracketer.extend(chnk) + ranges.append(bracketer.range) + except Exception: + ranges.extend(sorted(self.nodes)) + return ','.join(ranges) @@ -230,12 +402,16 @@ class NodeRange(object): def _expandstring(self, element, filternodes=None): prefix = '' if element[0][0] in ('/', '~'): + if self.purenumeric: + raise Exception('Regular expression not supported within "[]"') element = ''.join(element) nameexpression = element[1:] if self.cfm is None: raise Exception('Verification configmanager required') return set(self.cfm.filter_nodenames(nameexpression, filternodes)) elif '=' in element[0] or '!~' in element[0]: + if self.purenumeric: + raise Exception('Equality/Inequality operators (=, !=, =~, !~) are invalid within "[]"') element = ''.join(element) if self.cfm is None: raise Exception('Verification configmanager required') @@ -295,3 +471,29 @@ class NodeRange(object): if self.cfm is None: return set([element]) raise Exception(element + ' not a recognized node, group, or alias') + +if __name__ == '__main__': + cases = [ + (['r3u4', 'r5u6'], 'r3u4,r5u6'), # should not erroneously gather + (['r3u4s1', 'r5u6s3'], 'r3u4s1,r5u6s3'), # should not erroneously gather + (['r3u4s1', 'r3u4s2', 'r5u4s3'], 'r3u4s[1:2],r5u4s3'), # should not erroneously gather + (['r3u4', 'r3u5', 'r3u6', 'r3u9', 'r4u1'], 'r3u[4:6,9],r4u1'), + (['n01', 'n2', 'n03'], 'n01,n2,n03'), + (['n7', 'n8', 'n09', 'n10', 'n11', 'n12', 'n13', 'n14', 'n15', 'n16', + 'n17', 'n18', 'n19', 'n20'], 'n[7:8],n[09:20]') + ] + for case in cases: + gc = case[0] + bracketer = Bracketer(gc[0]) + for chnk in gc[1:]: + bracketer.extend(chnk) + br = bracketer.range + resnodes = NodeRange(br).nodes + if set(resnodes) != set(gc): + print('FAILED: ' + repr(sorted(gc))) + print('RESULT: ' + repr(sorted(resnodes))) + print('EXPECTED: ' + repr(case[1])) + print('ACTUAL: ' + br) + + + diff --git a/confluent_server/confluent/osimage.py b/confluent_server/confluent/osimage.py index 8884e0e9..387922fa 100644 --- a/confluent_server/confluent/osimage.py +++ b/confluent_server/confluent/osimage.py @@ -155,19 +155,38 @@ def update_boot_esxi(profiledir, profile, label): def find_glob(loc, fileglob): + grubcfgs = [] for cdir, _, fs in os.walk(loc): for f in fs: if fnmatch(f, fileglob): - return os.path.join(cdir, f) - return None + grubcfgs.append(os.path.join(cdir, f)) + return grubcfgs def update_boot_linux(profiledir, profile, label): profname = os.path.basename(profiledir) kernelargs = profile.get('kernelargs', '') + needefi = False + for grubexe in glob.glob(profiledir + '/boot/efi/boot/grubx64.efi'): + with open(grubexe, 'rb') as grubin: + grubcontent = grubin.read() + uaidx = grubcontent.find(b'User-Agent: GRUB 2.0') + if uaidx > 0: + grubcontent = grubcontent[uaidx:] + cridx = grubcontent.find(b'\r') + if cridx > 1: + grubcontent = grubcontent[:cridx] + grubver = grubcontent.split(b'~', 
1)[0] + grubver = grubver.rsplit(b' ', 1)[-1] + grubver = grubver.split(b'.') + if len(grubver) > 1: + if int(grubver[0]) < 3 and int(grubver[1]) < 3: + needefi = True + lincmd = 'linuxefi' if needefi else 'linux' + initrdcmd = 'initrdefi' if needefi else 'initrd' grubcfg = "set timeout=5\nmenuentry '" grubcfg += label - grubcfg += "' {\n linuxefi /kernel " + kernelargs + "\n" + grubcfg += "' {\n " + lincmd + " /kernel " + kernelargs + "\n" initrds = [] for initramfs in glob.glob(profiledir + '/boot/initramfs/*.cpio'): initramfs = os.path.basename(initramfs) @@ -175,16 +194,21 @@ def update_boot_linux(profiledir, profile, label): for initramfs in os.listdir(profiledir + '/boot/initramfs'): if initramfs not in initrds: initrds.append(initramfs) - grubcfg += " initrdefi " + grubcfg += " " + initrdcmd + " " for initramfs in initrds: grubcfg += " /initramfs/{0}".format(initramfs) grubcfg += "\n}\n" # well need to honor grubprefix path if different grubcfgpath = find_glob(profiledir + '/boot', 'grub.cfg') if not grubcfgpath: - grubcfgpath = profiledir + '/boot/efi/boot/grub.cfg' - with open(grubcfgpath, 'w') as grubout: - grubout.write(grubcfg) + grubcfgpath = [ + profiledir + '/boot/efi/boot/grub.cfg', + profiledir + '/boot/boot/grub/grub.cfg' + ] + for grubcfgpth in grubcfgpath: + os.makedirs(os.path.dirname(grubcfgpth), 0o755, exist_ok=True) + with open(grubcfgpth, 'w') as grubout: + grubout.write(grubcfg) ipxeargs = kernelargs for initramfs in initrds: ipxeargs += " initrd=" + initramfs @@ -411,9 +435,7 @@ def check_ubuntu(isoinfo): ] return {'name': 'ubuntu-{0}-{1}'.format(ver, arch), 'method': EXTRACT|COPY, - 'extractlist': ['casper/vmlinuz', 'casper/initrd', - 'efi/boot/bootx64.efi', 'efi/boot/grubx64.efi' - ], + 'extractlist': exlist, 'copyto': 'install.iso', 'category': 'ubuntu{0}'.format(major)} @@ -601,7 +623,7 @@ def fingerprint(archive): return imginfo, None, None -def import_image(filename, callback, backend=False, mfd=None): +def import_image(filename, callback, backend=False, mfd=None, custtargpath=None, custdistpath=None, custname=''): if mfd: archive = os.fdopen(int(mfd), 'rb') else: @@ -610,11 +632,16 @@ def import_image(filename, callback, backend=False, mfd=None): if not identity: return -1 identity, imginfo, funname = identity - targpath = identity['name'] - distpath = '/var/lib/confluent/distributions/' + targpath - if identity.get('subname', None): - targpath += '/' + identity['subname'] - targpath = '/var/lib/confluent/distributions/' + targpath + distpath = custdistpath + if not distpath: + targpath = identity['name'] + distpath = '/var/lib/confluent/distributions/' + targpath + if not custtargpath: + if identity.get('subname', None): + targpath += '/' + identity['subname'] + targpath = '/var/lib/confluent/distributions/' + targpath + else: + targpath = custtargpath try: os.makedirs(targpath, 0o755) except Exception as e: @@ -747,9 +774,9 @@ def rebase_profile(dirname): # customization detected, skip # else # update required, manifest update - - - + + + def get_hashes(dirname): hashmap = {} for dname, _, fnames in os.walk(dirname): @@ -765,18 +792,21 @@ def get_hashes(dirname): def generate_stock_profiles(defprofile, distpath, targpath, osname, - profilelist): + profilelist, customname): osd, osversion, arch = osname.split('-') bootupdates = [] for prof in os.listdir('{0}/profiles'.format(defprofile)): srcname = '{0}/profiles/{1}'.format(defprofile, prof) - profname = '{0}-{1}'.format(osname, prof) + if customname: + profname = '{0}-{1}'.format(customname, prof) + 
else: + profname = '{0}-{1}'.format(osname, prof) dirname = '/var/lib/confluent/public/os/{0}'.format(profname) if os.path.exists(dirname): continue oumask = os.umask(0o22) shutil.copytree(srcname, dirname) - hmap = get_hashes(dirname) + hmap = get_hashes(dirname) profdata = None try: os.makedirs('{0}/boot/initramfs'.format(dirname), 0o755) @@ -824,11 +854,12 @@ def generate_stock_profiles(defprofile, distpath, targpath, osname, class MediaImporter(object): - def __init__(self, media, cfm=None): + def __init__(self, media, cfm=None, customname=None, checkonly=False): self.worker = None if not os.path.exists('/var/lib/confluent/public'): raise Exception('`osdeploy initialize` must be executed before importing any media') self.profiles = [] + self.errors = [] medfile = None self.medfile = None if cfm and media in cfm.clientfiles: @@ -848,25 +879,35 @@ class MediaImporter(object): self.phase = 'copying' if not identity: raise Exception('Unrecognized OS Media') - if 'subname' in identity: + self.customname = customname if customname else '' + if customname: + importkey = customname + elif 'subname' in identity: importkey = '{0}-{1}'.format(identity['name'], identity['subname']) else: importkey = identity['name'] - if importkey in importing: + if importkey in importing and not checkonly: raise Exception('Media import already in progress for this media') self.importkey = importkey - importing[importkey] = self - self.importkey = importkey self.osname = identity['name'] self.oscategory = identity.get('category', None) - targpath = identity['name'] + if customname: + targpath = customname + else: + targpath = identity['name'] self.distpath = '/var/lib/confluent/distributions/' + targpath - if identity.get('subname', None): + if identity.get('subname', None): # subname is to indicate disk number in a media set targpath += '/' + identity['subname'] self.targpath = '/var/lib/confluent/distributions/' + targpath if os.path.exists(self.targpath): - del importing[importkey] - raise Exception('{0} already exists'.format(self.targpath)) + errstr = '{0} already exists'.format(self.targpath) + if checkonly: + self.errors = [errstr] + else: + raise Exception(errstr) + if checkonly: + return + importing[importkey] = self self.filename = os.path.abspath(media) self.error = '' self.importer = eventlet.spawn(self.importmedia) @@ -884,7 +925,7 @@ class MediaImporter(object): os.environ['CONFLUENT_MEDIAFD'] = '{0}'.format(self.medfile.fileno()) with open(os.devnull, 'w') as devnull: self.worker = subprocess.Popen( - [sys.executable, __file__, self.filename, '-b'], + [sys.executable, __file__, self.filename, '-b', self.targpath, self.distpath, self.customname], stdin=devnull, stdout=subprocess.PIPE, close_fds=False) wkr = self.worker currline = b'' @@ -924,7 +965,7 @@ class MediaImporter(object): self.oscategory) try: generate_stock_profiles(defprofile, self.distpath, self.targpath, - self.osname, self.profiles) + self.osname, self.profiles, self.customname) except Exception as e: self.phase = 'error' self.error = str(e) @@ -951,7 +992,7 @@ if __name__ == '__main__': os.umask(0o022) if len(sys.argv) > 2: mfd = os.environ.get('CONFLUENT_MEDIAFD', None) - sys.exit(import_image(sys.argv[1], callback=printit, backend=True, mfd=mfd)) + sys.exit(import_image(sys.argv[1], callback=printit, backend=True, mfd=mfd, custtargpath=sys.argv[3], custdistpath=sys.argv[4], custname=sys.argv[5])) else: sys.exit(import_image(sys.argv[1], callback=printit)) diff --git a/confluent_server/confluent/plugins/configuration/attributes.py 
b/confluent_server/confluent/plugins/configuration/attributes.py index c2ea83d9..2c9a6ac9 100644 --- a/confluent_server/confluent/plugins/configuration/attributes.py +++ b/confluent_server/confluent/plugins/configuration/attributes.py @@ -21,16 +21,16 @@ import confluent.util as util from fnmatch import fnmatch -def retrieve(nodes, element, configmanager, inputdata): +def retrieve(nodes, element, configmanager, inputdata, clearwarnbynode=None): configmanager.check_quorum() if nodes is not None: - return retrieve_nodes(nodes, element, configmanager, inputdata) + return retrieve_nodes(nodes, element, configmanager, inputdata, clearwarnbynode) elif element[0] == 'nodegroups': return retrieve_nodegroup( - element[1], element[3], configmanager, inputdata) + element[1], element[3], configmanager, inputdata, clearwarnbynode) -def retrieve_nodegroup(nodegroup, element, configmanager, inputdata): +def retrieve_nodegroup(nodegroup, element, configmanager, inputdata, clearwarnbynode=None): try: grpcfg = configmanager.get_nodegroup_attributes(nodegroup) except KeyError: @@ -106,10 +106,12 @@ def retrieve_nodegroup(nodegroup, element, configmanager, inputdata): raise Exception("BUGGY ATTRIBUTE FOR NODEGROUP") -def retrieve_nodes(nodes, element, configmanager, inputdata): +def retrieve_nodes(nodes, element, configmanager, inputdata, clearwarnbynode): attributes = configmanager.get_node_attributes(nodes) if element[-1] == 'all': for node in util.natural_sort(nodes): + if clearwarnbynode and node in clearwarnbynode: + yield msg.Attributes(node, {'_warnings': clearwarnbynode[node]}) theattrs = set(allattributes.node).union(set(attributes[node])) for attribute in sorted(theattrs): if attribute in attributes[node]: # have a setting for it @@ -266,6 +268,7 @@ def update_nodes(nodes, element, configmanager, inputdata): namemap[node] = rename['rename'] configmanager.rename_nodes(namemap) return yield_rename_resources(namemap, isnode=True) + clearwarnbynode = {} for node in nodes: updatenode = inputdata.get_attributes(node, allattributes.node) clearattribs = [] @@ -299,10 +302,11 @@ def update_nodes(nodes, element, configmanager, inputdata): markup = (e.text[:e.offset-1] + '-->' + e.text[e.offset-1] + '<--' + e.text[e.offset:]).strip() raise exc.InvalidArgumentException('Syntax error in attribute name: "{0}"'.format(markup)) if len(clearattribs) > 0: - configmanager.clear_node_attributes([node], clearattribs) + clearwarnbynode[node] = [] + configmanager.clear_node_attributes([node], clearattribs, warnings=clearwarnbynode[node]) updatedict[node] = updatenode try: configmanager.set_node_attributes(updatedict) except ValueError as e: raise exc.InvalidArgumentException(str(e)) - return retrieve(nodes, element, configmanager, inputdata) + return retrieve(nodes, element, configmanager, inputdata, clearwarnbynode) diff --git a/confluent_server/confluent/plugins/console/ikvm.py b/confluent_server/confluent/plugins/console/ikvm.py new file mode 100644 index 00000000..395ead60 --- /dev/null +++ b/confluent_server/confluent/plugins/console/ikvm.py @@ -0,0 +1,34 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2024 Lenovo +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# This provides linkage between vinz and confluent, with support +# for getting session authorization from the BMC + +import confluent.vinzmanager as vinzmanager +import confluent.messages as msg + + +def create(nodes, element, configmanager, inputdata): + for node in nodes: + url = vinzmanager.get_url(node, inputdata) + yield msg.ChildCollection(url) + + +def update(nodes, element, configmanager, inputdata): + for node in nodes: + url = vinzmanager.get_url(node, inputdata) + yield msg.ChildCollection(url) diff --git a/confluent_server/confluent/plugins/console/openbmc.py b/confluent_server/confluent/plugins/console/openbmc.py index 17acae7c..3ff08e5a 100644 --- a/confluent_server/confluent/plugins/console/openbmc.py +++ b/confluent_server/confluent/plugins/console/openbmc.py @@ -119,6 +119,7 @@ class TsmConsole(conapi.Console): self.datacallback = None self.nodeconfig = config self.connected = False + self.recvr = None def recvdata(self): @@ -134,22 +135,30 @@ class TsmConsole(conapi.Console): kv = util.TLSCertVerifier( self.nodeconfig, self.node, 'pubkeys.tls_hardwaremanager').verify_cert wc = webclient.SecureHTTPConnection(self.origbmc, 443, verifycallback=kv) - rsp = wc.grab_json_response_with_status('/login', {'data': [self.username.decode('utf8'), self.password.decode("utf8")]}, headers={'Content-Type': 'application/json'}) + try: + rsp = wc.grab_json_response_with_status('/login', {'data': [self.username.decode('utf8'), self.password.decode("utf8")]}, headers={'Content-Type': 'application/json', 'Accept': 'application/json'}) + except Exception as e: + raise cexc.TargetEndpointUnreachable(str(e)) + if rsp[1] > 400: + raise cexc.TargetEndpointBadCredentials bmc = self.bmc if '%' in self.bmc: prefix = self.bmc.split('%')[0] bmc = prefix + ']' self.ws = WrappedWebSocket(host=bmc) self.ws.set_verify_callback(kv) - self.ws.connect('wss://{0}/console0'.format(self.bmc), host=bmc, cookie='XSRF-TOKEN={0}; SESSION={1}'.format(wc.cookies['XSRF-TOKEN'], wc.cookies['SESSION'])) + self.ws.connect('wss://{0}/console0'.format(self.bmc), host=bmc, cookie='XSRF-TOKEN={0}; SESSION={1}'.format(wc.cookies['XSRF-TOKEN'], wc.cookies['SESSION']), subprotocols=[wc.cookies['XSRF-TOKEN']]) self.connected = True - eventlet.spawn_n(self.recvdata) + self.recvr = eventlet.spawn(self.recvdata) return def write(self, data): self.ws.send(data) def close(self): + if self.recvr: + self.recvr.kill() + self.recvr = None if self.ws: self.ws.close() self.connected = False diff --git a/confluent_server/confluent/plugins/hardwaremanagement/affluent.py b/confluent_server/confluent/plugins/hardwaremanagement/affluent.py index ea169b3b..ee10bd7d 100644 --- a/confluent_server/confluent/plugins/hardwaremanagement/affluent.py +++ b/confluent_server/confluent/plugins/hardwaremanagement/affluent.py @@ -45,6 +45,19 @@ class WebClient(object): 'target certificate fingerprint and ' 'pubkeys.tls_hardwaremanager attribute')) return {} + except (socket.gaierror, socket.herror, TimeoutError) as e: + results.put(msg.ConfluentTargetTimeout(self.node, str(e))) + return {} + except OSError as e: + if e.errno == 113: + 
results.put(msg.ConfluentTargetTimeout(self.node))
+            else:
+                results.put(msg.ConfluentTargetTimeout(self.node, str(e)))
+            return {}
+        except Exception as e:
+            results.put(msg.ConfluentNodeError(self.node,
+                repr(e)))
+            return {}
         if status == 401:
             results.put(msg.ConfluentTargetInvalidCredentials(self.node, 'Unable to authenticate'))
             return {}
@@ -115,9 +128,7 @@ def retrieve(nodes, element, configmanager, inputdata):
     results = queue.LightQueue()
     workers = set([])
     if element == ['power', 'state']:
-        for node in nodes:
-            yield msg.PowerState(node=node, state='on')
-        return
+        _run_method(retrieve_power, workers, results, configmanager, nodes, element)
     elif element == ['health', 'hardware']:
         _run_method(retrieve_health, workers, results, configmanager, nodes, element)
     elif element[:3] == ['inventory', 'hardware', 'all']:
@@ -188,9 +199,15 @@ def retrieve_sensors(configmanager, creds, node, results, element):
+def retrieve_power(configmanager, creds, node, results, element):
+    wc = WebClient(node, configmanager, creds)
+    hinfo = wc.fetch('/affluent/health', results)
+    if hinfo:
+        results.put(msg.PowerState(node=node, state='on'))
+
 def retrieve_health(configmanager, creds, node, results, element):
     wc = WebClient(node, configmanager, creds)
-    hinfo = wc.fetch('/affluent/health', results)
+    hinfo = wc.fetch('/affluent/health', results)
     if hinfo:
         results.put(msg.HealthSummary(hinfo.get('health', 'unknown'), name=node))
         results.put(msg.SensorReadings(hinfo.get('sensors', []), name=node))
diff --git a/confluent_server/confluent/plugins/hardwaremanagement/eatonpdu.py b/confluent_server/confluent/plugins/hardwaremanagement/eatonpdu.py
index 16be5b38..4c3d4654 100644
--- a/confluent_server/confluent/plugins/hardwaremanagement/eatonpdu.py
+++ b/confluent_server/confluent/plugins/hardwaremanagement/eatonpdu.py
@@ -33,7 +33,8 @@ def simplify_name(name):
 def sanitize_json(data):
     if not isinstance(data, str):
         data = data.decode('utf8')
-    return re.sub(r'([^ {:,]*):', r'"\1":', data).replace("'", '"')
+    return re.sub(r'([^ {:,]*):', r'"\1":', data).replace("'", '"').replace(',,', ',null,')
+
 def answer_challenge(username, password, data):
     realm = data[0]
diff --git a/confluent_server/confluent/plugins/hardwaremanagement/enclosure.py b/confluent_server/confluent/plugins/hardwaremanagement/enclosure.py
index 933a852b..a59422c0 100644
--- a/confluent_server/confluent/plugins/hardwaremanagement/enclosure.py
+++ b/confluent_server/confluent/plugins/hardwaremanagement/enclosure.py
@@ -15,10 +15,31 @@ import confluent.core as core
 import confluent.messages as msg
 import pyghmi.exceptions as pygexc
 import confluent.exceptions as exc
+import eventlet.queue as queue
+import eventlet.greenpool as greenpool
+
+
+def reseat_bays(encmgr, bays, configmanager, rspq):
+    try:
+        for encbay in bays:
+            node = bays[encbay]
+            try:
+                for rsp in core.handle_path(
+                        '/nodes/{0}/_enclosure/reseat_bay'.format(encmgr),
+                        'update', configmanager,
+                        inputdata={'reseat': int(encbay)}):
+                    rspq.put(rsp)
+            except pygexc.UnsupportedFunctionality as uf:
+                rspq.put(msg.ConfluentNodeError(node, str(uf)))
+            except exc.TargetEndpointUnreachable as uf:
+                rspq.put(msg.ConfluentNodeError(node, str(uf)))
+    finally:
+        rspq.put(None)

 def update(nodes, element, configmanager, inputdata):
     emebs = configmanager.get_node_attributes(
         nodes, (u'enclosure.manager', u'enclosure.bay'))
+    baysbyencmgr = {}
     for node in nodes:
         try:
             em = emebs[node]['enclosure.manager']['value']
@@ -30,13 +51,20 @@ def update(nodes, element, configmanager, inputdata):
             em = node
         if 
not eb: eb = -1 - try: - for rsp in core.handle_path( - '/nodes/{0}/_enclosure/reseat_bay'.format(em), - 'update', configmanager, - inputdata={'reseat': int(eb)}): - yield rsp - except pygexc.UnsupportedFunctionality as uf: - yield msg.ConfluentNodeError(node, str(uf)) - except exc.TargetEndpointUnreachable as uf: - yield msg.ConfluentNodeError(node, str(uf)) + if em not in baysbyencmgr: + baysbyencmgr[em] = {} + baysbyencmgr[em][eb] = node + rspq = queue.Queue() + gp = greenpool.GreenPool(64) + for encmgr in baysbyencmgr: + gp.spawn_n(reseat_bays, encmgr, baysbyencmgr[encmgr], configmanager, rspq) + while gp.running(): + nrsp = rspq.get() + if nrsp is not None: + yield nrsp + while not rspq.empty(): + nrsp = rspq.get() + if nrsp is not None: + yield nrsp + + diff --git a/confluent_server/confluent/plugins/hardwaremanagement/enos.py b/confluent_server/confluent/plugins/hardwaremanagement/enos.py new file mode 100644 index 00000000..f568fae2 --- /dev/null +++ b/confluent_server/confluent/plugins/hardwaremanagement/enos.py @@ -0,0 +1,347 @@ + +# Copyright 2019 Lenovo +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +#Noncritical: +# - One or more temperature sensors is in the warning range; +#Critical: +# - One or more temperature sensors is in the failure range; +# - One or more fans are running < 100 RPM; +# - One power supply is off. 
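The Noncritical/Critical rules in the header above amount to a worst-severity rollup: each sensor gets tagged ok, warning, or critical, and the node summary is the worst tag seen. A minimal sketch of that reduction, illustrative only and not part of the patch:

# Illustrative only: reduce per-sensor healths to a node summary,
# assuming the usual ok < warning < critical ordering.
SEVERITY = {'ok': 0, 'warning': 1, 'critical': 2}

def rollup(healths):
    worst = 'ok'
    for health in healths:
        if SEVERITY.get(health, 0) > SEVERITY[worst]:
            worst = health
    return worst  # e.g. rollup(['ok', 'warning']) -> 'warning'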
+
+import re
+import eventlet
+import eventlet.queue as queue
+import confluent.exceptions as exc
+webclient = eventlet.import_patched("pyghmi.util.webclient")
+import confluent.messages as msg
+import confluent.util as util
+import confluent.plugins.shell.ssh as ssh
+
+
+class SwitchSensor(object):
+    def __init__(self, name, states=None, units=None, value=None, health=None):
+        self.name = name
+        self.value = value
+        self.states = states
+        self.health = health
+        self.units = units
+
+
+def _run_method(method, workers, results, configmanager, nodes, element):
+    creds = configmanager.get_node_attributes(
+        nodes, ["switchuser", "switchpass", "secret.hardwaremanagementpassword",
+                "secret.hardwaremanagementuser"], decrypt=True)
+    for node in nodes:
+        workers.add(eventlet.spawn(method, configmanager, creds,
+                                   node, results, element))
+
+
+def enos_login(node, configmanager, creds):
+    try:
+        ukey = "switchuser"
+        upass = "switchpass"
+        if ukey not in creds[node] and "secret.hardwaremanagementuser" in creds[node]:
+            ukey = "secret.hardwaremanagementuser"
+            upass = "secret.hardwaremanagementpassword"
+
+        if ukey not in creds[node]:
+            raise exc.TargetEndpointBadCredentials("Unable to authenticate - switchuser or secret.hardwaremanagementuser not set")
+        user = creds[node][ukey]["value"]
+        if upass not in creds[node]:
+            passwd = None
+        else:
+            passwd = creds[node][upass]["value"]
+        nssh = ssh.SshConn(node=node, config=configmanager, username=user, password=passwd)
+        nssh.do_logon()
+        return nssh
+    except Exception as e:
+        raise exc.TargetEndpointBadCredentials(f"Unable to authenticate {e}")
+
+
+def enos_version(ssh):
+    sshStdout, sshStderr = ssh.exec_command(cmd="show", cmdargs=["version"])
+    return sshStdout
+
+
+def update(nodes, element, configmanager, inputdata):
+    for node in nodes:
+        yield msg.ConfluentNodeError(node, "Not Implemented")
+
+
+def delete(nodes, element, configmanager, inputdata):
+    for node in nodes:
+        yield msg.ConfluentNodeError(node, "Not Implemented")
+
+
+def create(nodes, element, configmanager, inputdata):
+    for node in nodes:
+        yield msg.ConfluentNodeError(node, "Not Implemented")
+
+
+def retrieve(nodes, element, configmanager, inputdata):
+    results = queue.LightQueue()
+    workers = set([])
+    if element == ["power", "state"]:
+        for node in nodes:
+            yield msg.PowerState(node=node, state="on")
+        return
+    elif element == ["health", "hardware"]:
+        _run_method(retrieve_health, workers, results, configmanager, nodes, element)
+    elif element[:3] == ["inventory", "hardware", "all"]:
+        _run_method(retrieve_inventory, workers, results, configmanager, nodes, element)
+    elif element[:3] == ["inventory", "firmware", "all"]:
+        _run_method(retrieve_firmware, workers, results, configmanager, nodes, element)
+    elif element[:3] == ["sensors", "hardware", "all"]:
+        _run_method(retrieve_sensors, workers, results, configmanager, nodes, element)
+    else:
+        for node in nodes:
+            yield msg.ConfluentNodeError(node, f"Not Implemented: {element}")
+        return
+    currtimeout = 10
+    while workers:
+        try:
+            datum = results.get(10)
+            while datum:
+                if datum:
+                    yield datum
+                datum = results.get_nowait()
+        except queue.Empty:
+            pass
+        eventlet.sleep(0.001)
+        for t in list(workers):
+            if t.dead:
+                workers.discard(t)
+    try:
+        while True:
+            datum = results.get_nowait()
+            if datum:
+                yield datum
+    except queue.Empty:
+        pass
+
+
+def retrieve_inventory(configmanager, creds, node, results, element):
+    if len(element) == 3:
+        results.put(msg.ChildCollection("all"))
+        results.put(msg.ChildCollection("system"))
+        return
+
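retrieve() above fans each node out to its own greenthread and drains a shared LightQueue until every worker is dead, then drains stragglers. A self-contained sketch of that idiom, with illustrative names that are not from the patch:

import eventlet
import eventlet.queue as queue

def fan_out(work, items):
    # One greenthread per item, all reporting into a shared queue.
    results = queue.LightQueue()
    workers = set(eventlet.spawn(work, item, results) for item in items)
    while workers:
        try:
            yield results.get(timeout=0.1)
        except queue.Empty:
            pass
        workers = set(w for w in workers if not w.dead)
    while not results.empty():
        # Drain anything queued between the last get and worker death.
        yield results.get_nowait()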
+ switch = gather_data(configmanager, creds, node) + invinfo = switch["inventory"] + + for fan, data in switch["fans"].items(): + invinfo["inventory"][0]["information"][f"Fan #{fan}"] = data["state"] + + for psu, data in switch["psus"].items(): + invinfo["inventory"][0]["information"][f"PSU #{psu}"] = data["state"] + + results.put(msg.KeyValueData(invinfo, node)) + + +def gather_data(configmanager, creds, node): + nssh = enos_login(node=node, configmanager=configmanager, creds=creds) + switch_lines = enos_version(ssh=nssh) + switch_data = {} + sysinfo = {"Product name": {"regex": ".*RackSwitch (\w+)"}, + "Serial Number": {"regex": "ESN\s*\w*\s*: ([\w-]+)"}, + "Board Serial Number": {"regex": "Switch Serial No: (\w+)"}, + "Model": {"regex": "MTM\s*\w*\s*: ([\w-]+)"}, + "FRU Number": {"regex": "Hardware Part\s*\w*\s*: (\w+)"}, + "Airflow": {"regex": "System Fan Airflow\s*\w*\s*: ([\w-]+)"}, + } + + invinfo = { + "inventory": [{ + "name": "System", + "present": True, + "information": { + "Manufacturer": "Lenovo", + } + }] + } + + switch_data["sensors"] = [] + + switch_data["fans"] = gather_fans(switch_lines) + for fan, data in switch_data["fans"].items(): + if "rpm" in data: + health = "ok" + if int(data["rpm"]) < 100: + health = "critical" + switch_data["sensors"].append(SwitchSensor(name=f"Fan {fan}", value=data['rpm'], + units="RPM", health=health)) + + switch_data["psus"] = gather_psus(switch_lines) + + # Hunt for the temp limits + phylimit = {"warn": None, "shut": None} + templimit = {"warn": None, "shut": None} + for line in switch_lines: + match = re.match(r"([\w\s]+)Warning[\w\s]+\s(\d+)[\sA-Za-z\/]+\s(\d+)[\s\w\/]+\s(\d*)", line) + if match: + if "System" in match.group(1): + templimit["warn"] = int(match.group(2)) + templimit["shut"] = int(match.group(3)) + elif "PHYs" in match.group(1): + phylimit["warn"] = int(match.group(2)) + phylimit["shut"] = int(match.group(3)) + if not phylimit["warn"]: + phylimit = templimit + + for line in switch_lines: + # match the inventory data + for key in sysinfo.keys(): + match = re.match(re.compile(sysinfo[key]["regex"]), line) + if match: + invinfo["inventory"][0]["information"][key] = match.group(1).strip() + + # match temp sensors logging where failed + match = re.match(r"Temperature\s+([\d\s\w]+)\s*:\s*(\d+)+\s+([CF])+", line) + if match: + health = "ok" + temp = int(match.group(2)) + name = f"{match.group(1).strip()} Temp" + if "Phy" in name: + if temp > phylimit["warn"]: + health = "warning" + if temp > phylimit["shut"]: + health = "critical" + else: + if temp > templimit["warn"]: + health = "warning" + if temp > templimit["shut"]: + health = "critical" + switch_data["sensors"].append(SwitchSensor(name=name, + value=temp, units=f"°{match.group(3)}", health=health)) + match = re.match(r"\s*(\w+) Faults\s*:\s+(.+)", line) + if match and match.group(2) not in ["()", "None"]: + switch_data["sensors"].append(SwitchSensor(name=f"{match.group(1)} Fault", + value=match.group(2).strip(), units="", health="critical")) + + switch_data["inventory"] = invinfo + + sysfw = {"Software Version": "Unknown", "Boot kernel": "Unknown"} + for line in switch_lines: + for key in sysfw.keys(): + regex = f"{key}\s*\w*\s* ([0-9.]+)" + match = re.match(re.compile(regex), line) + if match: + sysfw[key] = match.group(1) + switch_data["firmware"] = sysfw + + return switch_data + + +def gather_psus(data): + psus = {} + for line in data: + # some switches are: + # Power Supply 1: Back-To-Front + # others are: + # Internal Power Supply: On + if "Power Supply" in line: + match 
= re.match(re.compile(f"Power Supply (\d)+.*"), line)
+            if match:
+                psu = match.group(1)
+                if psu not in psus:
+                    psus[psu] = {}
+                m = re.match(r".+\s+(\w+\-\w+\-\w+)\s*\[*.*$", line)
+                if m:
+                    psus[psu]["airflow"] = m.group(1)
+                    psus[psu]["state"] = "Present"
+                else:
+                    psus[psu]["state"] = "Not installed"
+            else:
+                for psu in range(1, 10):
+                    if "Power Supply" in line and psu not in psus:
+                        if psu not in psus:
+                            psus[psu] = {}
+                        if "Not Installed" in line:
+                            psus[psu]["state"] = "Not installed"
+                            break
+                        else:
+                            psus[psu]["state"] = "Present"
+                            break
+    return psus
+
+
+def gather_fans(data):
+    fans = {}
+    for line in data:
+        # look for presence of fans
+        if "Fan" in line:
+            match = re.match(re.compile(f"Fan (\d)+.*"), line)
+            if match:
+                fan = match.group(1)
+                if fan not in fans:
+                    fans[fan] = {}
+                if "rpm" in line or "RPM" in line:
+                    if "Module" in line:
+                        m = re.search(r"Module\s+(\d)+:", line)
+                        if m:
+                            fans[fan]["Module"] = m.group(1)
+                    fans[fan]["state"] = "Present"
+                    m = re.search(r"(\d+)\s*:\s+(RPM=)*(\d+)(rpm)*", line)
+                    if m:
+                        fans[fan]["rpm"] = m.group(3)
+
+                    m = re.search(r"\s+(PWM=)*(\d+)(%|pwm)+", line)
+                    if m:
+                        fans[fan]["pwm"] = m.group(2)
+
+                    m = re.search(r"(.+)\s+(\w+\-\w+\-\w+)$", line)
+                    if m:
+                        fans[fan]["airflow"] = m.group(2)
+                else:
+                    fans[fan]["state"] = "Not installed"
+    return fans
+
+
+def retrieve_firmware(configmanager, creds, node, results, element):
+    if len(element) == 3:
+        results.put(msg.ChildCollection("all"))
+        return
+    sysinfo = gather_data(configmanager, creds, node)["firmware"]
+    items = [{
+        "Software": {"version": sysinfo["Software Version"]},
+    },
+    {
+        "Boot kernel": {"version": sysinfo["Boot kernel"]},
+    }]
+    results.put(msg.Firmware(items, node))
+
+
+def retrieve_health(configmanager, creds, node, results, element):
+    switch = gather_data(configmanager, creds, node)
+    badreadings = []
+    summary = "ok"
+    sensors = switch["sensors"]
+
+    for sensor in sensors:
+        if sensor.health not in ["ok"]:
+            if sensor.health in ["critical"]:
+                summary = "critical"
+            elif summary in ["ok"] and sensor.health in ["warning"]:
+                summary = "warning"
+            badreadings.append(sensor)
+    results.put(msg.HealthSummary(summary, name=node))
+    results.put(msg.SensorReadings(badreadings, name=node))
+
+
+def retrieve_sensors(configmanager, creds, node, results, element):
+    sensors = gather_data(configmanager, creds, node)["sensors"]
+    results.put(msg.SensorReadings(sensors, node))
diff --git a/confluent_server/confluent/plugins/hardwaremanagement/geist.py b/confluent_server/confluent/plugins/hardwaremanagement/geist.py
index 3af6fa49..3f086115 100644
--- a/confluent_server/confluent/plugins/hardwaremanagement/geist.py
+++ b/confluent_server/confluent/plugins/hardwaremanagement/geist.py
@@ -1,13 +1,13 @@
 # Copyright 2022 Lenovo
 #
-# Licensed under the Apache License, Version 2.0 (the "License");
+# Licensed under the Apache License, Version 2.0 (the 'License');
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
 # http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
+# distributed under the License is distributed on an 'AS IS' BASIS,
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
@@ -20,12 +20,15 @@ import eventlet.green.time as time import eventlet import eventlet.greenpool as greenpool + + def simplify_name(name): - return name.lower().replace(' ', '_').replace('/', '-').replace( - '_-_', '-') + return name.lower().replace(' ', '_').replace('/', '-').replace('_-_', '-') + pdupool = greenpool.GreenPool(128) + def data_by_type(indata): databytype = {} for keyname in indata: @@ -34,7 +37,9 @@ def data_by_type(indata): if not objtype: continue if objtype in databytype: - raise Exception("Multiple instances of type {} not yet supported".format(objtype)) + raise Exception( + 'Multiple instances of type {} not yet supported'.format(objtype) + ) databytype[objtype] = obj obj['keyname'] = keyname return databytype @@ -58,31 +63,30 @@ class GeistClient(object): def wc(self): if self._wc: return self._wc - targcfg = self.configmanager.get_node_attributes(self.node, - ['hardwaremanagement.manager'], - decrypt=True) + targcfg = self.configmanager.get_node_attributes( + self.node, ['hardwaremanagement.manager'], decrypt=True + ) targcfg = targcfg.get(self.node, {}) - target = targcfg.get( - 'hardwaremanagement.manager', {}).get('value', None) + target = targcfg.get('hardwaremanagement.manager', {}).get('value', None) if not target: target = self.node target = target.split('/', 1)[0] cv = util.TLSCertVerifier( - self.configmanager, self.node, - 'pubkeys.tls_hardwaremanager').verify_cert + self.configmanager, self.node, 'pubkeys.tls_hardwaremanager' + ).verify_cert self._wc = wc.SecureHTTPConnection(target, port=443, verifycallback=cv) return self._wc def login(self, configmanager): - credcfg = configmanager.get_node_attributes(self.node, - ['secret.hardwaremanagementuser', - 'secret.hardwaremanagementpassword'], - decrypt=True) + credcfg = configmanager.get_node_attributes( + self.node, + ['secret.hardwaremanagementuser', 'secret.hardwaremanagementpassword'], + decrypt=True, + ) credcfg = credcfg.get(self.node, {}) - username = credcfg.get( - 'secret.hardwaremanagementuser', {}).get('value', None) - passwd = credcfg.get( - 'secret.hardwaremanagementpassword', {}).get('value', None) + username = credcfg.get('secret.hardwaremanagementuser', {}).get('value', None) + passwd = credcfg.get('secret.hardwaremanagementpassword', {}).get('value', None) + if not isinstance(username, str): username = username.decode('utf8') if not isinstance(passwd, str): @@ -92,26 +96,32 @@ class GeistClient(object): self.username = username rsp = self.wc.grab_json_response( '/api/auth/{0}'.format(username), - {'cmd': 'login', 'data': {'password': passwd}}) + {'cmd': 'login', 'data': {'password': passwd}}, + ) + token = rsp['data']['token'] return token def logout(self): if self._token: - self.wc.grab_json_response('/api/auth/{0}'.format(self.username), - {'cmd': 'logout', 'token': self.token}) + self.wc.grab_json_response( + '/api/auth/{0}'.format(self.username), + {'cmd': 'logout', 'token': self.token}, + ) self._token = None def get_outlet(self, outlet): rsp = self.wc.grab_json_response('/api/dev') rsp = rsp['data'] dbt = data_by_type(rsp) + if 't3hd' in dbt: del dbt['t3hd'] if len(dbt) != 1: raise Exception('Multiple PDUs not supported per pdu') pdutype = list(dbt)[0] outlet = dbt[pdutype]['outlet'][str(int(outlet) - 1)] + state = outlet['state'].split('2')[-1] return state @@ -125,12 +135,20 @@ class GeistClient(object): raise Exception('Multiple PDUs per endpoint not supported') pdu = dbt[list(dbt)[0]]['keyname'] outlet = int(outlet) - 1 + rsp = self.wc.grab_json_response( 
'/api/dev/{0}/outlet/{1}'.format(pdu, outlet), - {'cmd': 'control', 'token': self.token, - 'data': {'action': state, 'delay': False}}) + { + 'cmd': 'control', + 'token': self.token, + 'data': {'action': state, 'delay': False}, + }, + ) -def process_measurement(keyname, name, enttype, entname, measurement, readings, category): + +def process_measurement( + keyname, name, enttype, entname, measurement, readings, category +): if measurement['type'] == 'realPower': if category not in ('all', 'power'): return @@ -147,6 +165,10 @@ def process_measurement(keyname, name, enttype, entname, measurement, readings, if category not in ('all',): return readtype = 'Voltage' + elif measurement['type'] == 'current': + if category not in ('all',): + return + readtype = 'Current' elif measurement['type'] == 'temperature': readtype = 'Temperature' elif measurement['type'] == 'dewpoint': @@ -158,23 +180,35 @@ def process_measurement(keyname, name, enttype, entname, measurement, readings, myname = entname + ' ' + readtype if name != 'all' and simplify_name(myname) != name: return - readings.append({ - 'name': myname, - 'value': float(measurement['value']), - 'units': measurement['units'], - 'type': readtype.split()[-1] - }) - + readings.append( + { + 'name': myname, + 'value': float(measurement['value']), + 'units': measurement['units'], + 'type': readtype.split()[-1], + } + ) + def process_measurements(name, category, measurements, enttype, readings): for measure in util.natural_sort(list(measurements)): measurement = measurements[measure]['measurement'] entname = measurements[measure]['name'] for measureid in measurement: - process_measurement(measure, name, enttype, entname, measurement[measureid], readings, category) - + process_measurement( + measure, + name, + enttype, + entname, + measurement[measureid], + readings, + category, + ) + _sensors_by_node = {} + + def read_sensors(element, node, configmanager): category, name = element[-2:] justnames = False @@ -192,10 +226,12 @@ def read_sensors(element, node, configmanager): _sensors_by_node[node] = (adev, time.time() + 1) sn = _sensors_by_node.get(node, None) dbt = data_by_type(sn[0]['data']) + readings = [] - for datatype in dbt: + for datatype in dbt: datum = dbt[datatype] process_measurements(name, category, datum['entity'], 'entity', readings) + if 'outlet' in datum: process_measurements(name, category, datum['outlet'], 'outlet', readings) if justnames: @@ -204,25 +240,78 @@ def read_sensors(element, node, configmanager): else: yield msg.SensorReadings(readings, name=node) -def get_outlet(node, configmanager, element): + +def get_outlet(element, node, configmanager): gc = GeistClient(node, configmanager) state = gc.get_outlet(element[-1]) + return msg.PowerState(node=node, state=state) + def read_firmware(node, configmanager): gc = GeistClient(node, configmanager) adev = gc.wc.grab_json_response('/api/sys') myversion = adev['data']['version'] yield msg.Firmware([{'PDU Firmware': {'version': myversion}}], node) + +def read_inventory(element, node, configmanager): + _inventory = {} + inventory = {} + gc = GeistClient(node, configmanager) + adev = gc.wc.grab_json_response('/api/sys') + basedata = adev['data'] + inventory['present'] = True + inventory['name'] = 'PDU' + for elem in basedata.items(): + + if ( + elem[0] != 'component' + and elem[0] != 'locale' + and elem[0] != 'state' + and elem[0] != 'contact' + and elem[0] != 'appVersion' + and elem[0] != 'build' + and elem[0] != 'version' + and elem[0] != 'apiVersion' + ): + temp = elem[0] + if elem[0] == 
'serialNumber': + temp = 'Serial' + elif elem[0] == 'partNumber': + temp = 'P/N' + elif elem[0] == 'modelNumber': + temp = 'Lenovo P/N and Serial' + _inventory[temp] = elem[1] + elif elem[0] == 'component': + tempname = '' + for component in basedata['component'].items(): + for item in component: + if type(item) == str: + + tempname = item + else: + for entry in item.items(): + temp = entry[0] + if temp == 'sn': + temp = 'Serial' + _inventory[tempname + ' ' + temp] = entry[1] + + inventory['information'] = _inventory + + yield msg.KeyValueData({'inventory': [inventory]}, node) + + def retrieve(nodes, element, configmanager, inputdata): + if 'outlets' in element: gp = greenpool.GreenPile(pdupool) for node in nodes: - gp.spawn(get_outlet, node, configmanager, element) + + gp.spawn(get_outlet, element, node, configmanager) for res in gp: yield res - + return elif element[0] == 'sensors': gp = greenpool.GreenPile(pdupool) @@ -239,11 +328,20 @@ def retrieve(nodes, element, configmanager, inputdata): for rsp in gp: for datum in rsp: yield datum + + elif '/'.join(element).startswith('inventory/hardware/all'): + gp = greenpool.GreenPile(pdupool) + for node in nodes: + gp.spawn(read_inventory, element, node, configmanager) + for rsp in gp: + for datum in rsp: + yield datum else: for node in nodes: - yield msg.ConfluentResourceUnavailable(node, 'Not implemented') + yield msg.ConfluentResourceUnavailable(node, 'Not implemented') return - + + def update(nodes, element, configmanager, inputdata): if 'outlets' not in element: yield msg.ConfluentResourceUnavailable(node, 'Not implemented') diff --git a/confluent_server/confluent/plugins/hardwaremanagement/ipmi.py b/confluent_server/confluent/plugins/hardwaremanagement/ipmi.py index 938b69ae..32fabefe 100644 --- a/confluent_server/confluent/plugins/hardwaremanagement/ipmi.py +++ b/confluent_server/confluent/plugins/hardwaremanagement/ipmi.py @@ -659,7 +659,9 @@ class IpmiHandler(object): elif self.element[1:4] == ['management_controller', 'extended', 'advanced']: return self.handle_bmcconfig(True) elif self.element[1:4] == ['management_controller', 'extended', 'extra']: - return self.handle_bmcconfig(True, extended=True) + return self.handle_bmcconfig(advanced=False, extended=True) + elif self.element[1:4] == ['management_controller', 'extended', 'extra_advanced']: + return self.handle_bmcconfig(advanced=True, extended=True) elif self.element[1:3] == ['system', 'all']: return self.handle_sysconfig() elif self.element[1:3] == ['system', 'advanced']: @@ -861,6 +863,23 @@ class IpmiHandler(object): resourcename = sensor['name'] self.ipmicmd.sensormap[simplify_name(resourcename)] = resourcename + def read_normalized(self, sensorname): + readings = None + if sensorname == 'average_cpu_temp': + cputemp = self.ipmicmd.get_average_processor_temperature() + readings = [cputemp] + elif sensorname == 'inlet_temp': + inltemp = self.ipmicmd.get_inlet_temperature() + readings = [inltemp] + elif sensorname == 'total_power': + sensor = EmptySensor('Total Power') + sensor.states = [] + sensor.units = 'W' + sensor.value = self.ipmicmd.get_system_power_watts() + readings = [sensor] + if readings: + self.output.put(msg.SensorReadings(readings, name=self.node)) + def read_sensors(self, sensorname): if sensorname == 'all': sensors = self.ipmicmd.get_sensor_descriptions() @@ -1157,6 +1176,8 @@ class IpmiHandler(object): if len(self.element) < 3: return self.sensorcategory = self.element[2] + if self.sensorcategory == 'normalized': + return self.read_normalized(self.element[-1]) 
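The new 'normalized' category above exposes three synthetic readings (average_cpu_temp, inlet_temp, total_power) that pyghmi computes rather than reading a single SDR entry. Restated as a dispatch table, purely as a sketch of the same logic read_normalized() implements:

# Sketch only: the mapping read_normalized() implements above.
# 'cmd' stands in for the pyghmi command object (self.ipmicmd).
NORMALIZED_READINGS = {
    'average_cpu_temp': lambda cmd: [cmd.get_average_processor_temperature()],
    'inlet_temp': lambda cmd: [cmd.get_inlet_temperature()],
    # total_power wraps cmd.get_system_power_watts() in a sensor object
}

def lookup_normalized(cmd, sensorname):
    getter = NORMALIZED_READINGS.get(sensorname)
    return getter(cmd) if getter else None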
         # list sensors per category
         if len(self.element) == 3 and self.element[-2] == 'hardware':
             if self.sensorcategory == 'leds':
@@ -1357,10 +1378,8 @@
     def identify(self):
         if 'update' == self.op:
             identifystate = self.inputdata.inputbynode[self.node] == 'on'
-            if self.inputdata.inputbynode[self.node] == 'blink':
-                raise exc.InvalidArgumentException(
-                    '"blink" is not supported with ipmi')
-            self.ipmicmd.set_identify(on=identifystate)
+            blinkstate = self.inputdata.inputbynode[self.node] == 'blink'
+            self.ipmicmd.set_identify(on=identifystate, blink=blinkstate)
             self.output.put(msg.IdentifyState(
                 node=self.node, state=self.inputdata.inputbynode[self.node]))
             return
@@ -1453,7 +1472,8 @@ class IpmiHandler(object):
         if 'read' == self.op:
             try:
                 if extended:
-                    bmccfg = self.ipmicmd.get_extended_bmc_configuration()
+                    bmccfg = self.ipmicmd.get_extended_bmc_configuration(
+                        hideadvanced=(not advanced))
                 else:
                     bmccfg = self.ipmicmd.get_bmc_configuration()
                 self.output.put(msg.ConfigSet(self.node, bmccfg))
diff --git a/confluent_server/confluent/plugins/hardwaremanagement/pdu.py b/confluent_server/confluent/plugins/hardwaremanagement/pdu.py
index b19c9b22..3db21636 100644
--- a/confluent_server/confluent/plugins/hardwaremanagement/pdu.py
+++ b/confluent_server/confluent/plugins/hardwaremanagement/pdu.py
@@ -15,10 +15,16 @@ import confluent.core as core
 import confluent.messages as msg
 import pyghmi.exceptions as pygexc
 import confluent.exceptions as exc
+import eventlet.greenpool as greenpool
+import eventlet.queue as queue
+
+
+class TaskDone:
+    pass

 def retrieve(nodes, element, configmanager, inputdata):
     emebs = configmanager.get_node_attributes(
         nodes, (u'power.*pdu', u'power.*outlet'))
+    relpdus = {}
     if element == ['power', 'inlets']:
         outletnames = set([])
         for node in nodes:
@@ -39,13 +45,36 @@ def retrieve(nodes, element, configmanager, inputdata):
         for pgroup in outlets[node]:
             pdu = outlets[node][pgroup]['pdu']
             outlet = outlets[node][pgroup]['outlet']
-            try:
-                for rsp in core.handle_path(
-                        '/nodes/{0}/power/outlets/{1}'.format(pdu, outlet),
-                        'retrieve', configmanager):
-                    yield msg.KeyValueData({pgroup: rsp.kvpairs['state']['value']}, node)
-            except exc.TargetEndpointBadCredentials:
-                yield msg.ConfluentTargetInvalidCredentials(pdu)
+            if pdu not in relpdus:
+                relpdus[pdu] = {}
+            relpdus[pdu][outlet] = (node, pgroup)
+    rspq = queue.Queue()
+    gp = greenpool.GreenPool(64)
+    for pdu in relpdus:
+        gp.spawn(readpdu, pdu, relpdus[pdu], configmanager, rspq)
+    while gp.running():
+        nrsp = rspq.get()
+        if not isinstance(nrsp, TaskDone):
+            yield nrsp
+    while not rspq.empty():
+        nrsp = rspq.get()
+        if not isinstance(nrsp, TaskDone):
+            yield nrsp
+
+def readpdu(pdu, outletmap, configmanager, rspq):
+    try:
+        for outlet in outletmap:
+            node, pgroup = outletmap[outlet]
+            try:
+                for rsp in core.handle_path(
+                        '/nodes/{0}/power/outlets/{1}'.format(pdu, outlet),
+                        'retrieve', configmanager):
+                    rspq.put(msg.KeyValueData({pgroup: rsp.kvpairs['state']['value']}, node))
+            except exc.TargetEndpointBadCredentials:
+                rspq.put(msg.ConfluentTargetInvalidCredentials(pdu))
+    finally:  # ensure that at least one thing triggers the get
+        rspq.put(TaskDone())
+

 def get_outlets(nodes, emebs, inletname):
     outlets = {}
@@ -72,11 +101,34 @@ def update(nodes, element, configmanager, inputdata):
     emebs = configmanager.get_node_attributes(
         nodes, (u'power.*pdu', u'power.*outlet'))
     inletname = element[-1]
+    relpdus = {}
+    rspq = queue.Queue()
+    gp = greenpool.GreenPool(64)
     outlets = get_outlets(nodes, emebs, inletname)
     for 
node in outlets: for pgroup in outlets[node]: pdu = outlets[node][pgroup]['pdu'] outlet = outlets[node][pgroup]['outlet'] + if pdu not in relpdus: + relpdus[pdu] = {} + relpdus[pdu][outlet] = (node, pgroup) + for pdu in relpdus: + gp.spawn(updatepdu, pdu, relpdus[pdu], configmanager, inputdata, rspq) + while gp.running(): + nrsp = rspq.get() + if not isinstance(nrsp, TaskDone): + yield nrsp + while not rspq.empty(): + nrsp = rspq.get() + if not isinstance(nrsp, TaskDone): + yield nrsp + +def updatepdu(pdu, outletmap, configmanager, inputdata, rspq): + try: + for outlet in outletmap: + node, pgroup = outletmap[outlet] for rsp in core.handle_path('/nodes/{0}/power/outlets/{1}'.format(pdu, outlet), 'update', configmanager, inputdata={'state': inputdata.powerstate(node)}): - yield msg.KeyValueData({pgroup: rsp.kvpairs['state']['value']}, node) + rspq.put(msg.KeyValueData({pgroup: rsp.kvpairs['state']['value']}, node)) + finally: + rspq.put(TaskDone()) diff --git a/confluent_server/confluent/plugins/hardwaremanagement/redfish.py b/confluent_server/confluent/plugins/hardwaremanagement/redfish.py index 20315134..f89d9a09 100644 --- a/confluent_server/confluent/plugins/hardwaremanagement/redfish.py +++ b/confluent_server/confluent/plugins/hardwaremanagement/redfish.py @@ -516,12 +516,20 @@ class IpmiHandler(object): return self.handle_ntp() elif self.element[1:4] == ['management_controller', 'extended', 'all']: return self.handle_bmcconfig() + elif self.element[1:4] == ['management_controller', 'extended', 'advanced']: + return self.handle_bmcconfig(True) + elif self.element[1:4] == ['management_controller', 'extended', 'extra']: + return self.handle_bmcconfig(advanced=False, extended=True) + elif self.element[1:4] == ['management_controller', 'extended', 'extra_advanced']: + return self.handle_bmcconfig(advanced=True, extended=True) elif self.element[1:3] == ['system', 'all']: return self.handle_sysconfig() elif self.element[1:3] == ['system', 'advanced']: return self.handle_sysconfig(True) elif self.element[1:3] == ['system', 'clear']: return self.handle_sysconfigclear() + elif self.element[1:3] == ['management_controller', 'clear']: + return self.handle_bmcconfigclear() elif self.element[1:3] == ['management_controller', 'licenses']: return self.handle_licenses() elif self.element[1:3] == ['management_controller', 'save_licenses']: @@ -712,6 +720,23 @@ class IpmiHandler(object): resourcename = sensor['name'] self.sensormap[simplify_name(resourcename)] = resourcename + def read_normalized(self, sensorname): + readings = None + if sensorname == 'average_cpu_temp': + cputemp = self.ipmicmd.get_average_processor_temperature() + readings = [cputemp] + elif sensorname == 'inlet_temp': + inltemp = self.ipmicmd.get_inlet_temperature() + readings = [inltemp] + elif sensorname == 'total_power': + sensor = EmptySensor('Total Power') + sensor.states = [] + sensor.units = 'W' + sensor.value = self.ipmicmd.get_system_power_watts() + readings = [sensor] + if readings: + self.output.put(msg.SensorReadings(readings, name=self.node)) + def read_sensors(self, sensorname): if sensorname == 'all': sensors = self.ipmicmd.get_sensor_descriptions() @@ -1012,6 +1037,8 @@ class IpmiHandler(object): if len(self.element) < 3: return self.sensorcategory = self.element[2] + if self.sensorcategory == 'normalized': + return self.read_normalized(self.element[-1]) # list sensors per category if len(self.element) == 3 and self.element[-2] == 'hardware': if self.sensorcategory == 'leds': @@ -1291,12 +1318,15 @@ class 
IpmiHandler(object): if 'read' == self.op: lc = self.ipmicmd.get_location_information() - def handle_bmcconfig(self, advanced=False): + def handle_bmcconfig(self, advanced=False, extended=False): if 'read' == self.op: try: - self.output.put(msg.ConfigSet( - self.node, - self.ipmicmd.get_bmc_configuration())) + if extended: + bmccfg = self.ipmicmd.get_extended_bmc_configuration( + hideadvanced=(not advanced)) + else: + bmccfg = self.ipmicmd.get_bmc_configuration() + self.output.put(msg.ConfigSet(self.node, bmccfg)) except Exception as e: self.output.put( msg.ConfluentNodeError(self.node, str(e))) @@ -1304,6 +1334,12 @@ class IpmiHandler(object): self.ipmicmd.set_bmc_configuration( self.inputdata.get_attributes(self.node)) + def handle_bmcconfigclear(self): + if 'read' == self.op: + raise exc.InvalidArgumentException( + 'Cannot read the "clear" resource') + self.ipmicmd.clear_bmc_configuration() + def handle_sysconfigclear(self): if 'read' == self.op: raise exc.InvalidArgumentException( diff --git a/confluent_server/confluent/plugins/info/layout.py b/confluent_server/confluent/plugins/info/layout.py index 8397af7f..ca7f120c 100644 --- a/confluent_server/confluent/plugins/info/layout.py +++ b/confluent_server/confluent/plugins/info/layout.py @@ -93,8 +93,14 @@ def retrieve(nodes, element, configmanager, inputdata): '/noderange/{0}/description'.format(needheight), 'retrieve', configmanager, inputdata=None): + if not hasattr(rsp, 'kvpairs'): + results['errors'].append((rsp.node, rsp.error)) + continue kvp = rsp.kvpairs for node in kvp: allnodedata[node]['height'] = kvp[node]['height'] + for node in allnodedata: + if 'height' not in allnodedata[node]: + allnodedata[node]['height'] = 1 yield msg.Generic(results) diff --git a/confluent_server/confluent/plugins/shell/ssh.py b/confluent_server/confluent/plugins/shell/ssh.py index 2a6b65ec..f802f842 100644 --- a/confluent_server/confluent/plugins/shell/ssh.py +++ b/confluent_server/confluent/plugins/shell/ssh.py @@ -43,7 +43,6 @@ if cryptography and cryptography.__version__.split('.') < ['1', '5']: paramiko.transport.Transport._preferred_keys) - class HostKeyHandler(paramiko.client.MissingHostKeyPolicy): def __init__(self, configmanager, node): @@ -112,7 +111,7 @@ class SshShell(conapi.Console): # that would rather not use the nodename as anything but an opaque # identifier self.datacallback = callback - if self.username is not b'': + if self.username != b'': self.logon() else: self.inputmode = 0 @@ -259,6 +258,115 @@ class SshShell(conapi.Console): self.ssh.close() self.datacallback = None + def create(nodes, element, configmanager, inputdata): if len(nodes) == 1: return SshShell(nodes[0], configmanager) + + +class SshConn(): + + def __init__(self, node, config, username=b'', password=b''): + self.node = node + self.ssh = None + self.datacallback = None + self.nodeconfig = config + self.username = username + self.password = password + self.connected = False + self.inputmode = 0 # 0 = username, 1 = password... 
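For context, the SshConn class being added here is consumed by the new enos.py plugin earlier in this patch; a typical call sequence looks roughly like the following, where the node name and credentials are placeholders:

# Hypothetical usage, mirroring enos_login()/enos_version() above.
nssh = SshConn(node='switch1', config=cfg, username='admin', password='secret')
nssh.do_logon()
if nssh.connected:
    out, err = nssh.exec_command('show', ['version'])
    nssh.close()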
+
+    def __del__(self):
+        if self.connected:
+            self.close()
+
+    def do_logon(self):
+        self.ssh = paramiko.SSHClient()
+        self.ssh.set_missing_host_key_policy(
+            HostKeyHandler(self.nodeconfig, self.node))
+        log.log({'info': f"Connecting to {self.node} by ssh"})
+        try:
+            if self.password:
+                self.ssh.connect(self.node, username=self.username,
+                                 password=self.password, allow_agent=False,
+                                 look_for_keys=False)
+            else:
+                self.ssh.connect(self.node, username=self.username)
+        except paramiko.AuthenticationException as e:
+            self.ssh.close()
+            self.inputmode = 0
+            self.username = b''
+            self.password = b''
+            log.log({'warn': f"Error connecting to {self.node}: {str(e)}"})
+            return
+        except paramiko.ssh_exception.NoValidConnectionsError as e:
+            self.ssh.close()
+            self.inputmode = 0
+            self.username = b''
+            self.password = b''
+            log.log({'warn': f"Error connecting to {self.node}: {str(e)}"})
+            return
+        except cexc.PubkeyInvalid as pi:
+            self.ssh.close()
+            self.keyaction = b''
+            self.candidatefprint = pi.fingerprint
+            log.log({'warn': pi.message})
+            self.keyattrname = pi.attrname
+            log.log({'info': f"New fingerprint: {pi.fingerprint}"})
+            self.inputmode = -1
+            return
+        except paramiko.SSHException as pi:
+            self.ssh.close()
+            self.inputmode = -2
+            warn = str(pi)
+            if warnhostkey:
+                warn += ' (Older cryptography package on this host only ' \
+                        'works with ed25519, check ssh startup on target ' \
+                        'and permissions on /etc/ssh/*key)\r\n'
+            log.log({'warn': warn})
+            return
+        except Exception as e:
+            self.ssh.close()
+            self.inputmode = 0
+            self.username = b''
+            self.password = b''
+            log.log({'warn': f"Error connecting to {self.node}: {str(e)}"})
+            return
+        self.inputmode = 2
+        self.connected = True
+        log.log({'info': f"Connected by ssh to {self.node}"})
+
+    def exec_command(self, cmd, cmdargs):
+        safecmd = cmd.translate(str.maketrans({"[": r"\[",
+                                               "]": r"\]",
+                                               "?": r"\?",
+                                               "!": r"\!",
+                                               "\\": r"\\",
+                                               "^": r"\^",
+                                               "$": r"\$",
+                                               " ": r"\ ",
+                                               "*": r"\*"}))
+        cmds = [safecmd]
+        for arg in cmdargs:
+            arg = arg.translate(str.maketrans({"[": r"\[",
+                                               "]": r"\]",
+                                               "?": r"\?",
+                                               "!": r"\!",
+                                               "\\": r"\\",
+                                               "^": r"\^",
+                                               "$": r"\$",
+                                               " ": r"\ ",
+                                               "*": r"\*"}))
+            arg = "%s" % (str(arg).replace(r"'", r"'\''"),)
+            cmds.append(arg)
+
+        runcmd = " ".join(cmds)
+        stdin, stdout, stderr = self.ssh.exec_command(runcmd)
+        rcode = stdout.channel.recv_exit_status()
+        return stdout.readlines(), stderr.readlines()
+
+    def close(self):
+        if self.ssh is not None:
+            self.ssh.close()
+            log.log({'info': f"Disconnected from {self.node}"})
diff --git a/confluent_server/confluent/runansible.py b/confluent_server/confluent/runansible.py
index cbbecc58..50696742 100644
--- a/confluent_server/confluent/runansible.py
+++ b/confluent_server/confluent/runansible.py
@@ -32,6 +32,7 @@ anspypath = None
 running_status = {}
 class PlayRunner(object):
     def __init__(self, playfiles, nodes):
+        self.stderr = ''
         self.playfiles = playfiles
         self.nodes = nodes
         self.worker = None
@@ -63,6 +64,9 @@ class PlayRunner(object):
             else:
                 textout += result['state'] + '\n'
             textout += '\n'
+        if self.stderr:
+            textout += "ERRORS **********************************\n"
+            textout += self.stderr
         return textout

     def dump_json(self):
@@ -93,7 +97,8 @@ class PlayRunner(object):
                 [mypath, __file__, targnodes, playfilename],
                 stdin=devnull, stdout=subprocess.PIPE,
                 stderr=subprocess.PIPE)
-            stdout, self.stderr = worker.communicate()
+            stdout, stder = worker.communicate()
+            self.stderr += stder.decode('utf8')
             current = memoryview(stdout)
             while len(current):
                 sz = 
struct.unpack('=q', current[:8])[0] diff --git a/confluent_server/confluent/selfservice.py b/confluent_server/confluent/selfservice.py index 04030491..b7577b92 100644 --- a/confluent_server/confluent/selfservice.py +++ b/confluent_server/confluent/selfservice.py @@ -19,19 +19,38 @@ import json import os import time import yaml +try: + from yaml import CSafeDumper as SafeDumper + from yaml import CSafeLoader as SafeLoader +except ImportError: + from yaml import SafeLoader + from yaml import SafeDumper import confluent.discovery.protocols.ssdp as ssdp import eventlet webclient = eventlet.import_patched('pyghmi.util.webclient') -currtz = None +currtz = 'UTC' keymap = 'us' currlocale = 'en_US.UTF-8' currtzvintage = None def yamldump(input): - return yaml.safe_dump(input, default_flow_style=False) + return yaml.dump_all([input], Dumper=SafeDumper, default_flow_style=False) + +def yamlload(input): + return yaml.load(input, Loader=SafeLoader) + +def listdump(input): + # special case yaml for flat dumb list + # this is about 25x faster than doing full yaml dump even with CSafeDumper + # with a 17,000 element list + retval = '' + for entry in input: + retval += '- ' + entry + '\n' + return retval + def get_extra_names(nodename, cfg, myip=None): names = set([]) @@ -263,7 +282,7 @@ def handle_request(env, start_response): ifidx = int(nici.read()) ncfg = netutil.get_nic_config(cfg, nodename, ifidx=ifidx) else: - ncfg = netutil.get_nic_config(cfg, nodename, serverip=myip) + ncfg = netutil.get_nic_config(cfg, nodename, serverip=myip, clientip=clientip) if env['PATH_INFO'] == '/self/deploycfg': for key in list(ncfg): if 'v6' in key: @@ -359,6 +378,8 @@ def handle_request(env, start_response): tdc = util.run(['timedatectl'])[0].split(b'\n') except subprocess.CalledProcessError: tdc = [] + currtzvintage = time.time() + ncfg['timezone'] = currtz for ent in tdc: ent = ent.strip() if ent.startswith(b'Time zone:'): @@ -402,10 +423,13 @@ def handle_request(env, start_response): yield node + '\n' else: start_response('200 OK', (('Content-Type', retype),)) - yield dumper(list(util.natural_sort(nodes))) + if retype == 'application/yaml': + yield listdump(list(util.natural_sort(nodes))) + else: + yield dumper(list(util.natural_sort(nodes))) elif env['PATH_INFO'] == '/self/remoteconfigbmc' and reqbody: try: - reqbody = yaml.safe_load(reqbody) + reqbody = yamlload(reqbody) except Exception: reqbody = None cfgmod = reqbody.get('configmod', 'unspecified') @@ -419,7 +443,7 @@ def handle_request(env, start_response): start_response('200 Ok', ()) yield 'complete' elif env['PATH_INFO'] == '/self/updatestatus' and reqbody: - update = yaml.safe_load(reqbody) + update = yamlload(reqbody) statusstr = update.get('state', None) statusdetail = update.get('state_detail', None) didstateupdate = False @@ -495,8 +519,8 @@ def handle_request(env, start_response): pals = get_extra_names(nodename, cfg, myip) result = syncfiles.start_syncfiles( nodename, cfg, json.loads(reqbody), pals) - start_response(result, ()) - yield '' + start_response(result[0], ()) + yield result[1] return if 'GET' == operation: status, output = syncfiles.get_syncresult(nodename) @@ -522,7 +546,7 @@ def handle_request(env, start_response): '/var/lib/confluent/public/os/{0}/scripts/{1}') if slist: start_response('200 OK', (('Content-Type', 'application/yaml'),)) - yield yaml.safe_dump(util.natural_sort(slist), default_flow_style=False) + yield yamldump(util.natural_sort(slist)) else: start_response('200 OK', ()) yield '' diff --git 
a/confluent_server/confluent/sockapi.py b/confluent_server/confluent/sockapi.py index 2d4db15b..86534767 100644 --- a/confluent_server/confluent/sockapi.py +++ b/confluent_server/confluent/sockapi.py @@ -70,15 +70,17 @@ try: # so we need to ffi that in using a strategy compatible with PyOpenSSL import OpenSSL.SSL as libssln import OpenSSL.crypto as crypto - from OpenSSL._util import ffi except ImportError: libssl = None - ffi = None crypto = None plainsocket = None libc = ctypes.CDLL(ctypes.util.find_library('c')) +libsslc = ctypes.CDLL(ctypes.util.find_library('ssl')) +libsslc.SSL_CTX_set_cert_verify_callback.argtypes = [ + ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] + def _should_authlog(path, operation): if (operation == 'retrieve' and @@ -389,11 +391,24 @@ def _tlshandler(bind_host, bind_port): else: eventlet.spawn_n(_tlsstartup, cnn) +@ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p) +def verify_stub(store, misc): + return 1 + +class PyObject_HEAD(ctypes.Structure): + _fields_ = [ + ("ob_refcnt", ctypes.c_ssize_t), + ("ob_type", ctypes.c_void_p), + ] + + +# see main/Modules/_ssl.c, only caring about the SSL_CTX pointer +class PySSLContext(ctypes.Structure): + _fields_ = [ + ("ob_base", PyObject_HEAD), + ("ctx", ctypes.c_void_p), + ] -if ffi: - @ffi.callback("int(*)( X509_STORE_CTX *, void*)") - def verify_stub(store, misc): - return 1 def _tlsstartup(cnn): @@ -416,8 +431,8 @@ def _tlsstartup(cnn): ctx.use_certificate_file('/etc/confluent/srvcert.pem') ctx.use_privatekey_file('/etc/confluent/privkey.pem') ctx.set_verify(libssln.VERIFY_PEER, lambda *args: True) - libssln._lib.SSL_CTX_set_cert_verify_callback(ctx._context, - verify_stub, ffi.NULL) + ssl_ctx = PySSLContext.from_address(id(ctx._context)).ctx + libsslc.SSL_CTX_set_cert_verify_callback(ssl_ctx, verify_stub, 0) cnn = libssl.Connection(ctx, cnn) cnn.set_accept_state() cnn.do_handshake() diff --git a/confluent_server/confluent/sshutil.py b/confluent_server/confluent/sshutil.py index 0a52fe81..40512648 100644 --- a/confluent_server/confluent/sshutil.py +++ b/confluent_server/confluent/sshutil.py @@ -98,14 +98,15 @@ def initialize_ca(): preexec_fn=normalize_uid) ouid = normalize_uid() try: - os.makedirs('/var/lib/confluent/public/site/ssh/', mode=0o755) - except OSError as e: - if e.errno != 17: - raise + try: + os.makedirs('/var/lib/confluent/public/site/ssh/', mode=0o755) + except OSError as e: + if e.errno != 17: + raise + cafilename = '/var/lib/confluent/public/site/ssh/{0}.ca'.format(myname) + shutil.copy('/etc/confluent/ssh/ca.pub', cafilename) finally: os.seteuid(ouid) - cafilename = '/var/lib/confluent/public/site/ssh/{0}.ca'.format(myname) - shutil.copy('/etc/confluent/ssh/ca.pub', cafilename) # newent = '@cert-authority * ' + capub.read() @@ -129,11 +130,21 @@ def prep_ssh_key(keyname): ap.write('#!/bin/sh\necho $CONFLUENT_SSH_PASSPHRASE\nrm {0}\n'.format(askpass)) os.chmod(askpass, 0o700) os.environ['CONFLUENT_SSH_PASSPHRASE'] = get_passphrase() + olddisplay = os.environ.get('DISPLAY', None) + oldaskpass = os.environ.get('SSH_ASKPASS', None) os.environ['DISPLAY'] = 'NONE' os.environ['SSH_ASKPASS'] = askpass - with open(os.devnull, 'wb') as devnull: - subprocess.check_output(['ssh-add', keyname], stdin=devnull, stderr=devnull) - del os.environ['CONFLUENT_SSH_PASSPHRASE'] + try: + with open(os.devnull, 'wb') as devnull: + subprocess.check_output(['ssh-add', keyname], stdin=devnull, stderr=devnull) + finally: + del os.environ['CONFLUENT_SSH_PASSPHRASE'] + del os.environ['DISPLAY'] + del 
os.environ['SSH_ASKPASS'] + if olddisplay: + os.environ['DISPLAY'] = olddisplay + if oldaskpass: + os.environ['SSH_ASKPASS'] = oldaskpass ready_keys[keyname] = 1 finally: adding_key = False @@ -175,6 +186,14 @@ def initialize_root_key(generate, automation=False): if os.path.exists('/etc/confluent/ssh/automation'): alreadyexist = True else: + ouid = normalize_uid() + try: + os.makedirs('/etc/confluent/ssh', mode=0o700) + except OSError as e: + if e.errno != 17: + raise + finally: + os.seteuid(ouid) subprocess.check_call( ['ssh-keygen', '-t', 'ed25519', '-f','/etc/confluent/ssh/automation', '-N', get_passphrase(), @@ -194,15 +213,18 @@ def initialize_root_key(generate, automation=False): suffix = 'automationpubkey' else: suffix = 'rootpubkey' + keyname = '/var/lib/confluent/public/site/ssh/{0}.{1}'.format( + myname, suffix) + if authorized: + with open(keyname, 'w'): + pass for auth in authorized: - shutil.copy( - auth, - '/var/lib/confluent/public/site/ssh/{0}.{1}'.format( - myname, suffix)) - os.chmod('/var/lib/confluent/public/site/ssh/{0}.{1}'.format( - myname, suffix), 0o644) - os.chown('/var/lib/confluent/public/site/ssh/{0}.{1}'.format( - myname, suffix), neededuid, -1) + with open(auth, 'r') as local_key: + with open(keyname, 'a') as dest: + dest.write(local_key.read()) + if os.path.exists(keyname): + os.chmod(keyname, 0o644) + os.chown(keyname, neededuid, -1) if alreadyexist: raise AlreadyExists() diff --git a/confluent_server/confluent/syncfiles.py b/confluent_server/confluent/syncfiles.py index 556d9bcf..16cf4c49 100644 --- a/confluent_server/confluent/syncfiles.py +++ b/confluent_server/confluent/syncfiles.py @@ -24,6 +24,7 @@ import confluent.noderange as noderange import eventlet import pwd import grp +import sys def mkdirp(path): try: @@ -193,8 +194,8 @@ def sync_list_to_node(sl, node, suffixes, peerip=None): targip = node if peerip: targip = peerip - output = util.run( - ['rsync', '-rvLD', targdir + '/', 'root@[{}]:/'.format(targip)])[0] + output, stderr = util.run( + ['rsync', '-rvLD', targdir + '/', 'root@[{}]:/'.format(targip)]) except Exception as e: if 'CalledProcessError' not in repr(e): # https://github.com/eventlet/eventlet/issues/413 @@ -212,6 +213,11 @@ def sync_list_to_node(sl, node, suffixes, peerip=None): unreadablefiles.append(filename.replace(targdir, '')) if unreadablefiles: raise Exception("Syncing failed due to unreadable files: " + ','.join(unreadablefiles)) + elif hasattr(e, 'stderr') and e.stderr and b'Permission denied, please try again.' in e.stderr: + raise Exception('Syncing failed due to authentication error, is the confluent automation key not set up (osdeploy initialize -a) or is there some process replacing authorized_keys on the host?') + elif hasattr(e, 'stderr') and e.stderr: + sys.stderr.write(e.stderr.decode('utf8')) + raise else: raise finally: @@ -229,7 +235,7 @@ def stage_ent(currmap, ent, targdir, appendexist=False): everyfent = [] allfents = ent.split() for tmpent in allfents: - fents = glob.glob(tmpent) + fents = glob.glob(tmpent) # TODO: recursive globbing? 
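On the TODO above: if recursive patterns are wanted later, Python's glob already supports them when '**' is paired with recursive=True, so a possible follow-up (not part of this patch) would be:

# Hypothetical follow-up for the TODO: honor '**' in synclist patterns.
fents = glob.glob(tmpent, recursive=True)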
if not fents: raise Exception('No matching files for "{}"'.format(tmpent)) everyfent.extend(fents) @@ -279,9 +285,10 @@ def mkpathorlink(source, destination, appendexist=False): syncrunners = {} - +cleaner = None def start_syncfiles(nodename, cfg, suffixes, principals=[]): + global cleaner peerip = None if 'myips' in suffixes: targips = suffixes['myips'] @@ -305,13 +312,41 @@ def start_syncfiles(nodename, cfg, suffixes, principals=[]): raise Exception('Cannot perform syncfiles without profile assigned') synclist = '/var/lib/confluent/public/os/{}/syncfiles'.format(profile) if not os.path.exists(synclist): - return '200 OK' # not running + return '200 OK', 'No synclist' # not running sl = SyncList(synclist, nodename, cfg) if not (sl.appendmap or sl.mergemap or sl.replacemap or sl.appendoncemap): - return '200 OK' # the synclist has no actual entries + return '200 OK', 'Empty synclist' # the synclist has no actual entries + if nodename in syncrunners: + if syncrunners[nodename].dead: + syncrunners[nodename].wait() + else: + return '503 Synchronization already in progress', 'Synchronization already in progress for {}'.format(nodename) syncrunners[nodename] = eventlet.spawn( sync_list_to_node, sl, nodename, suffixes, peerip) - return '202 Queued' # backgrounded + if not cleaner: + cleaner = eventlet.spawn(cleanit) + return '202 Queued', 'Background synchronization initiated' # backgrounded + + +def cleanit(): + toreap = {} + while True: + for nn in list(syncrunners): + if syncrunners[nn].dead: + if nn in toreap: + try: + syncrunners[nn].wait() + except Exception as e: + print(repr(e)) + pass + del syncrunners[nn] + del toreap[nn] + else: + toreap[nn] = 1 + elif nn in toreap: + del toreap[nn] + eventlet.sleep(30) + def get_syncresult(nodename): if nodename not in syncrunners: diff --git a/confluent_server/confluent/util.py b/confluent_server/confluent/util.py index 1509a827..462ec930 100644 --- a/confluent_server/confluent/util.py +++ b/confluent_server/confluent/util.py @@ -29,9 +29,9 @@ import struct import eventlet.green.subprocess as subprocess -def mkdirp(path): +def mkdirp(path, mode=0o777): try: - os.makedirs(path) + os.makedirs(path, mode) except OSError as e: if e.errno != 17: raise @@ -42,7 +42,7 @@ def run(cmd): stdout, stderr = process.communicate() retcode = process.poll() if retcode: - raise subprocess.CalledProcessError(retcode, process.args, output=stdout) + raise subprocess.CalledProcessError(retcode, process.args, output=stdout, stderr=stderr) return stdout, stderr @@ -168,7 +168,7 @@ def cert_matches(fingerprint, certificate): return algo(certificate).digest() == fingerprint algo, _, fp = fingerprint.partition('$') newfp = None - if algo in ('sha512', 'sha256'): + if algo in ('sha512', 'sha256', 'sha384'): newfp = get_fingerprint(certificate, algo) return newfp and fingerprint == newfp diff --git a/confluent_server/confluent/vinzmanager.py b/confluent_server/confluent/vinzmanager.py new file mode 100644 index 00000000..308acb59 --- /dev/null +++ b/confluent_server/confluent/vinzmanager.py @@ -0,0 +1,217 @@ + +import confluent.auth as auth +import eventlet +import confluent.messages as msg +import confluent.exceptions as exc +import confluent.util as util +import confluent.config.configmanager as configmanager +import struct +import eventlet.green.socket as socket +import eventlet.green.subprocess as subprocess +import base64 +import os +import pwd +import confluent.httpapi as httpapi +mountsbyuser = {} +_vinzfd = None +_vinztoken = None +webclient = 
eventlet.import_patched('pyghmi.util.webclient') + + +# Handle the vinz VNC session +def assure_vinz(): + global _vinzfd + global _vinztoken + if _vinzfd is None: + _vinztoken = base64.b64encode(os.urandom(33), altchars=b'_-').decode() + os.environ['VINZ_TOKEN'] = _vinztoken + os.makedirs('/var/run/confluent/vinz/sessions', exist_ok=True) + + _vinzfd = subprocess.Popen( + ['/opt/confluent/bin/vinz', + '-c', '/var/run/confluent/vinz/control', + '-w', '127.0.0.1:4007', + '-a', '/var/run/confluent/vinz/approval', + # vinz supports unix domain websocket, however apache reverse proxy is dicey that way in some versions + '-d', '/var/run/confluent/vinz/sessions']) + while not os.path.exists('/var/run/confluent/vinz/control'): + eventlet.sleep(0.5) + eventlet.spawn(monitor_requests) + +_unix_by_nodename = {} +def get_url(nodename, inputdata): + method = inputdata.inputbynode[nodename] + assure_vinz() + if method == 'wss': + return f'/vinz/kvmsession/{nodename}' + elif method == 'unix': + if nodename not in _unix_by_nodename or not os.path.exists(_unix_by_nodename[nodename]): + _unix_by_nodename[nodename] = request_session(nodename) + return _unix_by_nodename[nodename] + + +_usersessions = {} +def close_session(sessionid): + sessioninfo = _usersessions.get(sessionid, None) + if not sessioninfo: + return + del _usersessions[sessionid] + nodename = sessioninfo['nodename'] + wc = sessioninfo['webclient'] + cfg = configmanager.ConfigManager(None) + c = cfg.get_node_attributes( + nodename, + ['secret.hardwaremanagementuser', + 'secret.hardwaremanagementpassword', + ], decrypt=True) + bmcuser = c.get(nodename, {}).get( + 'secret.hardwaremanagementuser', {}).get('value', None) + bmcpass = c.get(nodename, {}).get( + 'secret.hardwaremanagementpassword', {}).get('value', None) + if not isinstance(bmcuser, str): + bmcuser = bmcuser.decode() + if not isinstance(bmcpass, str): + bmcpass = bmcpass.decode() + if bmcuser and bmcpass: + wc.grab_json_response_with_status( + '/logout', {'data': [bmcuser, bmcpass]}, + headers={ + 'Content-Type': 'application/json', + 'Accept': 'application/json', + 'X-XSRF-TOKEN': wc.cookies['XSRF-TOKEN']}) + + +def send_grant(conn, nodename): + cfg = configmanager.ConfigManager(None) + c = cfg.get_node_attributes( + nodename, + ['secret.hardwaremanagementuser', + 'secret.hardwaremanagementpassword', + 'hardwaremanagement.manager'], decrypt=True) + bmcuser = c.get(nodename, {}).get( + 'secret.hardwaremanagementuser', {}).get('value', None) + bmcpass = c.get(nodename, {}).get( + 'secret.hardwaremanagementpassword', {}).get('value', None) + bmc = c.get(nodename, {}).get( + 'hardwaremanagement.manager', {}).get('value', None) + if bmcuser and bmcpass and bmc: + kv = util.TLSCertVerifier(cfg, nodename, + 'pubkeys.tls_hardwaremanager').verify_cert + wc = webclient.SecureHTTPConnection(bmc, 443, verifycallback=kv) + if not isinstance(bmcuser, str): + bmcuser = bmcuser.decode() + if not isinstance(bmcpass, str): + bmcpass = bmcpass.decode() + rsp = wc.grab_json_response_with_status( + '/login', {'data': [bmcuser, bmcpass]}, + headers={'Content-Type': 'application/json', + 'Accept': 'application/json'}) + sessionid = wc.cookies['SESSION'] + sessiontok = wc.cookies['XSRF-TOKEN'] + _usersessions[sessionid] = { + 'webclient': wc, + 'nodename': nodename, + } + url = '/kvm/0' + fprintinfo = cfg.get_node_attributes(nodename, 'pubkeys.tls_hardwaremanager') + fprint = fprintinfo.get( + nodename, {}).get('pubkeys.tls_hardwaremanager', {}).get('value', None) + if not fprint: + return + fprint = 
+def evaluate_request(conn):
+    allow = False
+    authname = None
+    try:
+        creds = conn.getsockopt(socket.SOL_SOCKET, socket.SO_PEERCRED,
+                                struct.calcsize('iII'))
+        pid, uid, gid = struct.unpack('iII', creds)
+        if uid != os.getuid():
+            return
+        rqcode, fieldlen = struct.unpack('!BI', conn.recv(5))
+        authtoken = conn.recv(fieldlen).decode()
+        if authtoken != _vinztoken:
+            return
+        if rqcode == 2:  # disconnect notification
+            fieldlen = struct.unpack('!I', conn.recv(4))[0]
+            sessionid = conn.recv(fieldlen).decode()
+            close_session(sessionid)
+            conn.recv(1)  # consume the 0xff terminator
+        if rqcode == 1:  # request for new connection
+            fieldlen = struct.unpack('!I', conn.recv(4))[0]
+            nodename = conn.recv(fieldlen).decode()
+            idtype = struct.unpack('!B', conn.recv(1))[0]
+            if idtype == 1:
+                usernum = struct.unpack('!I', conn.recv(4))[0]
+                if usernum == 0:  # uid 0 (root) is implicitly authorized
+                    send_grant(conn, nodename)
+                    return
+                try:
+                    authname = pwd.getpwuid(usernum).pw_name
+                except Exception:
+                    return
+            elif idtype == 2:
+                fieldlen = struct.unpack('!I', conn.recv(4))[0]
+                sessionid = conn.recv(fieldlen)
+                fieldlen = struct.unpack('!I', conn.recv(4))[0]
+                sessiontok = conn.recv(fieldlen)
+                try:
+                    authname = httpapi.get_user_for_session(sessionid, sessiontok)
+                except Exception:
+                    return
+            else:
+                return
+            conn.recv(1)  # should be 0xff
+            if authname:
+                allow = auth.authorize(authname, f'/nodes/{nodename}/console/ikvm')
+            if allow:
+                send_grant(conn, nodename)
+    finally:
+        conn.close()
+
+def monitor_requests():
+    a = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+    try:
+        os.remove('/var/run/confluent/vinz/approval')
+    except Exception:
+        pass
+    a.bind('/var/run/confluent/vinz/approval')
+    os.chmod('/var/run/confluent/vinz/approval', 0o600)
+    a.listen(8)
+    while True:
+        conn, addr = a.accept()
+        eventlet.spawn_n(evaluate_request, conn)
+
+def request_session(nodename):
+    assure_vinz()
+    a = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+    a.connect('/var/run/confluent/vinz/control')
+    nodename = nodename.encode()
+    a.send(struct.pack('!BI', 1, len(nodename)))
+    a.send(nodename)
+    a.send(b'\xff')
+    rsp = a.recv(1)
+    retcode = struct.unpack('!B', rsp)[0]
+    if retcode != 1:
+        raise Exception("Bad return code")
+    rsp = a.recv(4)
+    nlen = struct.unpack('!I', rsp)[0]
+    sockname = a.recv(nlen).decode('utf8')
+    retcode = a.recv(1)
+    if retcode != b'\xff':
+        raise Exception("Unrecognized response")
+    return os.path.join('/var/run/confluent/vinz/sessions', sockname)
+
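The approval socket above trusts only peers running as the same uid, verified via SO_PEERCRED before any protocol parsing happens. A standalone sketch of that check (assumption: Linux, where the `iII` layout for pid/uid/gid matches what evaluate_request unpacks):

```python
import os
import socket
import struct

def peer_is_same_user(conn):
    # SO_PEERCRED yields the connecting process's pid, uid and gid
    # as three native ints on Linux
    creds = conn.getsockopt(socket.SOL_SOCKET, socket.SO_PEERCRED,
                            struct.calcsize('iII'))
    pid, uid, gid = struct.unpack('iII', creds)
    return uid == os.getuid()
```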
diff --git a/confluent_server/confluent_server.spec.tmpl b/confluent_server/confluent_server.spec.tmpl
index c7e2aa3a..0a36390c 100644
--- a/confluent_server/confluent_server.spec.tmpl
+++ b/confluent_server/confluent_server.spec.tmpl
@@ -1,12 +1,16 @@
 %define name confluent_server
 %define version #VERSION#
+%define fversion %{lua:
+sv, _ = string.gsub("#VERSION#", "[~+]", "-")
+print(sv)
+}
 %define release 1
 Summary: confluent systems management server
 Name: %{name}
 Version: %{version}
 Release: %{release}
-Source0: %{name}-%{version}.tar.gz
+Source0: %{name}-%{fversion}.tar.gz
 License: Apache2
 Group: Development/Libraries
 BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot
@@ -14,15 +18,15 @@ Prefix: %{_prefix}
 BuildArch: noarch
 Requires: confluent_vtbufferd
 %if "%{dist}" == ".el7"
-Requires: python-pyghmi >= 1.0.34, python-eventlet, python-greenlet, python-pycryptodomex >= 3.4.7, confluent_client == %{version}, python-pyparsing, python-paramiko, python-dnspython, python-netifaces, python2-pyasn1 >= 0.2.3, python-pysnmp >= 4.3.4, python-lxml, python-eficompressor, python-setuptools, python-dateutil, python-websocket-client python2-msgpack python-libarchive-c python-yaml python-monotonic
+Requires: python-pyghmi >= 1.5.71, python-eventlet, python-greenlet, python-pycryptodomex >= 3.4.7, confluent_client == %{version}, python-pyparsing, python-paramiko, python-dnspython, python-netifaces, python2-pyasn1 >= 0.2.3, python-pysnmp >= 4.3.4, python-lxml, python-eficompressor, python-setuptools, python-dateutil, python-websocket-client python2-msgpack python-libarchive-c python-yaml python-monotonic cpio
 %else
 %if "%{dist}" == ".el8"
-Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodomex >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dns, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-enum34, python3-asn1crypto, python3-cffi, python3-pyOpenSSL, python3-monotonic, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute
+Requires: python3-pyghmi >= 1.5.71, python3-eventlet, python3-greenlet, python3-pycryptodomex >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dns, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-enum34, python3-asn1crypto, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute cpio
 %else
 %if "%{dist}" == ".el9"
-Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodomex >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dns, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute
+Requires: python3-pyghmi >= 1.5.71, python3-eventlet, python3-greenlet, python3-pycryptodomex >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dns, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-yaml openssl iproute cpio
 %else
-Requires: python3-pyghmi >= 1.0.34, python3-eventlet, python3-greenlet, python3-pycryptodome >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dnspython, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-PyYAML openssl iproute
+Requires: python3-dbm, python3-pyghmi >= 1.5.71, python3-eventlet, python3-greenlet,
python3-pycryptodome >= 3.4.7, confluent_client == %{version}, python3-pyparsing, python3-paramiko, python3-dnspython, python3-netifaces, python3-pyasn1 >= 0.2.3, python3-pysnmp >= 4.3.4, python3-lxml, python3-eficompressor, python3-setuptools, python3-dateutil, python3-cffi, python3-pyOpenSSL, python3-websocket-client python3-msgpack python3-libarchive-c python3-PyYAML openssl iproute %endif %endif %endif @@ -33,7 +37,7 @@ Url: https://github.com/lenovo/confluent Server for console management and systems management aggregation %prep -%setup -n %{name}-%{version} -n %{name}-%{version} +%setup -n %{name}-%{fversion} %build %if "%{dist}" == ".el7" diff --git a/confluent_server/makesetup b/confluent_server/makesetup index f20e2d25..a34438d3 100755 --- a/confluent_server/makesetup +++ b/confluent_server/makesetup @@ -2,7 +2,11 @@ cd `dirname $0` VERSION=`git describe|cut -d- -f 1` NUMCOMMITS=`git describe|cut -d- -f 2` if [ "$NUMCOMMITS" != "$VERSION" ]; then - VERSION=$VERSION.dev$NUMCOMMITS.g`git describe|cut -d- -f 3` + LASTNUM=$(echo $VERSION|rev|cut -d . -f 1|rev) + LASTNUM=$((LASTNUM+1)) + FIRSTPART=$(echo $VERSION|rev|cut -d . -f 2- |rev) + VERSION=${FIRSTPART}.${LASTNUM} + VERSION=$VERSION~dev$NUMCOMMITS+`git describe|cut -d- -f 3` fi echo $VERSION > VERSION sed -e "s/#VERSION#/$VERSION/" setup.py.tmpl > setup.py diff --git a/confluent_server/requirements.txt b/confluent_server/requirements.txt index d0be6908..e69de29b 100644 --- a/confluent_server/requirements.txt +++ b/confluent_server/requirements.txt @@ -1,3 +0,0 @@ -confluent_client>=0.1 -pycrypto>=2.6 -pyparsing diff --git a/confluent_server/setup.py.tmpl b/confluent_server/setup.py.tmpl index e6bd08b2..8987e19d 100644 --- a/confluent_server/setup.py.tmpl +++ b/confluent_server/setup.py.tmpl @@ -19,12 +19,10 @@ setup( 'confluent/plugins/hardwaremanagement/', 'confluent/plugins/deployment/', 'confluent/plugins/console/', + 'confluent/plugins/info/', 'confluent/plugins/shell/', 'confluent/collective/', 'confluent/plugins/configuration/'], - install_requires=['paramiko', 'pycrypto>=2.6', 'confluent_client>=0.1.0', 'eventlet', - 'dnspython', 'netifaces', 'pysnmp', 'pyparsing', - 'pyghmi>=1.0.44'], scripts=['bin/confluent', 'bin/confluent_selfcheck', 'bin/confluentdbutil', 'bin/collective', 'bin/osdeploy'], data_files=[('/etc/init.d', ['sysvinit/confluent']), ('/usr/lib/sysctl.d', ['sysctl/confluent.conf']), diff --git a/confluent_server/systemd/confluent.service b/confluent_server/systemd/confluent.service index da0fee7b..598c23c9 100644 --- a/confluent_server/systemd/confluent.service +++ b/confluent_server/systemd/confluent.service @@ -16,7 +16,7 @@ Restart=on-failure AmbientCapabilities=CAP_NET_BIND_SERVICE CAP_SETUID CAP_SETGID CAP_CHOWN CAP_NET_RAW User=confluent Group=confluent -DevicePolicy=closed +#DevicePolicy=closed # fuse filesystem requires us to interact with /dev/fuse ProtectControlGroups=true ProtectSystem=true diff --git a/confluent_vtbufferd/NOTICE b/confluent_vtbufferd/NOTICE index 95b86a82..da174e81 100644 --- a/confluent_vtbufferd/NOTICE +++ b/confluent_vtbufferd/NOTICE @@ -22,3 +22,16 @@ modification, are permitted provided that the following conditions are met: * Neither the name of the copyright holder nor the names of contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
+
+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS
+* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS,
+* COPYRIGHT HOLDERS, OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/confluent_vtbufferd/builddeb b/confluent_vtbufferd/builddeb
index 36f41218..3a98315a 100755
--- a/confluent_vtbufferd/builddeb
+++ b/confluent_vtbufferd/builddeb
@@ -8,7 +8,11 @@ DSCARGS="--with-python3=True --with-python2=False"
 VERSION=`git describe|cut -d- -f 1`
 NUMCOMMITS=`git describe|cut -d- -f 2`
 if [ "$NUMCOMMITS" != "$VERSION" ]; then
-    VERSION=$VERSION.dev$NUMCOMMITS.g`git describe|cut -d- -f 3`
+    LASTNUM=$(echo $VERSION|rev|cut -d . -f 1|rev)
+    LASTNUM=$((LASTNUM+1))
+    FIRSTPART=$(echo $VERSION|rev|cut -d . -f 2- |rev)
+    VERSION=${FIRSTPART}.${LASTNUM}
+    VERSION=$VERSION~dev$NUMCOMMITS+`git describe|cut -d- -f 3`
 fi
 cd ..
 rm -rf /tmp/confluent
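The same version mangling recurs in buildrpm below and in makesetup earlier. Restated in Python (a sketch, not project code) to make the intent explicit: between tags, bump the last version component and append a `~dev` suffix, since `~` sorts before the final release in both rpm and dpkg version comparisons.

```python
def prerelease_version(describe):
    """Derive a package version from `git describe` output.

    '3.9.1' (exactly on a tag) passes through unchanged;
    '3.9.1-14-g1a2b3c4' becomes '3.9.2~dev14+g1a2b3c4',
    which sorts *before* an eventual final 3.9.2.
    """
    parts = describe.split('-')
    if len(parts) == 1:
        return parts[0]
    version, ncommits, ghash = parts[0], parts[1], parts[2]
    head, _, last = version.rpartition('.')
    return '{}.{}~dev{}+{}'.format(head, int(last) + 1, ncommits, ghash)

assert prerelease_version('3.9.1-14-g1a2b3c4') == '3.9.2~dev14+g1a2b3c4'
assert prerelease_version('3.9.1') == '3.9.1'
```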
diff --git a/confluent_vtbufferd/buildrpm b/confluent_vtbufferd/buildrpm
index 35feec59..9a20844d 100755
--- a/confluent_vtbufferd/buildrpm
+++ b/confluent_vtbufferd/buildrpm
@@ -1,7 +1,11 @@
 VERSION=`git describe|cut -d- -f 1`
 NUMCOMMITS=`git describe|cut -d- -f 2`
 if [ "$NUMCOMMITS" != "$VERSION" ]; then
-    VERSION=$VERSION.dev$NUMCOMMITS.g`git describe|cut -d- -f 3`
+    LASTNUM=$(echo $VERSION|rev|cut -d . -f 1|rev)
+    LASTNUM=$((LASTNUM+1))
+    FIRSTPART=$(echo $VERSION|rev|cut -d . -f 2- |rev)
+    VERSION=${FIRSTPART}.${LASTNUM}
+    VERSION=$VERSION~dev$NUMCOMMITS+`git describe|cut -d- -f 3`
 fi
 mkdir -p dist/confluent_vtbufferd-$VERSION
 cp ../LICENSE NOTICE *.c *.h Makefile dist/confluent_vtbufferd-$VERSION
diff --git a/confluent_vtbufferd/vtbufferd.c b/confluent_vtbufferd/vtbufferd.c
index e89269b4..bbcaba6b 100644
--- a/confluent_vtbufferd/vtbufferd.c
+++ b/confluent_vtbufferd/vtbufferd.c
@@ -1,8 +1,14 @@
+#include <sys/types.h>
+#define _GNU_SOURCE
 #include <locale.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <wchar.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <sys/epoll.h>
+#include <unistd.h>
 #include "tmt.h"
 #define HASHSIZE 2053
 #define MAXNAMELEN 256
@@ -10,13 +16,17 @@
 struct terment {
     struct terment *next;
     char *name;
+    int fd;
     TMT *vt;
 };
 
 #define SETNODE 1
 #define WRITE 2
 #define READBUFF 0
+#define CLOSECONN 3
+#define MAXEVTS 16
 static struct terment *buffers[HASHSIZE];
+static char* nodenames[HASHSIZE];
 
 unsigned long hash(char *str)
 /* djb2a */
@@ -37,10 +47,13 @@ TMT *get_termentbyname(char *name) {
     return NULL;
 }
 
-TMT *set_termentbyname(char *name) {
+TMT *set_termentbyname(char *name, int fd) {
     struct terment *ret;
     int idx;
 
+    if (nodenames[fd] == NULL) {
+        nodenames[fd] = strdup(name);
+    }
     idx = hash(name);
     for (ret = buffers[idx]; ret != NULL; ret = ret->next)
         if (strcmp(name, ret->name) == 0)
@@ -48,12 +61,13 @@ TMT *set_termentbyname(char *name) {
     ret = (struct terment *)malloc(sizeof(*ret));
     ret->next = buffers[idx];
     ret->name = strdup(name);
+    ret->fd = fd;
     ret->vt = tmt_open(31, 100, NULL, NULL, L"→←↑↓■◆▒°±▒┘┐┌└┼⎺───⎽├┤┴┬│≤≥π≠£•");
     buffers[idx] = ret;
     return ret->vt;
 }
 
-void dump_vt(TMT* outvt) {
+void dump_vt(TMT* outvt, int outfd) {
     const TMTSCREEN *out = tmt_screen(outvt);
     const TMTPOINT *curs = tmt_cursor(outvt);
     int line, idx, maxcol, maxrow;
@@ -67,9 +81,10 @@ void dump_vt(TMT* outvt) {
     tmt_color_t fg = TMT_COLOR_DEFAULT;
     tmt_color_t bg = TMT_COLOR_DEFAULT;
     wchar_t sgrline[30];
+    char strbuffer[128];
     size_t srgidx = 0;
     char colorcode = 0;
-    wprintf(L"\033c");
+    write(outfd, "\033c", 2);
     maxcol = 0;
     maxrow = 0;
     for (line = out->nline - 1; line >= 0; --line) {
@@ -148,60 +163,145 @@
             }
             if (sgrline[0] != 0) {
                 sgrline[wcslen(sgrline) - 1] = 0;  // Trim last ;
-                wprintf(L"\033[%lsm", sgrline);
+
+                snprintf(strbuffer, sizeof(strbuffer), "\033[%lsm", sgrline);
+                write(outfd, strbuffer, strlen(strbuffer));
+                write(outfd, "\033[]", 3);
             }
-            wprintf(L"%lc", out->lines[line]->chars[idx].c);
+            snprintf(strbuffer, sizeof(strbuffer), "%lc", out->lines[line]->chars[idx].c);
+            write(outfd, strbuffer, strlen(strbuffer));
         }
         if (line < maxrow)
-            wprintf(L"\r\n");
+            write(outfd, "\r\n", 2);
     }
-    fflush(stdout);
-    wprintf(L"\x1b[%ld;%ldH", curs->r + 1, curs->c + 1);
-    fflush(stdout);
+    snprintf(strbuffer, sizeof(strbuffer), "\x1b[%ld;%ldH", curs->r + 1, curs->c + 1);
+    write(outfd, strbuffer, strlen(strbuffer));
+}
+
+int handle_traffic(int fd) {
+    int cmd, length;
+    char currnode[MAXNAMELEN];
+    char cmdbuf[MAXDATALEN];
+    char *nodename;
+    TMT *currvt = NULL;
+    TMT *outvt = NULL;
+    length = read(fd, &cmd, 4);
+    if (length <= 0) {
+        return 0;
+    }
+    length = cmd & 536870911;  // low 29 bits carry the payload length
+    cmd = cmd >> 29;           // high 3 bits carry the command
+    if (cmd == SETNODE) {
+        cmd = read(fd, currnode, length);
+        currnode[length] = 0;
+        if (cmd < 0)
+            return 0;
+        currvt = set_termentbyname(currnode, fd);
+    } else if (cmd == WRITE) {
+        if (currvt == NULL) {
+            nodename = nodenames[fd];
+            currvt = set_termentbyname(nodename, fd);
+        }
+        cmd = read(fd, cmdbuf, length);
+        cmdbuf[length] = 0;
+        if (cmd < 0)
+            return 0;
+        tmt_write(currvt, cmdbuf, length);
+    } else if (cmd == READBUFF) {
+        cmd = read(fd, cmdbuf, length);
+        cmdbuf[length] = 0;
+        if (cmd < 0)
+            return 0;
+        outvt = get_termentbyname(cmdbuf);
+        if (outvt != NULL)
+            dump_vt(outvt, fd);
+        length = write(fd, "\x00", 1);
+        if (length < 0)
+            return 0;
+    } else if (cmd == CLOSECONN) {
+        return 0;
+    }
+    return 1;
+}
 
 int main(int argc, char* argv[]) {
-    int cmd, length;
     setlocale(LC_ALL, "");
-    char cmdbuf[MAXDATALEN];
-    char currnode[MAXNAMELEN];
-    TMT *currvt = NULL;
-    TMT *outvt = NULL;
+    struct sockaddr_un addr;
+    int numevts;
+    int status;
+    int poller;
+    int n, rt;
+    socklen_t len;
+    int ctlsock = 0;
+    int currsock = 0;
+    socklen_t addrlen = 0;
+    struct ucred ucr;
+
+    struct epoll_event epvt, evts[MAXEVTS];
     stdin = freopen(NULL, "rb", stdin);
     if (stdin == NULL) {
         exit(1);
     }
+    memset(&addr, 0, sizeof(struct sockaddr_un));
+    addr.sun_family = AF_UNIX;
+    strncpy(addr.sun_path + 1, argv[1], sizeof(addr.sun_path) - 2);  // abstract namespace socket
+    ctlsock = socket(AF_UNIX, SOCK_STREAM, 0);
+    if (ctlsock < 0) {
+        perror("Unable to open unix socket - ");
+        exit(1);
+    }
+    status = bind(ctlsock, (const struct sockaddr*)&addr, sizeof(sa_family_t) + strlen(argv[1]) + 1);  //sizeof(struct sockaddr_un));
+    if (status < 0) {
+        perror("Unable to bind unix socket - ");
+        exit(1);
+    }
+    listen(ctlsock, 128);
+    poller = epoll_create(1);
+    memset(&epvt, 0, sizeof(struct epoll_event));
+    epvt.events = EPOLLIN;
+    epvt.data.fd = ctlsock;
+    if (epoll_ctl(poller, EPOLL_CTL_ADD, ctlsock, &epvt) < 0) {
+        perror("Unable to poll the socket");
+        exit(1);
+    }
+    // each connection is only allowed to either read or write, not both
     while (1) {
-        length = fread(&cmd, 4, 1, stdin);
-        if (length < 0)
-            continue;
-        length = cmd & 536870911;
-        cmd = cmd >> 29;
-        if (cmd == SETNODE) {
-            cmd = fread(currnode, 1, length, stdin);
-            currnode[length] = 0;
-            if (cmd < 0)
-                continue;
-            currvt = set_termentbyname(currnode);
-        } else if (cmd == WRITE) {
-            if (currvt == NULL)
-                currvt = set_termentbyname("");
-            cmd = fread(cmdbuf, 1, length, stdin);
-            cmdbuf[length] = 0;
-            if (cmd < 0)
-                continue;
-            tmt_write(currvt, cmdbuf, length);
-        } else if (cmd == READBUFF) {
-            cmd = fread(cmdbuf, 1, length, stdin);
-            cmdbuf[length] = 0;
-            if (cmd < 0)
-                continue;
-            outvt = get_termentbyname(cmdbuf);
-            if (outvt != NULL)
-                dump_vt(outvt);
-            length = write(1, "\x00", 1);
-            if (length < 0)
-                continue;
+        numevts = epoll_wait(poller, evts, MAXEVTS, -1);
+        if (numevts < 0) {
+            perror("Failed wait");
+            exit(1);
+        }
+        for (n = 0; n < numevts; ++n) {
+            if (evts[n].data.fd == ctlsock) {
+                currsock = accept(ctlsock, (struct sockaddr *) &addr, &addrlen);
+                len = sizeof(ucr);
+                rt = getsockopt(currsock, SOL_SOCKET, SO_PEERCRED, &ucr, &len);
+                if (rt < 0) {
+                    close(currsock);
+                    continue;
+                }
+                if (ucr.uid != getuid()) {  // block access for other users
+                    close(currsock);
+                    continue;
+                }
+                memset(&epvt, 0, sizeof(struct epoll_event));
+                epvt.events = EPOLLIN;
+                epvt.data.fd = currsock;
+                epoll_ctl(poller, EPOLL_CTL_ADD, currsock, &epvt);
+            } else {
+                if (!handle_traffic(evts[n].data.fd)) {
+                    epoll_ctl(poller, EPOLL_CTL_DEL, evts[n].data.fd, NULL);
+                    close(evts[n].data.fd);
+                    if (nodenames[evts[n].data.fd] != NULL) {
+                        free(nodenames[evts[n].data.fd]);
+                        nodenames[evts[n].data.fd] = NULL;
+                    }
+                }
+            }
         }
     }
 }
+
+
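For reference, a hypothetical Python client speaking the framing that handle_traffic() above expects: a native-endian 32-bit word whose top 3 bits select the command and whose low 29 bits give the payload length, sent over the abstract-namespace unix socket the daemon binds (the socket name is whatever argv[1] was; `vtbufferd` below is a made-up stand-in).

```python
import socket
import struct

READBUFF, SETNODE, WRITE, CLOSECONN = 0, 1, 2, 3

def send_cmd(sock, cmd, payload):
    # native byte order, matching the daemon's raw read() of an int
    sock.send(struct.pack('=I', (cmd << 29) | len(payload)))
    sock.send(payload)

s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
s.connect('\0vtbufferd')          # leading NUL selects the abstract namespace
send_cmd(s, SETNODE, b'n1')       # bind this connection to node n1
send_cmd(s, WRITE, b'hello')      # feed terminal output into n1's buffer
send_cmd(s, READBUFF, b'n1')      # ask for n1's rendered screen
buf = b''
while not buf.endswith(b'\x00'):  # the daemon terminates the dump with a NUL
    buf += s.recv(4096)
```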
diff --git a/genesis/97genesis/install-base b/genesis/97genesis/install-base
index 5d43f9b8..7fe1a976 100644
--- a/genesis/97genesis/install-base
+++ b/genesis/97genesis/install-base
@@
-5,16 +5,16 @@ dracut_install tpm2_create tpm2_pcrread tpm2_createpolicy tpm2_createprimary dracut_install tpm2_load tpm2_unseal tpm2_getcap tpm2_evictcontrol dracut_install tpm2_pcrextend tpm2_policypcr tpm2_flushcontext tpm2_startauthsession dracut_install openssl tar ipmitool cpio xz gzip lsmod ethtool -dracut_install modprobe touch echo cut wc bash netstat uniq grep ip hostname +dracut_install modprobe touch echo cut wc bash uniq grep ip hostname dracut_install awk egrep dirname bc expr sort dracut_install ssh sshd vi reboot lspci parted tmux mkfs mkfs.ext4 mkfs.xfs xfs_db mkswap dracut_install efibootmgr dracut_install du df ssh-keygen scp clear dhclient lldpd lldpcli tee -dracut_install /lib64/libnss_dns-2.28.so /lib64/libnss_dns.so.2 /lib64/libnss_myhostname.so.2 +dracut_install /lib64/libnss_dns.so.2 /lib64/libnss_dns.so.2 /lib64/libnss_myhostname.so.2 dracut_install ldd uptime /usr/lib64/libnl-3.so.200 dracut_install poweroff date /etc/nsswitch.conf /etc/services /etc/protocols dracut_install /usr/share/terminfo/x/xterm /usr/share/terminfo/l/linux /usr/share/terminfo/v/vt100 /usr/share/terminfo/x/xterm-color /usr/share/terminfo/s/screen /usr/share/terminfo/x/xterm-256color /usr/share/terminfo/p/putty-256color /usr/share/terminfo/p/putty /usr/share/terminfo/d/dumb -dracut_install chmod ifconfig whoami route head tail basename /etc/redhat-release ping tr lsusb /usr/share/hwdata/usb.ids +dracut_install chmod whoami head tail basename /etc/redhat-release ping tr /usr/share/hwdata/usb.ids dracut_install dmidecode /usr/lib64/libstdc++.so.6 dracut_install ps free find inst /bin/bash /bin/sh diff --git a/genesis/97genesis/install-python b/genesis/97genesis/install-python index afc16a44..c5dd6db7 100644 --- a/genesis/97genesis/install-python +++ b/genesis/97genesis/install-python @@ -2,36 +2,35 @@ #strace /usr/libexec/platform-python -c 'import hashlib; import socket; import argparse; import socket; import os; import http.client; import http.cookies; import subprocess; import base64; import ctypes; import struct; import urllib.parse; import shlex; import configparser' dracut_install /usr/libexec/platform-python dracut_install /etc/localtime -dracut_install /lib64/libffi.so.6 -dracut_install /lib64/libssl.so.1.1 -dracut_install /usr/lib64/python3.6/os.py +dracut_install /lib64/libffi.so.8 +dracut_install /lib64/libssl.so.3 +dracut_install /usr/lib64/python3.9/os.py dracut_install /usr/lib64/gconv/gconv-modules.cache -dracut_install /usr/lib64/python3.6 -dracut_install /usr/lib64/python3.6/collections -dracut_install /usr/lib64/python3.6/ctypes -dracut_install /usr/lib64/python3.6/email -dracut_install /usr/lib64/python3.6/encodings -dracut_install /usr/lib64/python3.6/http -dracut_install /usr/lib64/python3.6/lib-dynload -dracut_install /usr/lib64/python3.6/lib-dynload/fcntl.cpython-36m-x86_64-linux-gnu.so -dracut_install /usr/lib64/python3.6/lib-dynload/binascii.cpython-36m-x86_64-linux-gnu.so -dracut_install /usr/lib64/python3.6/lib-dynload/_bisect.cpython-36m-x86_64-linux-gnu.so -dracut_install /usr/lib64/python3.6/lib-dynload/_blake2.cpython-36m-x86_64-linux-gnu.so -dracut_install /usr/lib64/python3.6/lib-dynload/_ctypes.cpython-36m-x86_64-linux-gnu.so -dracut_install /usr/lib64/python3.6/lib-dynload/_datetime.cpython-36m-x86_64-linux-gnu.so -dracut_install /usr/lib64/python3.6/lib-dynload/_hashlib.cpython-36m-x86_64-linux-gnu.so -dracut_install /usr/lib64/python3.6/lib-dynload/_heapq.cpython-36m-x86_64-linux-gnu.so -dracut_install 
/usr/lib64/python3.6/lib-dynload/math.cpython-36m-x86_64-linux-gnu.so -dracut_install /usr/lib64/python3.6/lib-dynload/_posixsubprocess.cpython-36m-x86_64-linux-gnu.so -dracut_install /usr/lib64/python3.6/lib-dynload/_random.cpython-36m-x86_64-linux-gnu.so -dracut_install /usr/lib64/python3.6/lib-dynload/select.cpython-36m-x86_64-linux-gnu.so -dracut_install /usr/lib64/python3.6/lib-dynload/_sha3.cpython-36m-x86_64-linux-gnu.so -dracut_install /usr/lib64/python3.6/lib-dynload/_socket.cpython-36m-x86_64-linux-gnu.so -dracut_install /usr/lib64/python3.6/lib-dynload/_ssl.cpython-36m-x86_64-linux-gnu.so -dracut_install /usr/lib64/python3.6/lib-dynload/_struct.cpython-36m-x86_64-linux-gnu.so -dracut_install /usr/lib64/python3.6/lib-dynload/unicodedata.cpython-36m-x86_64-linux-gnu.so -dracut_install /usr/lib64/python3.6/site-packages -dracut_install /usr/lib64/python3.6/urllib +dracut_install /usr/lib64/python3.9 +dracut_install /usr/lib64/python3.9/collections +dracut_install /usr/lib64/python3.9/ctypes +dracut_install /usr/lib64/python3.9/email +dracut_install /usr/lib64/python3.9/encodings +dracut_install /usr/lib64/python3.9/http +dracut_install /usr/lib64/python3.9/lib-dynload +dracut_install /usr/lib64/python3.9/lib-dynload/fcntl.cpython-39-x86_64-linux-gnu.so +dracut_install /usr/lib64/python3.9/lib-dynload/binascii.cpython-39-x86_64-linux-gnu.so +dracut_install /usr/lib64/python3.9/lib-dynload/_bisect.cpython-39-x86_64-linux-gnu.so +dracut_install /usr/lib64/python3.9/lib-dynload/_blake2.cpython-39-x86_64-linux-gnu.so +dracut_install /usr/lib64/python3.9/lib-dynload/_ctypes.cpython-39-x86_64-linux-gnu.so +dracut_install /usr/lib64/python3.9/lib-dynload/_datetime.cpython-39-x86_64-linux-gnu.so +dracut_install /usr/lib64/python3.9/lib-dynload/_hashlib.cpython-39-x86_64-linux-gnu.so +dracut_install /usr/lib64/python3.9/lib-dynload/_heapq.cpython-39-x86_64-linux-gnu.so +dracut_install /usr/lib64/python3.9/lib-dynload/math.cpython-39-x86_64-linux-gnu.so +dracut_install /usr/lib64/python3.9/lib-dynload/_posixsubprocess.cpython-39-x86_64-linux-gnu.so +dracut_install /usr/lib64/python3.9/lib-dynload/_random.cpython-39-x86_64-linux-gnu.so +dracut_install /usr/lib64/python3.9/lib-dynload/select.cpython-39-x86_64-linux-gnu.so +dracut_install /usr/lib64/python3.9/lib-dynload/_socket.cpython-39-x86_64-linux-gnu.so +dracut_install /usr/lib64/python3.9/lib-dynload/_ssl.cpython-39-x86_64-linux-gnu.so +dracut_install /usr/lib64/python3.9/lib-dynload/_struct.cpython-39-x86_64-linux-gnu.so +dracut_install /usr/lib64/python3.9/lib-dynload/unicodedata.cpython-39-x86_64-linux-gnu.so +dracut_install /usr/lib64/python3.9/site-packages +dracut_install /usr/lib64/python3.9/urllib dracut_install /usr/lib/locale/en_US.utf8/LC_ADDRESS dracut_install /usr/lib/locale/en_US.utf8/LC_COLLATE dracut_install /usr/lib/locale/en_US.utf8/LC_CTYPE @@ -45,96 +44,94 @@ dracut_install /usr/lib/locale/en_US.utf8/LC_NUMERIC dracut_install /usr/lib/locale/en_US.utf8/LC_PAPER dracut_install /usr/lib/locale/en_US.utf8/LC_TELEPHONE dracut_install /usr/lib/locale/en_US.utf8/LC_TIME -dracut_install /usr/lib/python3.6/site-packages -dracut_install /usr/lib64/python3.6/argparse.py -dracut_install /usr/lib64/python3.6/codecs.py -dracut_install /usr/lib64/python3.6/encodings/aliases.py -dracut_install /usr/lib64/python3.6/encodings/utf_8.py -dracut_install /usr/lib64/python3.6/encodings/latin_1.py -dracut_install /usr/lib64/python3.6/encodings/ascii.py -dracut_install /usr/lib64/python3.6/encodings/idna.py -dracut_install 
/usr/lib64/python3.6/io.py -dracut_install /usr/lib64/python3.6/abc.py -dracut_install /usr/lib64/python3.6/_weakrefset.py -dracut_install /usr/lib64/python3.6/weakref.py -dracut_install /usr/lib64/python3.6/site.py -dracut_install /usr/lib64/python3.6/stat.py -dracut_install /usr/lib64/python3.6/posixpath.py -dracut_install /usr/lib64/python3.6/genericpath.py -dracut_install /usr/lib64/python3.6/_collections_abc.py -dracut_install /usr/lib64/python3.6/_sitebuiltins.py -dracut_install /usr/lib64/python3.6/sysconfig.py -dracut_install /usr/lib64/python3.6/_sysconfigdata_m_linux_x86_64-linux-gnu.py -dracut_install /usr/lib64/python3.6/encodings/__init__.py -dracut_install /usr/lib64/python3.6/socket.py -dracut_install /usr/lib64/python3.6/selectors.py +dracut_install /usr/lib/python3.9/site-packages +dracut_install /usr/lib64/python3.9/argparse.py +dracut_install /usr/lib64/python3.9/codecs.py +dracut_install /usr/lib64/python3.9/encodings/aliases.py +dracut_install /usr/lib64/python3.9/encodings/utf_8.py +dracut_install /usr/lib64/python3.9/encodings/latin_1.py +dracut_install /usr/lib64/python3.9/encodings/ascii.py +dracut_install /usr/lib64/python3.9/encodings/idna.py +dracut_install /usr/lib64/python3.9/io.py +dracut_install /usr/lib64/python3.9/abc.py +dracut_install /usr/lib64/python3.9/_weakrefset.py +dracut_install /usr/lib64/python3.9/weakref.py +dracut_install /usr/lib64/python3.9/site.py +dracut_install /usr/lib64/python3.9/stat.py +dracut_install /usr/lib64/python3.9/posixpath.py +dracut_install /usr/lib64/python3.9/genericpath.py +dracut_install /usr/lib64/python3.9/_collections_abc.py +dracut_install /usr/lib64/python3.9/_sitebuiltins.py +dracut_install /usr/lib64/python3.9/sysconfig.py +dracut_install /usr/lib64/python3.9/_sysconfigdata_d_linux_x86_64-linux-gnu.py +dracut_install /usr/lib64/python3.9/encodings/__init__.py +dracut_install /usr/lib64/python3.9/socket.py +dracut_install /usr/lib64/python3.9/selectors.py dracut_install /usr/share/locale/locale.alias -dracut_install /usr/lib64/python3.6/collections/__init__.py -dracut_install /usr/lib64/python3.6/operator.py -dracut_install /usr/lib64/python3.6/keyword.py -dracut_install /usr/lib64/python3.6/heapq.py -dracut_install /usr/lib64/python3.6/reprlib.py -dracut_install /usr/lib64/python3.6/enum.py -dracut_install /usr/lib64/python3.6/types.py -dracut_install /usr/lib64/python3.6/functools.py -dracut_install /usr/lib64/python3.6/collections/abc.py -dracut_install /usr/lib64/python3.6/http/client.py -dracut_install /usr/lib64/python3.6/email/parser.py -dracut_install /usr/lib64/python3.6/email/feedparser.py -dracut_install /usr/lib64/python3.6/re.py -dracut_install /usr/lib64/python3.6/sre_compile.py -dracut_install /usr/lib64/python3.6/sre_parse.py -dracut_install /usr/lib64/python3.6/sre_constants.py -dracut_install /usr/lib64/python3.6/copyreg.py -dracut_install /usr/lib64/python3.6/email/errors.py -dracut_install /usr/lib64/python3.6/email/_policybase.py -dracut_install /usr/lib64/python3.6/email/header.py -dracut_install /usr/lib64/python3.6/email/quoprimime.py -dracut_install /usr/lib64/python3.6/string.py -dracut_install /usr/lib64/python3.6/stringprep.py -dracut_install /usr/lib64/python3.6/email/base64mime.py -dracut_install /usr/lib64/python3.6/base64.py -dracut_install /usr/lib64/python3.6/struct.py -dracut_install /usr/lib64/python3.6/email/charset.py -dracut_install /usr/lib64/python3.6/email/encoders.py -dracut_install /usr/lib64/python3.6/quopri.py -dracut_install /usr/lib64/python3.6/email/utils.py 
-dracut_install /usr/lib64/python3.6/random.py -dracut_install /usr/lib64/python3.6/warnings.py -dracut_install /usr/lib64/python3.6/hashlib.py -dracut_install /usr/lib64/python3.6/bisect.py -dracut_install /usr/lib64/python3.6/datetime.py -dracut_install /usr/lib64/python3.6/urllib/parse.py -dracut_install /usr/lib64/python3.6/email/_parseaddr.py -dracut_install /usr/lib64/python3.6/calendar.py -dracut_install /usr/lib64/python3.6/locale.py -dracut_install /usr/lib64/python3.6/email/message.py -dracut_install /usr/lib64/python3.6/uu.py -dracut_install /usr/lib64/python3.6/email/_encoded_words.py -dracut_install /usr/lib64/python3.6/email/iterators.py -dracut_install /usr/lib64/python3.6/http/__init__.py -dracut_install /usr/lib64/python3.6/http/cookies.py -dracut_install /usr/lib64/python3.6/argparse.py -dracut_install /usr/lib64/python3.6/copy.py -dracut_install /usr/lib64/python3.6/textwrap.py -dracut_install /usr/lib64/python3.6/gettext.py -dracut_install /usr/lib64/python3.6/subprocess.py -dracut_install /usr/lib64/python3.6/signal.py -dracut_install /usr/lib64/python3.6/threading.py -dracut_install /usr/lib64/python3.6/traceback.py -dracut_install /usr/lib64/python3.6/dummy_threading.py -dracut_install /usr/lib64/python3.6/_dummy_thread.py -dracut_install /usr/lib64/python3.6/linecache.py -dracut_install /usr/lib64/python3.6/tokenize.py -dracut_install /usr/lib64/python3.6/token.py -dracut_install /usr/lib64/python3.6/shlex.py -dracut_install /usr/lib64/python3.6/configparser.py -dracut_install /usr/lib64/python3.6/lib-dynload/readline.cpython-36m-x86_64-linux-gnu.so -dracut_install /usr/lib64/python3.6/ctypes/__init__.py -dracut_install /usr/lib64/python3.6/ctypes/_endian.py -dracut_install /usr/lib64/python3.6/ctypes/util.py -dracut_install /usr/lib64/python3.6/ssl.py -dracut_install /usr/lib64/python3.6/ipaddress.py +dracut_install /usr/lib64/python3.9/collections/__init__.py +dracut_install /usr/lib64/python3.9/operator.py +dracut_install /usr/lib64/python3.9/keyword.py +dracut_install /usr/lib64/python3.9/heapq.py +dracut_install /usr/lib64/python3.9/reprlib.py +dracut_install /usr/lib64/python3.9/enum.py +dracut_install /usr/lib64/python3.9/types.py +dracut_install /usr/lib64/python3.9/functools.py +dracut_install /usr/lib64/python3.9/collections/abc.py +dracut_install /usr/lib64/python3.9/http/client.py +dracut_install /usr/lib64/python3.9/email/parser.py +dracut_install /usr/lib64/python3.9/email/feedparser.py +dracut_install /usr/lib64/python3.9/re.py +dracut_install /usr/lib64/python3.9/sre_compile.py +dracut_install /usr/lib64/python3.9/sre_parse.py +dracut_install /usr/lib64/python3.9/sre_constants.py +dracut_install /usr/lib64/python3.9/copyreg.py +dracut_install /usr/lib64/python3.9/email/errors.py +dracut_install /usr/lib64/python3.9/email/_policybase.py +dracut_install /usr/lib64/python3.9/email/header.py +dracut_install /usr/lib64/python3.9/email/quoprimime.py +dracut_install /usr/lib64/python3.9/string.py +dracut_install /usr/lib64/python3.9/stringprep.py +dracut_install /usr/lib64/python3.9/email/base64mime.py +dracut_install /usr/lib64/python3.9/base64.py +dracut_install /usr/lib64/python3.9/struct.py +dracut_install /usr/lib64/python3.9/email/charset.py +dracut_install /usr/lib64/python3.9/email/encoders.py +dracut_install /usr/lib64/python3.9/quopri.py +dracut_install /usr/lib64/python3.9/email/utils.py +dracut_install /usr/lib64/python3.9/random.py +dracut_install /usr/lib64/python3.9/warnings.py +dracut_install /usr/lib64/python3.9/hashlib.py +dracut_install 
/usr/lib64/python3.9/bisect.py +dracut_install /usr/lib64/python3.9/datetime.py +dracut_install /usr/lib64/python3.9/urllib/parse.py +dracut_install /usr/lib64/python3.9/email/_parseaddr.py +dracut_install /usr/lib64/python3.9/calendar.py +dracut_install /usr/lib64/python3.9/locale.py +dracut_install /usr/lib64/python3.9/email/message.py +dracut_install /usr/lib64/python3.9/uu.py +dracut_install /usr/lib64/python3.9/email/_encoded_words.py +dracut_install /usr/lib64/python3.9/email/iterators.py +dracut_install /usr/lib64/python3.9/http/__init__.py +dracut_install /usr/lib64/python3.9/http/cookies.py +dracut_install /usr/lib64/python3.9/argparse.py +dracut_install /usr/lib64/python3.9/copy.py +dracut_install /usr/lib64/python3.9/textwrap.py +dracut_install /usr/lib64/python3.9/gettext.py +dracut_install /usr/lib64/python3.9/subprocess.py +dracut_install /usr/lib64/python3.9/signal.py +dracut_install /usr/lib64/python3.9/threading.py +dracut_install /usr/lib64/python3.9/traceback.py +dracut_install /usr/lib64/python3.9/linecache.py +dracut_install /usr/lib64/python3.9/tokenize.py +dracut_install /usr/lib64/python3.9/token.py +dracut_install /usr/lib64/python3.9/shlex.py +dracut_install /usr/lib64/python3.9/configparser.py +dracut_install /usr/lib64/python3.9/lib-dynload/readline.cpython-39-x86_64-linux-gnu.so +dracut_install /usr/lib64/python3.9/ctypes/__init__.py +dracut_install /usr/lib64/python3.9/ctypes/_endian.py +dracut_install /usr/lib64/python3.9/ctypes/util.py +dracut_install /usr/lib64/python3.9/ssl.py +dracut_install /usr/lib64/python3.9/ipaddress.py dracut_install /usr/lib/locale/en_US.utf8/LC_ADDRESS dracut_install /usr/lib/locale/en_US.utf8/LC_IDENTIFICATION dracut_install /usr/lib/locale/en_US.utf8/LC_MEASUREMENT @@ -147,78 +144,77 @@ dracut_install /usr/lib/locale/en_US.utf8/LC_CTYPE dracut_install /usr/lib/locale/en_US.utf8/LC_NAME dracut_install /usr/lib/locale/en_US.utf8/LC_NUMERIC dracut_install /usr/lib/locale/en_US.utf8/LC_PAPER -dracut_install /usr/lib64/python3.6/json/__init__.py /usr/lib64/python3.6/json/decoder.py /usr/lib64/python3.6/json/encoder.py /usr/lib64/python3.6/json/scanner.py /usr/lib64/python3.6/json/tool.py /usr/lib64/python3.6/lib-dynload/_json.cpython-36m-x86_64-linux-gnu.so +dracut_install /usr/lib64/python3.9/json/__init__.py /usr/lib64/python3.9/json/decoder.py /usr/lib64/python3.9/json/encoder.py /usr/lib64/python3.9/json/scanner.py /usr/lib64/python3.9/json/tool.py /usr/lib64/python3.9/lib-dynload/_json.cpython-39-x86_64-linux-gnu.so # ansible dependencies -dracut_install /usr/lib64/python3.6/runpy.py -dracut_install /usr/lib64/python3.6/importlib/__init__.py -dracut_install /usr/lib64/python3.6/importlib/_bootstrap.py -dracut_install /usr/lib64/python3.6/importlib/_bootstrap_external.py -dracut_install /usr/lib64/python3.6/importlib/abc.py -dracut_install /usr/lib64/python3.6/importlib/machinery.py -dracut_install /usr/lib64/python3.6/importlib/util.py -dracut_install /usr/lib64/python3.6/contextlib.py -dracut_install /usr/lib64/python3.6/pkgutil.py -dracut_install /usr/lib64/python3.6/shutil.py -dracut_install /usr/lib64/python3.6/fnmatch.py -dracut_install /usr/lib64/python3.6/tempfile.py -dracut_install /usr/lib64/python3.6/zipfile.py -dracut_install /usr/lib64/python3.6/encodings/cp437.py -dracut_install /usr/lib64/python3.6/lib-dynload/zlib.cpython-36m-x86_64-linux-gnu.so -dracut_install /usr/lib64/python3.6/lib-dynload/grp.cpython-36m-x86_64-linux-gnu.so -dracut_install /usr/lib64/python3.6/lib-dynload/array.cpython-36m-x86_64-linux-gnu.so 
-dracut_install /usr/lib64/python3.6/__future__.py -dracut_install /usr/lib64/python3.6/platform.py -dracut_install /usr/lib64/python3.6/logging/__init__.py -dracut_install /usr/lib64/python3.6/logging/config.py -dracut_install /usr/lib64/python3.6/logging/handlers.py -dracut_install /usr/lib64/python3.6/optparse.py -dracut_install /usr/lib64/python3.6/ast.py -dracut_install /usr/lib64/python3.6/multiprocessing/__init__.py -dracut_install /usr/lib64/python3.6/multiprocessing/connection.py -dracut_install /usr/lib64/python3.6/multiprocessing/context.py -dracut_install /usr/lib64/python3.6/multiprocessing/dummy/__init__.py -dracut_install /usr/lib64/python3.6/multiprocessing/dummy/connection.py -dracut_install /usr/lib64/python3.6/multiprocessing/forkserver.py -dracut_install /usr/lib64/python3.6/multiprocessing/heap.py -dracut_install /usr/lib64/python3.6/multiprocessing/managers.py -dracut_install /usr/lib64/python3.6/multiprocessing/pool.py -dracut_install /usr/lib64/python3.6/multiprocessing/popen_fork.py -dracut_install /usr/lib64/python3.6/multiprocessing/popen_forkserver.py -dracut_install /usr/lib64/python3.6/multiprocessing/popen_spawn_posix.py -dracut_install /usr/lib64/python3.6/multiprocessing/popen_spawn_win32.py -dracut_install /usr/lib64/python3.6/multiprocessing/process.py -dracut_install /usr/lib64/python3.6/multiprocessing/queues.py -dracut_install /usr/lib64/python3.6/multiprocessing/reduction.py -dracut_install /usr/lib64/python3.6/multiprocessing/resource_sharer.py -dracut_install /usr/lib64/python3.6/multiprocessing/semaphore_tracker.py -dracut_install /usr/lib64/python3.6/multiprocessing/sharedctypes.py -dracut_install /usr/lib64/python3.6/multiprocessing/spawn.py -dracut_install /usr/lib64/python3.6/multiprocessing/synchronize.py -dracut_install /usr/lib64/python3.6/multiprocessing/util.py -dracut_install /usr/lib64/python3.6/pickle.py -dracut_install /usr/lib64/python3.6/_compat_pickle.py -dracut_install /usr/lib64/python3.6/queue.py -dracut_install /usr/lib64/python3.6/glob.py -dracut_install /usr/lib64/python3.6/distutils/__init__.py -dracut_install /usr/lib64/python3.6/distutils/archive_util.py -dracut_install /usr/lib64/python3.6/distutils/cmd.py -dracut_install /usr/lib64/python3.6/distutils/config.py -dracut_install /usr/lib64/python3.6/distutils/core.py -dracut_install /usr/lib64/python3.6/distutils/debug.py -dracut_install /usr/lib64/python3.6/distutils/dep_util.py -dracut_install /usr/lib64/python3.6/distutils/dir_util.py -dracut_install /usr/lib64/python3.6/distutils/errors.py -dracut_install /usr/lib64/python3.6/distutils/extension.py -dracut_install /usr/lib64/python3.6/distutils/fancy_getopt.py -dracut_install /usr/lib64/python3.6/distutils/file_util.py -dracut_install /usr/lib64/python3.6/distutils/filelist.py -dracut_install /usr/lib64/python3.6/distutils/log.py -dracut_install /usr/lib64/python3.6/distutils/spawn.py -dracut_install /usr/lib64/python3.6/distutils/sysconfig.py -dracut_install /usr/lib64/python3.6/distutils/text_file.py -dracut_install /usr/lib64/python3.6/distutils/util.py -dracut_install /usr/lib64/python3.6/distutils/version.py -dracut_install /usr/lib64/python3.6/distutils/versionpredicate.py -dracut_install /usr/lib64/python3.6/getpass.py +dracut_install /usr/lib64/python3.9/runpy.py +dracut_install /usr/lib64/python3.9/importlib/__init__.py +dracut_install /usr/lib64/python3.9/importlib/_bootstrap.py +dracut_install /usr/lib64/python3.9/importlib/_bootstrap_external.py +dracut_install /usr/lib64/python3.9/importlib/abc.py 
+dracut_install /usr/lib64/python3.9/importlib/machinery.py +dracut_install /usr/lib64/python3.9/importlib/util.py +dracut_install /usr/lib64/python3.9/contextlib.py +dracut_install /usr/lib64/python3.9/pkgutil.py +dracut_install /usr/lib64/python3.9/shutil.py +dracut_install /usr/lib64/python3.9/fnmatch.py +dracut_install /usr/lib64/python3.9/tempfile.py +dracut_install /usr/lib64/python3.9/zipfile.py +dracut_install /usr/lib64/python3.9/encodings/cp437.pyc +dracut_install /usr/lib64/python3.9/lib-dynload/zlib.cpython-39-x86_64-linux-gnu.so +dracut_install /usr/lib64/python3.9/lib-dynload/grp.cpython-39-x86_64-linux-gnu.so +dracut_install /usr/lib64/python3.9/lib-dynload/array.cpython-39-x86_64-linux-gnu.so +dracut_install /usr/lib64/python3.9/__future__.py +dracut_install /usr/lib64/python3.9/platform.py +dracut_install /usr/lib64/python3.9/logging/__init__.py +dracut_install /usr/lib64/python3.9/logging/config.py +dracut_install /usr/lib64/python3.9/logging/handlers.py +dracut_install /usr/lib64/python3.9/optparse.py +dracut_install /usr/lib64/python3.9/ast.py +dracut_install /usr/lib64/python3.9/multiprocessing/__init__.py +dracut_install /usr/lib64/python3.9/multiprocessing/connection.py +dracut_install /usr/lib64/python3.9/multiprocessing/context.py +dracut_install /usr/lib64/python3.9/multiprocessing/dummy/__init__.py +dracut_install /usr/lib64/python3.9/multiprocessing/dummy/connection.py +dracut_install /usr/lib64/python3.9/multiprocessing/forkserver.py +dracut_install /usr/lib64/python3.9/multiprocessing/heap.py +dracut_install /usr/lib64/python3.9/multiprocessing/managers.py +dracut_install /usr/lib64/python3.9/multiprocessing/pool.py +dracut_install /usr/lib64/python3.9/multiprocessing/popen_fork.py +dracut_install /usr/lib64/python3.9/multiprocessing/popen_forkserver.py +dracut_install /usr/lib64/python3.9/multiprocessing/popen_spawn_posix.py +dracut_install /usr/lib64/python3.9/multiprocessing/popen_spawn_win32.py +dracut_install /usr/lib64/python3.9/multiprocessing/process.py +dracut_install /usr/lib64/python3.9/multiprocessing/queues.py +dracut_install /usr/lib64/python3.9/multiprocessing/reduction.py +dracut_install /usr/lib64/python3.9/multiprocessing/resource_sharer.py +dracut_install /usr/lib64/python3.9/multiprocessing/sharedctypes.py +dracut_install /usr/lib64/python3.9/multiprocessing/spawn.py +dracut_install /usr/lib64/python3.9/multiprocessing/synchronize.py +dracut_install /usr/lib64/python3.9/multiprocessing/util.py +dracut_install /usr/lib64/python3.9/pickle.py +dracut_install /usr/lib64/python3.9/_compat_pickle.py +dracut_install /usr/lib64/python3.9/queue.py +dracut_install /usr/lib64/python3.9/glob.py +dracut_install /usr/lib64/python3.9/distutils/__init__.py +dracut_install /usr/lib64/python3.9/distutils/archive_util.py +dracut_install /usr/lib64/python3.9/distutils/cmd.py +dracut_install /usr/lib64/python3.9/distutils/config.py +dracut_install /usr/lib64/python3.9/distutils/core.py +dracut_install /usr/lib64/python3.9/distutils/debug.py +dracut_install /usr/lib64/python3.9/distutils/dep_util.py +dracut_install /usr/lib64/python3.9/distutils/dir_util.py +dracut_install /usr/lib64/python3.9/distutils/errors.py +dracut_install /usr/lib64/python3.9/distutils/extension.py +dracut_install /usr/lib64/python3.9/distutils/fancy_getopt.py +dracut_install /usr/lib64/python3.9/distutils/file_util.py +dracut_install /usr/lib64/python3.9/distutils/filelist.py +dracut_install /usr/lib64/python3.9/distutils/log.py +dracut_install /usr/lib64/python3.9/distutils/spawn.py 
+dracut_install /usr/lib64/python3.9/distutils/sysconfig.py
+dracut_install /usr/lib64/python3.9/distutils/text_file.py
+dracut_install /usr/lib64/python3.9/distutils/util.py
+dracut_install /usr/lib64/python3.9/distutils/version.py
+dracut_install /usr/lib64/python3.9/distutils/versionpredicate.py
+dracut_install /usr/lib64/python3.9/getpass.py
 dracut_install /usr/libexec/openssh/sftp-server
diff --git a/genesis/buildgenesis.sh b/genesis/buildgenesis.sh
index 81c46790..8e0de608 100644
--- a/genesis/buildgenesis.sh
+++ b/genesis/buildgenesis.sh
@@ -29,7 +29,7 @@ for lic in $(cat /tmp/tmpliclist); do
     fname=$(basename $lo)
     dlo=$(dirname $lo)
     if [[ "$dlo" == *"-lib"* ]]; then
-        dlo=${dlo/-*}
+        dlo=${dlo/-lib*}
     elif [[ "$dlo" == "device-mapper-"* ]]; then
         dlo=${dlo/-*}-mapper
     elif [[ "$dlo" == "bind-"* ]]; then
@@ -44,6 +44,10 @@ for lic in $(cat /tmp/tmpliclist); do
     cp $lic licenses/$dlo/$fname
     lo=$dlo/$fname
     echo %license /opt/confluent/genesis/%{arch}/licenses/$lo >> confluent-genesis-out.spec
+    if [ "$fname" == README ] && [ "$dlo" == "zlib" ]; then
+        cp $lic licenses/nss/$fname
+        echo %license /opt/confluent/genesis/%{arch}/licenses/nss/$fname >> confluent-genesis-out.spec
+    fi
 done
 mkdir -p licenses/ipmitool
 cp /usr/share/doc/ipmitool/COPYING licenses/ipmitool
diff --git a/genesis/confluent-genesis.spec b/genesis/confluent-genesis.spec
index c1b7d45c..beaeb5cd 100644
--- a/genesis/confluent-genesis.spec
+++ b/genesis/confluent-genesis.spec
@@ -1,5 +1,5 @@
 %define arch x86_64
-Version: 3.7.1
+Version: 3.10.0
 Release: 1
 Name: confluent-genesis-%{arch}
 BuildArch: noarch
diff --git a/genesis/extracttmuxlicenses.py b/genesis/extracttmuxlicenses.py
index 9c10922d..1a6eeba5 100644
--- a/genesis/extracttmuxlicenses.py
+++ b/genesis/extracttmuxlicenses.py
@@ -1,8 +1,18 @@
 import glob
+import os
 yearsbyname = {}
 namesbylicense = {}
 filesbylicense = {}
-for source in glob.glob('*.c'):
+allfiles = glob.glob('*.c')
+allfiles.extend(glob.glob('*.y'))
+allfiles.extend(glob.glob('compat/*.c'))
+foundbin = False
+for source in allfiles:
+    if 'cmd-parse.c' == source:
+        continue
+    if not os.path.exists(source.replace('.c', '.o')):
+        continue
+    foundbin = True
     with open(source, 'r') as sourcein:
         cap = False
         thelicense = ''
@@ -19,13 +29,27 @@ for source in glob.glob('*.c'):
                 line = line[3:]
             if line.startswith('Author: '):
                 continue
+
+            if line == 'Copyright (c) 1989, 1993\n':
+                name = 'The Regents of the University of California'
+                if name not in yearsbyname:
+                    yearsbyname[name] = set([])
+                yearsbyname[name].add('1989')
+                yearsbyname[name].add('1993')
+                continue
             if line.startswith('Copyright'):
                 _, _, years, name = line.split(maxsplit=3)
-                name = name.split('>', 1)[0] + '>'
+                if '>' in name:
+                    name = name.split('>', 1)[0] + '>'
                 currnames.add(name)
                 if name not in yearsbyname:
                     yearsbyname[name] = set([])
-                yearsbyname[name].add(years)
+                if '-' in years:
+                    strt, end = years.split('-')
+                    for x in range(int(strt), int(end) + 1):
+                        yearsbyname[name].add(str(x))
+                else:
+                    yearsbyname[name].add(years)
                 continue
             thelicense += line
 if thelicense not in namesbylicense:
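The year-range handling added above (with the copy fixed to record each year rather than print it) amounts to this small helper, shown standalone:

```python
def expand_years(years):
    """Expand '2007-2010' to the individual years; pass single years through."""
    if '-' in years:
        start, end = years.split('-')
        return {str(y) for y in range(int(start), int(end) + 1)}
    return {years}

assert expand_years('2007-2010') == {'2007', '2008', '2009', '2010'}
assert expand_years('2014') == {'2014'}
```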
diff --git a/genesis/getlicenses.py b/genesis/getlicenses.py
index 5a9b2ac2..e87e2786 100644
--- a/genesis/getlicenses.py
+++ b/genesis/getlicenses.py
@@ -29,12 +29,17 @@ with open(sys.argv[1]) as rpmlist:
     rpmlist = rpmlist.read().split('\n')
 licenses = set([])
 licensesbyrpm = {}
+lfirmlicenses = [
+    'WHENCE',
+    'chelsio_firmware',
+    'hfi1_firmware',
+    'ice_enhanced',
+    'rtlwifi_firmware.txt',
+]
 for rpm in rpmlist:
     if not rpm:
         continue
     srpm = rpmtosrpm[rpm]
-    if srpm.startswith('linux-firmware'):
-        continue
     for relrpm in srpmtorpm[srpm]:
         if relrpm.startswith('libss-'):
             continue
@@ -44,13 +49,19 @@ for rpm in rpmlist:
             continue
         if lic == '(contains no files)':
             continue
+        if srpm.startswith('linux-firmware'):
+            for desired in lfirmlicenses:
+                if desired in lic:
+                    break
+            else:
+                continue
         licensesbyrpm[rpm] = lic
         licenses.add(lic)
 for lic in sorted(licenses):
     print(lic)
 manualrpms = [
     'ipmitool',
-    'almalinux-release',
+    'centos-stream-release',
     'libaio',
     'hwdata',
     'snmp',
@@ -63,10 +74,25 @@ manualrpms = [
 ]
 manuallicenses = [
     '/usr/share/licenses/lz4/LICENSE.BSD',
+    '/usr/share/licenses/nss/LICENSE.APACHE',  # http://www.apache.org/licenses/LICENSE-2.0
+    '/usr/share/licenses/openssh/COPYING.blowfish',  # from header of blowfish file in bsd-compat
+    '/usr/share/licenses/bc/COPYING.GPLv2',
+    '/usr/share/licenses/bind-license/LICENSE',  # MPLv2 from the source code
+    '/usr/share/licenses/procps-ng/COPYING.LIBv2.1',  # fetched from the internet
     # cp /usr/share/doc/lz4-libs/LICENSE /usr/share/licenses/lz4/LICENSE.BSD
     #'lz4-1.8.3]# cp LICENSE /usr/share/licenses/lz4/LICENSE'
     # net-snmp has a bundled openssl, but the build does not avail itself of that copy
     '/usr/share/licenses/perl-libs/LICENSE',  # ./dist/ExtUtils-CBuilder/LICENSE from perl srpm
+    '/usr/share/licenses/pam/COPYING.bison',  # pam_conv_y
+    '/usr/share/licenses/pcre/LICENSE.BSD2',  # stack-less just-in-time compiler, Zoltan Herczeg
+    '/usr/share/licenses/sqlite/LICENSE.md',  # https://raw.githubusercontent.com/sqlite/sqlite/master/LICENSE.md
+    '/usr/share/licenses/pcre2/LICENSE.BSD2',
+    '/usr/share/licenses/dhcp-common/NOTICE',
+    '/usr/share/licenses/xz/COPYING.GPLv3',  # manually extracted from xz source
+    '/usr/share/licenses/bash/NOTICE',
+    '/usr/share/licenses/libsepol/NOTICE',
+    '/usr/share/licenses/perl/COPYING.regexec',  # regexec.c
+    '/usr/share/doc/python3/README.rst',
     '/usr/share/licenses/lz4/LICENSE',
     '/usr/share/licenses/lm_sensors/COPYING',
     '/usr/share/doc/libunistring/README',
@@ -75,20 +101,16 @@ manuallicenses = [
     '/usr/share/doc/zstd/README.md',
     '/usr/share/doc/hwdata/LICENSE',
     '/usr/share/doc/ipmitool/COPYING',
+    '/usr/share/licenses/linux-firmware/LICENSE.hfi1_firmware',  # these two need to be extracted from srcrpm
+    '/usr/share/licenses/linux-firmware/LICENSE.ice_enhanced',   #
     '/usr/share/doc/libaio/COPYING',
     '/usr/share/doc/net-snmp/COPYING',
     '/usr/share/doc/libnl3/COPYING',
     '/usr/share/licenses/xfsprogs/GPL-2.0',
     '/usr/share/licenses/xfsprogs/LGPL-2.1',
-    '/usr/share/licenses/tmux/NOTICE',
-    '/usr/share/licenses/kernel-extra/exceptions/Linux-syscall-note',
-    '/usr/share/licenses/kernel-extra/other/Apache-2.0',
-    '/usr/share/licenses/kernel-extra/other/CC-BY-SA-4.0',
-    '/usr/share/licenses/kernel-extra/other/CDDL-1.0',
-    '/usr/share/licenses/kernel-extra/other/GPL-1.0',
-    '/usr/share/licenses/kernel-extra/other/Linux-OpenIB',
-    '/usr/share/licenses/kernel-extra/other/MPL-1.1',
-    '/usr/share/licenses/kernel-extra/other/X11',
+    '/usr/share/licenses/tmux/NOTICE',  # built by extracttmuxlicenses.py
+    '/usr/share/licenses/tmux/COPYING',  # extracted from source
+    '/usr/share/licenses/tmux/README',  # extracted from source
     '/usr/share/licenses/kernel-extra/preferred/BSD-2-Clause',
     '/usr/share/licenses/kernel-extra/preferred/BSD-3-Clause',
     '/usr/share/licenses/kernel-extra/preferred/BSD-3-Clause-Clear',
@@ -96,10 +118,25 @@ manuallicenses = [
     '/usr/share/licenses/kernel-extra/preferred/LGPL-2.0',
     '/usr/share/licenses/kernel-extra/preferred/LGPL-2.1',
     '/usr/share/licenses/kernel-extra/preferred/MIT',
+    '/usr/share/licenses/kernel-extra/deprecated/GFDL-1.1',
+    '/usr/share/licenses/kernel-extra/deprecated/GFDL-1.2',
+    '/usr/share/licenses/kernel-extra/deprecated/GPL-1.0',
+    '/usr/share/licenses/kernel-extra/deprecated/ISC',
+    '/usr/share/licenses/kernel-extra/deprecated/Linux-OpenIB',
+    '/usr/share/licenses/kernel-extra/deprecated/X11',
+    '/usr/share/licenses/kernel-extra/deprecated/Zlib',
+    '/usr/share/licenses/kernel-extra/dual/Apache-2.0',
+    '/usr/share/licenses/kernel-extra/dual/CC-BY-4.0',
+    '/usr/share/licenses/kernel-extra/dual/CDDL-1.0',
+    '/usr/share/licenses/kernel-extra/dual/MPL-1.1',
+    '/usr/share/licenses/kernel-extra/exceptions/GCC-exception-2.0',
+    '/usr/share/licenses/kernel-extra/exceptions/Linux-syscall-note',
     '/usr/share/licenses/util-linux/COPYING.GPLv3',  # extract from parse-date.c, from srpm
-    '/usr/share/licenses/kmod/tools/COPYING',  # GPL not LGPL, must extract from kmod srpm
+    '/usr/share/licenses/kmod/COPYING',  # GPL not LGPL, must extract from kmod srpm
     '/usr/share/licenses/krb5-libs/NOTICE',  # copy it verbatim from LICENSE, exact same file
     '/usr/share/doc/less/README',
+    '/usr/share/centos-release/EULA',
+    #'/usr/share/doc/almalinux-release/GPL',
     '/usr/share/licenses/libcap-ng-utils/COPYING',
     '/usr/share/licenses/libdb/copyright',  # from libdb, db-5.3.28, lang/sql/odbc/debian/copyright
     '/usr/share/licenses/libgcrypt/LICENSES.ppc-aes-gcm',  # libgcrypt license to carry forward
@@ -108,6 +145,7 @@ manuallicenses = [
 ]
 for lic in manuallicenses:
     print(lic)
+missinglics = []
 for rpm in rpmlist:
     if not rpm:
         continue
@@ -116,7 +154,9 @@ for rpm in rpmlist:
             break
     else:
         if rpm not in licensesbyrpm:
-            raise Exception('Unresolved license info for ' + rpm)
-            print("UH OH: " + rpm)
-
+            missinglics.append(rpm)
+if missinglics:
+    for missing in missinglics:
+        print("Missing: " + missing)
+    raise Exception("Missing licenses: " + ','.join(missinglics))
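The audit loop above leans on Python's for/else construct: the else arm runs only when the loop finished without hitting break, that is, when no manual rpm prefix matched. A self-contained illustration (names are made up):

```python
manualrpms = ['ipmitool', 'hwdata']

def unaccounted(rpms):
    missing = []
    for rpm in rpms:
        for manual in manualrpms:
            if rpm.startswith(manual):
                break  # license is handled manually; stop scanning
        else:
            # no break occurred: nothing claimed this rpm
            missing.append(rpm)
    return missing

assert unaccounted(['hwdata-0.314', 'mystery-1.0']) == ['mystery-1.0']
```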
-z "$1" ]; then + mv deb/confluent_imgutil_$VERSION.deb $1 +fi diff --git a/imgutil/buildrpm b/imgutil/buildrpm index 87631ac0..4439b7ed 100755 --- a/imgutil/buildrpm +++ b/imgutil/buildrpm @@ -2,7 +2,11 @@ VERSION=`git describe|cut -d- -f 1` NUMCOMMITS=`git describe|cut -d- -f 2` if [ "$NUMCOMMITS" != "$VERSION" ]; then - VERSION=$VERSION.dev$NUMCOMMITS.g`git describe|cut -d- -f 3` + LASTNUM=$(echo $VERSION|rev|cut -d . -f 1|rev) + LASTNUM=$((LASTNUM+1)) + FIRSTPART=$(echo $VERSION|rev|cut -d . -f 2- |rev) + VERSION=${FIRSTPART}.${LASTNUM} + VERSION=$VERSION~dev$NUMCOMMITS+`git describe|cut -d- -f 3` fi sed -e "s/#VERSION#/$VERSION/" confluent_imgutil.spec.tmpl > confluent_imgutil.spec cp ../LICENSE . diff --git a/imgutil/confluent_imgutil.spec.tmpl b/imgutil/confluent_imgutil.spec.tmpl index 35ed4070..f7dea7a7 100644 --- a/imgutil/confluent_imgutil.spec.tmpl +++ b/imgutil/confluent_imgutil.spec.tmpl @@ -8,13 +8,13 @@ Source: confluent_imgutil.tar.xz BuildArch: noarch BuildRoot: /tmp/ %if "%{dist}" == ".el8" -Requires: squashfs-tools +Requires: squashfs-tools cpio %else %if "%{dist}" == ".el9" -Requires: squashfs-tools +Requires: squashfs-tools cpio %else %if "%{dist}" == ".el7" -Requires: squashfs-tools +Requires: squashfs-tools cpio %else Requires: squashfs %endif diff --git a/imgutil/control.tmpl b/imgutil/control.tmpl new file mode 100644 index 00000000..3bc8644c --- /dev/null +++ b/imgutil/control.tmpl @@ -0,0 +1,9 @@ +Package: confluent-imgutil +Version: #VERSION# +Section: base +Priority: optional +Maintainer: Jarrod Johnson +Description: Web frontend for confluent server +Architecture: all +Depends: debootstrap + diff --git a/imgutil/imgutil b/imgutil/imgutil index de3a9025..c5446069 100644 --- a/imgutil/imgutil +++ b/imgutil/imgutil @@ -61,13 +61,29 @@ FALLOC_FL_PUNCH_HOLE = 2 numregex = re.compile('([0-9]+)') def get_partition_info(): + with open('/proc/self/mountinfo') as procinfo: + mountinfo = procinfo.read() + capmounts = set([]) + for entry in mountinfo.split('\n'): + if not entry: + continue + firstinf, lastinf = entry.split(' - ') + root, mount = firstinf.split()[3:5] + filesystem = lastinf.split()[0] + if root != '/': + continue + if filesystem not in ('ext3', 'ext4', 'xfs', 'btrfs', 'vfat'): + continue + capmounts.add(mount) with open('/proc/mounts') as procmounts: mountinfo = procmounts.read() for entry in mountinfo.split('\n'): if not entry: continue dev, mount, fs, flags = entry.split()[:4] - if fs not in ('ext3', 'ext4', 'xfs', 'btrfs', 'vfat'): + if mount not in capmounts: + continue + if '/dev/zram' in dev: continue fsinfo = os.statvfs(mount) partinfo = { @@ -639,10 +655,27 @@ class DebHandler(OsHandler): def prep_root(self, args): shutil.copy('/etc/apt/sources.list', os.path.join(self.targpath, 'etc/apt/sources.list')) + for listfile in glob.glob('/etc/apt/sources.list.d/*'): + shutil.copy(listfile, os.path.join(self.targpath, listfile[1:])) args.cmd = ['apt-get', 'update'] run_constrainedx(fancy_chroot, (args, self.targpath)) args.cmd = ['apt-get', '-y', 'install'] + self.includepkgs run_constrainedx(fancy_chroot, (args, self.targpath)) + servicefile = os.path.join( + self.targpath, 'usr/lib/systemd/system/ssh.service') + if os.path.exists(servicefile): + targfile = os.path.join( + self.targpath, + 'etc/systemd/system/multi-user.target.wants/ssh.service') + if not os.path.exists(targfile): + os.symlink('/usr/lib/systemd/system/ssh.service', targfile) + else: + targfile = os.path.join( + self.targpath, + 'etc/systemd/system/multi-user.target.wants/sshd.service') 
diff --git a/imgutil/imgutil b/imgutil/imgutil
index de3a9025..c5446069 100644
--- a/imgutil/imgutil
+++ b/imgutil/imgutil
@@ -61,13 +61,29 @@ FALLOC_FL_PUNCH_HOLE = 2
 numregex = re.compile('([0-9]+)')

 def get_partition_info():
+    with open('/proc/self/mountinfo') as procinfo:
+        mountinfo = procinfo.read()
+    capmounts = set([])
+    for entry in mountinfo.split('\n'):
+        if not entry:
+            continue
+        firstinf, lastinf = entry.split(' - ')
+        root, mount = firstinf.split()[3:5]
+        filesystem = lastinf.split()[0]
+        if root != '/':
+            continue
+        if filesystem not in ('ext3', 'ext4', 'xfs', 'btrfs', 'vfat'):
+            continue
+        capmounts.add(mount)
     with open('/proc/mounts') as procmounts:
         mountinfo = procmounts.read()
     for entry in mountinfo.split('\n'):
         if not entry:
             continue
         dev, mount, fs, flags = entry.split()[:4]
-        if fs not in ('ext3', 'ext4', 'xfs', 'btrfs', 'vfat'):
+        if mount not in capmounts:
+            continue
+        if '/dev/zram' in dev:
             continue
         fsinfo = os.statvfs(mount)
         partinfo = {
@@ -639,10 +655,27 @@ class DebHandler(OsHandler):

     def prep_root(self, args):
         shutil.copy('/etc/apt/sources.list', os.path.join(self.targpath, 'etc/apt/sources.list'))
+        for listfile in glob.glob('/etc/apt/sources.list.d/*'):
+            shutil.copy(listfile, os.path.join(self.targpath, listfile[1:]))
         args.cmd = ['apt-get', 'update']
         run_constrainedx(fancy_chroot, (args, self.targpath))
         args.cmd = ['apt-get', '-y', 'install'] + self.includepkgs
         run_constrainedx(fancy_chroot, (args, self.targpath))
+        servicefile = os.path.join(
+            self.targpath, 'usr/lib/systemd/system/ssh.service')
+        if os.path.exists(servicefile):
+            targfile = os.path.join(
+                self.targpath,
+                'etc/systemd/system/multi-user.target.wants/ssh.service')
+            if not os.path.exists(targfile):
+                os.symlink('/usr/lib/systemd/system/ssh.service', targfile)
+        else:
+            targfile = os.path.join(
+                self.targpath,
+                'etc/systemd/system/multi-user.target.wants/sshd.service')
+            if not os.path.exists(targfile):
+                os.symlink('/usr/lib/systemd/system/sshd.service', targfile)
+


 class ElHandler(OsHandler):
@@ -918,6 +951,8 @@ def fancy_chroot(args, installroot):
     sourceresolv = '/etc/resolv.conf'
     if os.path.islink(sourceresolv):
         sourceresolv = os.readlink(sourceresolv)
+        # normalize the link target, resolving relative targets against /etc
+        sourceresolv = os.path.normpath(os.path.join('/etc', sourceresolv))
     dstresolv = os.path.join(installroot, 'etc/resolv.conf')
     if os.path.islink(dstresolv):
         dstresolv = os.path.join(installroot, os.readlink(dstresolv)[1:])
@@ -928,8 +963,7 @@
         _mount('none', dstresolv, flags=MS_RDONLY|MS_REMOUNT|MS_BIND)
     os.chroot(installroot)
     os.chdir('/')
-    _mount('/', '/', flags=MS_BIND) # Make / manifest as a mounted filesystem in exec
-    os.environ['PS1'] = '[\x1b[1m\x1b[4mIMGUTIL EXEC {0}\x1b[0m \W]$ '.format(imgname)
+    os.environ['PS1'] = '[\x1b[1m\x1b[4mIMGUTIL EXEC {0}\x1b[0m \\W]$ '.format(imgname)
     os.environ['CONFLUENT_IMGUTIL_MODE'] = 'exec'
     if oshandler:
         oshandler.set_source('/run/confluentdistro')
@@ -969,7 +1003,13 @@ def build_root_backend(optargs):


 def _mount_constrained_fs(args, installroot):
+    # This is prepping for a chroot.
+    # For the target environment to be content with having a root
+    # filesystem, installroot must be a mount entry of its own,
+    # so bind mount it over itself to satisfy that expectation.
+    _mount(installroot, installroot, flags=MS_BIND)
     _mount('/dev', os.path.join(installroot, 'dev'), flags=MS_BIND|MS_RDONLY)
+    _mount('/dev/pts', os.path.join(installroot, 'dev/pts'), flags=MS_BIND|MS_RDONLY)
     _mount('proc', os.path.join(installroot, 'proc'), fstype='proc')
     _mount('sys', os.path.join(installroot, 'sys'), fstype='sysfs')
     _mount('runfs', os.path.join(installroot, 'run'), fstype='tmpfs')
diff --git a/imgutil/ubuntu24.04 b/imgutil/ubuntu24.04
new file mode 120000
index 00000000..7d13753d
--- /dev/null
+++ b/imgutil/ubuntu24.04
@@ -0,0 +1 @@
+ubuntu
\ No newline at end of file
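
The _mount helper and the MS_* flag constants that imgutil calls are defined elsewhere in the file and are not part of this patch. For reference, a minimal sketch of such a wrapper over the Linux mount(2) syscall, with flag values taken from <sys/mount.h> (the error handling shown is an assumption, not the project's code):

    import ctypes
    import ctypes.util
    import os

    libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)
    MS_RDONLY = 1      # mount read-only
    MS_REMOUNT = 32    # change flags of an existing mount
    MS_BIND = 4096     # bind mount a directory

    def _mount(source, target, fstype=None, flags=0, data=None):
        # mount(2) signature: source, target, fstype, mountflags, data
        res = libc.mount(source.encode() if source else None,
                         target.encode(),
                         fstype.encode() if fstype else None,
                         flags,
                         data.encode() if data else None)
        if res:
            err = ctypes.get_errno()
            raise OSError(err, os.strerror(err), target)

Read that way, the new _mount(installroot, installroot, flags=MS_BIND) call makes the image root a mount point in its own right before the chroot, and the /dev/pts bind keeps pseudo-terminal allocation working for commands run inside it.
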
diff --git a/misc/getcsr.py b/misc/getcsr.py
index 253bfcd8..6f956b2d 100644
--- a/misc/getcsr.py
+++ b/misc/getcsr.py
@@ -12,11 +12,40 @@ ap.add_argument('--state', help='State or Province')
 ap.add_argument('--city', help='City or Locality')
 ap.add_argument('--org', help='Organization name')
 ap.add_argument('--name', help='Common/Host Name')
+ap.add_argument('outcsr', help='CSR filename to save')
 args = ap.parse_args()

 c = cmd.Command(args.xcc, os.environ['XCCUSER'], os.environ['XCCPASS'],
                 verifycallback=lambda x: True)
-params = [
+
+overview = c._do_web_request('/redfish/v1/')
+cs = overview.get('CertificateService', {}).get('@odata.id', None)
+if cs:
+    csinfo = c._do_web_request(cs)
+    gcsr = csinfo.get('Actions', {}).get('#CertificateService.GenerateCSR', {}).get('target', None)
+    if gcsr:
+        #https://n241-bmc/redfish/v1/Managers/1/NetworkProtocol HTTPS
+        #/redfish/v1/Managers/1/NetworkProtocol/HTTPS/Certificates
+        #/redfish/v1/CertificateService/CertificateLocations
+        csrargs = {
+            'City': args.city,
+            'State': args.state,
+            'Organization': args.org,
+            'Country': args.country,
+            'CommonName': args.name,
+            'KeyPairAlgorithm': 'TPM_ALG_ECDH',
+            'KeyCurveId': 'TPM_ECC_NIST_P384',
+            'CertificateCollection': { '@odata.id': '/redfish/v1/Managers/1/NetworkProtocol/HTTPS/Certificates'}
+        }
+
+        csrinfo = c._do_web_request(gcsr, csrargs)
+        if 'CSRString' in csrinfo:
+            with open(args.outcsr, 'w') as csrout:
+                csrout.write(csrinfo['CSRString'])
+            sys.exit(0)
+
+else:
+    params = [
     '0', # 'serviceType'
     args.country,
     args.state,
@@ -32,15 +32,16 @@ params = [
     '',
     '',
     '',
-]
-wc = c.oem.wc
-rsp, status = wc.grab_json_response_with_status('/api/function', {'Sec_GenKeyAndCSR': ','.join(params)})
-rsp, status = wc.grab_json_response_with_status('/api/dataset', {'CSR_Format': '1'})
-rsp, status = wc.grab_json_response_with_status('/api/function', {'Sec_DownloadCSRANDCert': '0,4,0'})
-wc.request('GET', '/download/{0}'.format(rsp['FileName']))
-rsp = wc.getresponse()
-csr = rsp.read()
-if rsp.getheader('Content-Encoding', None) == 'gzip':
-    csr = gzip.GzipFile(fileobj=io.BytesIO(csr)).read()
-print(csr)
+    ]
+
+    wc = c.oem.wc
+    rsp, status = wc.grab_json_response_with_status('/api/function', {'Sec_GenKeyAndCSR': ','.join(params)})
+    rsp, status = wc.grab_json_response_with_status('/api/dataset', {'CSR_Format': '1'})
+    rsp, status = wc.grab_json_response_with_status('/api/function', {'Sec_DownloadCSRANDCert': '0,4,0'})
+    wc.request('GET', '/download/{0}'.format(rsp['FileName']))
+    rsp = wc.getresponse()
+    csr = rsp.read()
+    if rsp.getheader('Content-Encoding', None) == 'gzip':
+        csr = gzip.GzipFile(fileobj=io.BytesIO(csr)).read()
+    print(csr)

diff --git a/misc/installcert.py b/misc/installcert.py
index 9654bf54..2d53e800 100644
--- a/misc/installcert.py
+++ b/misc/installcert.py
@@ -8,8 +8,23 @@ ap.add_argument('xcc', help='XCC address')
 ap.add_argument('cert', help='Certificate in PEM format')
 args = ap.parse_args()

+cert = open(args.cert, 'r').read()
 c = cmd.Command(args.xcc, os.environ['XCCUSER'], os.environ['XCCPASS'],
                 verifycallback=lambda x: True)
+overview = c._do_web_request('/redfish/v1/')
+cs = overview.get('CertificateService', {}).get('@odata.id', None)
+if cs:
+    csinfo = c._do_web_request(cs)
+    gcsr = csinfo.get('Actions', {}).get('#CertificateService.ReplaceCertificate', {}).get('target', None)
+    if gcsr:
+        repcertargs = {
+            'CertificateUri': { '@odata.id': '/redfish/v1/Managers/1/NetworkProtocol/HTTPS/Certificates/1' },
+            'CertificateType': 'PEM',
+            'CertificateString': cert }
+        print(repr(c._do_web_request(gcsr, repcertargs)))
+        sys.exit(0)
+
+#CertificateService.ReplaceCertificate
 wc = c.oem.wc
 cert = open(args.cert, 'rb').read()
 res = wc.grab_json_response_with_status('/api/function', {'Sec_ImportCert': '0,1,0,0,,{0}'.format(cert)})
diff --git a/misc/swraid b/misc/swraid
index 3e234dc8..836f1fb1 100644
--- a/misc/swraid
+++ b/misc/swraid
@@ -1,6 +1,6 @@
 DEVICES="/dev/sda /dev/sdb"
 RAIDLEVEL=1
-mdadm --detail /dev/md*|grep 'Version : 1.0' >& /dev/null && exit 0
+mdadm --detail /dev/md*|grep 'Version : 1.0' >& /dev/null || (
 lvm vgchange -a n
 mdadm -S -s
 NUMDEVS=$(for dev in $DEVICES; do
@@ -14,5 +14,6 @@
 mdadm -C /dev/md/raid $DEVICES -n $NUMDEVS -e 1.0 -l $RAIDLEVEL
 # shut and restart array to prime things for anaconda
 mdadm -S -s
 mdadm --assemble --scan
+)
 readlink /dev/md/raid|sed -e 's/.*\///' > /tmp/installdisk
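
getcsr.py and installcert.py above share the same probing pattern: fetch the Redfish service root, follow the CertificateService link, and look up the relevant action target, falling back to the legacy XCC web API when Redfish certificate management is absent. A condensed sketch of that discovery flow using the generic requests library (the BMC environment variable, the sample subject fields, and the Managers/1 certificate-collection path are assumptions carried over from the scripts, not a fixed API of either tool):

    import os
    import requests

    bmc = os.environ['BMC']  # hypothetical; the scripts take the XCC on argv
    auth = (os.environ['XCCUSER'], os.environ['XCCPASS'])

    def rf_get(path):
        # Redfish resources are plain JSON documents over HTTPS
        rsp = requests.get('https://{0}{1}'.format(bmc, path),
                           auth=auth, verify=False)
        rsp.raise_for_status()
        return rsp.json()

    cs = rf_get('/redfish/v1/').get('CertificateService', {}).get('@odata.id')
    if cs:
        target = rf_get(cs).get('Actions', {}).get(
            '#CertificateService.GenerateCSR', {}).get('target')
        if target:
            rsp = requests.post(
                'https://{0}{1}'.format(bmc, target), auth=auth, verify=False,
                json={'CommonName': 'node1.example.com',  # hypothetical subject
                      'Country': 'US',
                      'KeyPairAlgorithm': 'TPM_ALG_ECDH',
                      'KeyCurveId': 'TPM_ECC_NIST_P384',
                      'CertificateCollection': {'@odata.id':
                          '/redfish/v1/Managers/1/NetworkProtocol/HTTPS/Certificates'}})
            print(rsp.json().get('CSRString', ''))
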