
Implement a number of py3-compatible adjustments

Jarrod Johnson committed 2019-10-02 08:58:39 -04:00
parent 147d59cba7
commit 90e546bcac
12 changed files with 71 additions and 31 deletions

View File

@@ -82,7 +82,7 @@ elif args[0] == 'dump':
"or -s to do encrypted backup that requires keys.json from "
"another backup to restore.")
sys.exit(1)
os.umask(077)
os.umask(0o77)
main._initsecurity(conf.get_config())
if not os.path.exists(dumpdir):
os.makedirs(dumpdir)
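The change above is needed because Python 3 removed the bare-zero octal literal: 077 is a SyntaxError, while the 0o prefix parses under both Python 2.6+ and Python 3. A minimal standalone illustration (only the mask value comes from the hunk; the rest is filler):

    import os

    # 0o77 == 63; Python 3 rejects the old 077 spelling outright,
    # so the 0o form is the only one that works on both versions.
    previous = os.umask(0o77)
    print(oct(previous))
    os.umask(previous)  # restore the process umask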

View File

@@ -16,7 +16,10 @@
# This defines config variable to store the global configuration for confluent
import ConfigParser
try:
import ConfigParser
except ModuleNotFoundError:
import configparser as ConfigParser
import os
_config = None
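This is the first of several hunks that use the same dual-import pattern: try the Python 2 module name, then fall back to the renamed Python 3 module. A minimal sketch of the idea (ModuleNotFoundError only exists on Python 3, but the except clause is never evaluated on Python 2 when the old import succeeds; catching ImportError, its base class, is the more conventional spelling):

    try:
        import ConfigParser                    # Python 2 name
    except ModuleNotFoundError:                # Python 3 path
        import configparser as ConfigParser

    # Callers keep using the Python 2 spelling either way.
    cfg = ConfigParser.ConfigParser()
    cfg.read('confluent.cfg')                  # file name is illustrative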

View File

@@ -46,7 +46,10 @@ import Cryptodome.Protocol.KDF as KDF
from Cryptodome.Cipher import AES
from Cryptodome.Hash import HMAC
from Cryptodome.Hash import SHA256
import anydbm as dbm
try:
import anydbm as dbm
except ModuleNotFoundError:
import dbm
import ast
import base64
import confluent.config.attributes as allattributes
@@ -57,7 +60,10 @@ import confluent.util
import confluent.netutil as netutil
import confluent.exceptions as exc
import copy
import cPickle
try:
import cPickle
except ModuleNotFoundError:
import pickle as cPickle
import errno
import eventlet
import eventlet.event as event
@@ -74,6 +80,10 @@ import struct
import sys
import threading
import traceback
try:
unicode
except NameError:
unicode = str
_masterkey = None
@@ -478,7 +488,7 @@ def _load_dict_from_dbm(dpath, tdb):
currdict[elem] = {}
currdict = currdict[elem]
try:
for tk in dbe:
for tk in dbe.keys():
currdict[tk] = cPickle.loads(dbe[tk])
except AttributeError:
tk = dbe.firstkey()
@@ -1145,9 +1155,9 @@ class ConfigManager(object):
Returns an identifier that can be used to unsubscribe from these
notifications using remove_watcher
"""
notifierid = random.randint(0, sys.maxint)
notifierid = random.randint(0, sys.maxsize)
while notifierid in self._notifierids:
notifierid = random.randint(0, sys.maxint)
notifierid = random.randint(0, sys.maxsize)
self._notifierids[notifierid] = {'attriblist': []}
if self.tenant not in self._attribwatchers:
self._attribwatchers[self.tenant] = {}
@@ -1186,9 +1196,9 @@ class ConfigManager(object):
# use in case of cancellation.
# I anticipate no more than a handful of watchers of this sort, so
# this loop should not have to iterate too many times
notifierid = random.randint(0, sys.maxint)
notifierid = random.randint(0, sys.maxsize)
while notifierid in self._notifierids:
notifierid = random.randint(0, sys.maxint)
notifierid = random.randint(0, sys.maxsize)
# going to track that this is a nodecollection type watcher,
# but there is no additional data associated.
self._notifierids[notifierid] = set(['nodecollection'])
@@ -1665,6 +1675,8 @@ class ConfigManager(object):
node, group))
for group in attribmap:
group = group.encode('utf-8')
if not isinstance(group, str):
group = group.decode('utf-8')
if group not in self._cfgstore['nodegroups']:
self._cfgstore['nodegroups'][group] = {'nodes': set()}
cfgobj = self._cfgstore['nodegroups'][group]
@@ -1836,6 +1848,8 @@ class ConfigManager(object):
# framework to trigger on
changeset[node] = {'_nodedeleted': 1}
node = node.encode('utf-8')
if not isinstance(node, str):
node = node.decode('utf-8')
if node in self._cfgstore['nodes']:
self._sync_groups_to_node(node=node, groups=[],
changeset=changeset)
@@ -2012,6 +2026,8 @@ class ConfigManager(object):
# this mitigates risk of arguments being partially applied
for node in attribmap:
node = node.encode('utf-8')
if not isinstance(node, str):
node = node.decode('utf-8')
if node == '':
raise ValueError('"{0}" is not a valid node name'.format(node))
if autocreate:
@@ -2068,6 +2084,8 @@ class ConfigManager(object):
attribmap[node][attrname] = attrval
for node in attribmap:
node = node.encode('utf-8')
if not isinstance(node, str):
node = node.decode('utf-8')
exprmgr = None
if node not in self._cfgstore['nodes']:
newnodes.append(node)
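Several of the hunks above replace an unconditional node.encode('utf-8') / group.encode('utf-8') with an isinstance check that decodes bytes back to text, so the in-memory stores end up keyed by str under Python 3. A small helper capturing the idea (the helper name is illustrative, not something the codebase defines):

    def _to_text(name):
        # dbm and wire data arrive as bytes under Python 3; plain str
        # keys pass through untouched.
        if not isinstance(name, str):
            name = name.decode('utf-8')
        return name

    print(_to_text(b'node1'))   # 'node1'
    print(_to_text('node2'))    # 'node2'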
@@ -2248,7 +2266,7 @@ class ConfigManager(object):
_cfgstore = {}
rootpath = cls._cfgdir
try:
with open(os.path.join(rootpath, 'transactioncount'), 'r') as f:
with open(os.path.join(rootpath, 'transactioncount'), 'rb') as f:
txbytes = f.read()
if len(txbytes) == 8:
_txcount = struct.unpack('!Q', txbytes)[0]
@@ -2306,7 +2324,7 @@ class ConfigManager(object):
if statelessmode:
return
_mkpath(cls._cfgdir)
with open(os.path.join(cls._cfgdir, 'transactioncount'), 'w') as f:
with open(os.path.join(cls._cfgdir, 'transactioncount'), 'wb') as f:
f.write(struct.pack('!Q', _txcount))
if (fullsync or 'dirtyglobals' in _cfgstore and
'globals' in _cfgstore):
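The transactioncount file holds raw struct-packed data, and struct.pack() returns bytes, so under Python 3 the file must be opened in binary mode ('rb'/'wb'); writing bytes to a text-mode handle raises TypeError. A self-contained sketch of the round trip (the path and counter value are placeholders):

    import os
    import struct
    import tempfile

    path = os.path.join(tempfile.gettempdir(), 'transactioncount')
    txcount = 42

    with open(path, 'wb') as f:
        f.write(struct.pack('!Q', txcount))     # 8-byte big-endian counter

    with open(path, 'rb') as f:
        txbytes = f.read()
    if len(txbytes) == 8:
        print(struct.unpack('!Q', txbytes)[0])  # 42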
@@ -2417,7 +2435,9 @@ def _restore_keys(jsond, password, newpassword=None, sync=True):
else:
keydata = json.loads(jsond)
cryptkey = _parse_key(keydata['cryptkey'], password)
integritykey = _parse_key(keydata['integritykey'], password)
integritykey = None
if 'integritykey' in keydata:
integritykey = _parse_key(keydata['integritykey'], password)
conf.init_config()
cfg = conf.get_config()
if cfg.has_option('security', 'externalcfgkey'):
@@ -2426,8 +2446,9 @@ def _restore_keys(jsond, password, newpassword=None, sync=True):
newpassword = keyfile.read()
set_global('master_privacy_key', _format_key(cryptkey,
password=newpassword), sync)
set_global('master_integrity_key', _format_key(integritykey,
password=newpassword), sync)
if integritykey:
set_global('master_integrity_key', _format_key(integritykey,
password=newpassword), sync)
_masterkey = cryptkey
_masterintegritykey = integritykey
if sync:

View File

@@ -106,6 +106,8 @@ def load_plugins():
for plugin in os.listdir(plugindir):
if plugin.startswith('.'):
continue
if '__pycache__' in plugin:
continue
(plugin, plugtype) = os.path.splitext(plugin)
if plugtype == '.sh':
pluginmap[plugin] = shellmodule.Plugin(
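The extra check is needed because Python 3 drops compiled bytecode into a __pycache__ directory inside the plugin directory, which the listing would otherwise pick up. A rough sketch of the filter, using a throwaway directory and a made-up plugin name:

    import os
    import tempfile

    plugindir = tempfile.mkdtemp()
    os.makedirs(os.path.join(plugindir, '__pycache__'))
    open(os.path.join(plugindir, 'hwmanage.sh'), 'w').close()  # hypothetical plugin

    for plugin in os.listdir(plugindir):
        if plugin.startswith('.') or '__pycache__' in plugin:
            continue
        name, plugtype = os.path.splitext(plugin)
        print(name, plugtype)   # hwmanage .sh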

View File

@@ -98,7 +98,7 @@ def snoop(handler, protocol=None):
netaddr = ':'.join(['{0:02x}'.format(x) for x in netaddr])
optidx = 0
try:
optidx = rq.index('\x63\x82\x53\x63') + 4
optidx = rq.index(b'\x63\x82\x53\x63') + 4
except ValueError:
continue
uuid, arch = find_info_in_options(rq, optidx)
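The DHCP request buffer is bytes under Python 3, so the options magic cookie (99.130.83.99) must also be a bytes literal; rq.index() with a str needle raises TypeError. A standalone illustration with a fabricated packet tail:

    # 240 zero bytes stand in for the fixed-length DHCP header fields.
    rq = bytes(240) + b'\x63\x82\x53\x63' + b'\x35\x01\x01'
    optidx = rq.index(b'\x63\x82\x53\x63') + 4   # first option starts here
    print(optidx)   # 244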

View File

@@ -249,18 +249,18 @@ def _parse_attrlist(attrstr):
attribs = {}
while attrstr:
if attrstr[0] == '(':
if ')' not in attrstr:
if b')' not in attrstr:
attribs['INCOMPLETE'] = True
return attribs
currattr = attrstr[1:attrstr.index(')')]
if '=' not in currattr: # Not allegedly kosher, but still..
currattr = attrstr[1:attrstr.index(b')')]
if b'=' not in currattr: # Not allegedly kosher, but still..
currattr = currattr.decode('utf-8')
attribs[currattr] = None
else:
attrname, attrval = currattr.split('=', 1)
attrname = attrname.decode('utf-8')
attribs[attrname] = []
for val in attrval.split(','):
for val in attrval.split(b','):
try:
val = val.decode('utf-8')
except UnicodeDecodeError:
@@ -284,9 +284,9 @@
).lower()
attribs[attrname].append(val)
attrstr = attrstr[attrstr.index(')'):]
elif attrstr[0] == ',':
elif attrstr[0] == b','[0]:
attrstr = attrstr[1:]
elif ',' in attrstr:
elif b',' in attrstr:
currattr = attrstr[:attrstr.index(',')]
attribs[currattr] = None
attrstr = attrstr[attrstr.index(','):]
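The b','[0] comparison above exists because indexing a bytes object under Python 3 yields an int rather than a one-byte string, so attrstr[0] == ',' is always False. A quick demonstration (the attribute string is made up):

    attrstr = b',(x=1)'
    print(attrstr[0] == ',')        # False: the int 44 is compared to a str
    print(attrstr[0] == b','[0])    # True: both sides are the int 44
    print(attrstr[0:1] == b',')     # slicing is an equivalent spelling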

View File

@@ -17,7 +17,10 @@
# This SCGI server provides a http wrap to confluent api
# It additionally manages httprequest console sessions
import base64
import Cookie
try:
import Cookie
except ModuleNotFoundError:
import http.cookies as Cookie
import confluent.auth as auth
import confluent.config.attributes as attribs
import confluent.consoleserver as consoleserver
@@ -39,7 +42,10 @@ import socket
import sys
import traceback
import time
import urlparse
try:
import urlparse
except ModuleNotFoundError:
import urllib.parse as urlparse
import eventlet.wsgi
#scgi = eventlet.import_patched('flup.server.scgi')
tlvdata = confluent.tlvdata

View File

@@ -76,6 +76,10 @@ import stat
import struct
import time
import traceback
try:
unicode
except NameError:
unicode = str
daemonized = False
logfull = False
@@ -176,6 +180,8 @@ class BaseRotatingHandler(object):
self.textfile = open(self.textpath, mode='ab')
if self.binfile is None:
self.binfile = open(self.binpath, mode='ab')
if not isinstance(textrecord, bytes):
textrecord = textrecord.encode('utf-8')
self.textfile.write(textrecord)
self.binfile.write(binrecord)
self.textfile.flush()
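The log files are opened in binary ('ab') mode, so under Python 3 text records must be encoded before the write, while records that already arrive as bytes pass through unchanged. A minimal sketch (record content and file location are illustrative):

    import os
    import tempfile

    textrecord = 'console output line\n'
    if not isinstance(textrecord, bytes):
        textrecord = textrecord.encode('utf-8')

    textpath = os.path.join(tempfile.gettempdir(), 'example.log')
    with open(textpath, mode='ab') as textfile:
        textfile.write(textrecord)
        textfile.flush()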

View File

@@ -43,9 +43,11 @@ except ImportError:
import confluent.discovery.core as disco
import eventlet
dbgif = False
if map(int, (eventlet.__version__.split('.'))) > [0, 18]:
try:
import eventlet.backdoor as backdoor
dbgif = True
except Exception:
pass
havefcntl = True
try:
import fcntl
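The old feature test broke because map() returns a lazy iterator on Python 3, and comparing an iterator to a list raises TypeError; the commit sidesteps the version check entirely by just attempting the backdoor import. For reference, the comparison still works if the map is materialized first (the version string below is arbitrary):

    version = '0.24.1'
    parts = list(map(int, version.split('.')))   # [0, 24, 1]
    print(parts > [0, 18])                       # True on both Python 2 and 3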

View File

@@ -26,7 +26,7 @@ neightime = 0
import re
_validmac = re.compile('..:..:..:..:..:..')
_validmac = re.compile(b'..:..:..:..:..:..')
def update_neigh():
@@ -39,11 +39,11 @@ def update_neigh():
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(neighdata, err) = ipn.communicate()
for entry in neighdata.split('\n'):
entry = entry.split(' ')
for entry in neighdata.split(b'\n'):
entry = entry.split(b' ')
if len(entry) < 5 or not entry[4]:
continue
if entry[0] in ('192.168.0.100', '192.168.70.100', '192.168.70.125'):
if entry[0] in (b'192.168.0.100', b'192.168.70.100', b'192.168.70.125'):
# Note that these addresses are common static ip addresses
# that are hopelessly ambiguous if there are many
# so ignore such entries and move on
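subprocess pipes yield bytes under Python 3, so the neighbor-table output has to be split on bytes separators and compared with bytes literals, matching the bytes regex change above. A standalone sketch using a captured sample line instead of a real `ip neigh` invocation:

    neighdata = b'10.0.0.5 dev eth0 lladdr aa:bb:cc:dd:ee:ff REACHABLE\n'
    for entry in neighdata.split(b'\n'):
        entry = entry.split(b' ')
        if len(entry) < 5 or not entry[4]:
            continue
        print(entry[0].decode('utf-8'), entry[4].decode('utf-8'))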

View File

@@ -89,7 +89,7 @@ class ExecConsole(conapi.Console):
stdin=slave, stdout=slave,
stderr=subprocess.PIPE, close_fds=True)
except OSError:
print "Unable to execute " + self.executable + " (permissions?)"
print("Unable to execute " + self.executable + " (permissions?)")
self.close()
return
os.close(slave)
@@ -104,7 +104,7 @@
try:
os.close(self._master)
except OSError:
print "Error closing master of child process, ignoring"
print("Error closing master of child process, ignoring")
if self.subproc is None or self.subproc.poll() is not None:
return
self.subproc.terminate()

View File

@@ -412,7 +412,7 @@ def _unixdomainhandler():
except OSError: # if file does not exist, no big deal
pass
if not os.path.isdir("/var/run/confluent"):
os.makedirs('/var/run/confluent', 0755)
os.makedirs('/var/run/confluent', 0o755)
unixsocket.bind("/var/run/confluent/api.sock")
os.chmod("/var/run/confluent/api.sock",
stat.S_IWOTH | stat.S_IROTH | stat.S_IWGRP |