author    | Celia Wang <celia.wang@canonical.com> | 2020-05-21 16:54:00 +1000
committer | Celia Wang <celia.wang@canonical.com> | 2020-05-21 16:54:00 +1000
commit    | d0f92fe9721d2d7bcc372298d0d594e3bb2e353a
tree      | 56f48f9ddacd4221b45a31c39d477025a8865eee
parent    | ed31d71d1f45df6027bdcf8cd7ea760b9eced5d5
Sync charmhelpers for release 20.05
- Fix the sync command in Makefile
- contrib/python is moved to fetch/python
28 files changed, 2135 insertions, 227 deletions
```diff
@@ -43,7 +43,7 @@ functional:
 
 sync:
 	@mkdir -p bin
-	@bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py > bin/charm_helpers_sync.py
+	@curl -o bin/charm_helpers_sync.py https://raw.githubusercontent.com/juju/charm-helpers/master/tools/charm_helpers_sync/charm_helpers_sync.py
 	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml
 
 publish: lint unit_test
diff --git a/charm-helpers-sync.yaml b/charm-helpers-sync.yaml
index ed5ea5f..64273d7 100644
--- a/charm-helpers-sync.yaml
+++ b/charm-helpers-sync.yaml
@@ -1,10 +1,9 @@
-branch: lp:charm-helpers
+repo: https://github.com/juju/charm-helpers
 destination: charmhelpers
 include:
     - core
    - fetch
     - contrib.hahelpers.cluster
-    - contrib.python.packages
     - payload.execd
     - contrib.charmsupport
     - osplatform
diff --git a/charmhelpers/__init__.py b/charmhelpers/__init__.py
index 4886788..61ef907 100644
--- a/charmhelpers/__init__.py
+++ b/charmhelpers/__init__.py
@@ -14,23 +14,84 @@
 # Bootstrap charm-helpers, installing its dependencies if necessary using
 # only standard libraries.
+from __future__ import print_function
+from __future__ import absolute_import
+
+import functools
+import inspect
 import subprocess
 import sys
 
 try:
-    import six  # flake8: noqa
+    import six  # NOQA:F401
 except ImportError:
     if sys.version_info.major == 2:
         subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
     else:
         subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
-    import six  # flake8: noqa
+    import six  # NOQA:F401
 
 try:
-    import yaml  # flake8: noqa
+    import yaml  # NOQA:F401
 except ImportError:
     if sys.version_info.major == 2:
         subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
     else:
         subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
-    import yaml  # flake8: noqa
+    import yaml  # NOQA:F401
+
+
+# Holds a mapping of mangled function names that have been deprecated
+# using the @deprecate decorator below.  This is so that the warning is only
+# printed once for each usage of the function.
+__deprecated_functions = {}
+
+
+def deprecate(warning, date=None, log=None):
+    """Add a deprecation warning the first time the function is used.
+
+    The date, which is a string in semi-ISO8601 format, indicates the
+    year-month in which the function is officially going to be removed.
+
+    usage:
+
+    @deprecate('use core/fetch/add_source() instead', '2017-04')
+    def contributed_add_source_thing(...):
+        ...
+
+    And it then prints to the log ONCE that the function is deprecated.
+    The reason for passing the logging function (log) is so that hookenv.log
+    can be used for a charm if needed.
+
+    :param warning: String to indicate where it has moved to.
+    :param date: optional string, in YYYY-MM format, to indicate when the
+        function will definitely (probably) be removed.
+    :param log: The log function to call to log.  If None, logs to stdout.
+    """
+    def wrap(f):
+
+        @functools.wraps(f)
+        def wrapped_f(*args, **kwargs):
+            try:
+                module = inspect.getmodule(f)
+                file = inspect.getsourcefile(f)
+                lines = inspect.getsourcelines(f)
+                f_name = "{}-{}-{}..{}-{}".format(
+                    module.__name__, file, lines[0], lines[-1], f.__name__)
+            except (IOError, TypeError):
+                # assume it was local, so just use the name of the function
+                f_name = f.__name__
+            if f_name not in __deprecated_functions:
+                __deprecated_functions[f_name] = True
+                s = "DEPRECATION WARNING: Function {} is being removed".format(
+                    f.__name__)
+                if date:
+                    s = "{} on/around {}".format(s, date)
+                if warning:
+                    s = "{} : {}".format(s, warning)
+                if log:
+                    log(s)
+                else:
+                    print(s)
+            return f(*args, **kwargs)
+        return wrapped_f
+    return wrap
```
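The new `deprecate` decorator is the piece downstream charms are most likely to use directly. A minimal usage sketch follows; the function name, message, and date are illustrative, not part of this commit:

```python
# Hypothetical use of charmhelpers.deprecate; names and dates are examples.
from charmhelpers import deprecate
from charmhelpers.core.hookenv import log


@deprecate('use charmhelpers.fetch.add_source() instead', '2020-12', log=log)
def add_source_legacy(source):
    """Old entry point kept for backwards compatibility."""
    from charmhelpers.fetch import add_source
    return add_source(source)

# The first call logs:
#   DEPRECATION WARNING: Function add_source_legacy is being removed
#   on/around 2020-12 : use charmhelpers.fetch.add_source() instead
# Subsequent calls run silently, because the mangled function name is
# remembered in __deprecated_functions.
```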
```diff
diff --git a/charmhelpers/contrib/charmsupport/nrpe.py b/charmhelpers/contrib/charmsupport/nrpe.py
index 424b7f7..d775861 100644
--- a/charmhelpers/contrib/charmsupport/nrpe.py
+++ b/charmhelpers/contrib/charmsupport/nrpe.py
@@ -30,8 +30,10 @@ import yaml
 
 from charmhelpers.core.hookenv import (
     config,
+    hook_name,
     local_unit,
     log,
+    relation_get,
     relation_ids,
     relation_set,
     relations_of_type,
@@ -125,7 +127,7 @@ class CheckException(Exception):
 
 
 class Check(object):
-    shortname_re = '[A-Za-z0-9-_]+$'
+    shortname_re = '[A-Za-z0-9-_.@]+$'
     service_template = ("""
 #---------------------------------------------------
 # This file is Juju managed
@@ -259,11 +261,23 @@ class NRPE(object):
         relation = relation_ids('nrpe-external-master')
         if relation:
             log("Setting charm primary status {}".format(primary))
-            for rid in relation_ids('nrpe-external-master'):
+            for rid in relation:
                 relation_set(relation_id=rid,
                              relation_settings={'primary': self.primary})
+        self.remove_check_queue = set()
 
     def add_check(self, *args, **kwargs):
+        shortname = None
+        if kwargs.get('shortname') is None:
+            if len(args) > 0:
+                shortname = args[0]
+        else:
+            shortname = kwargs['shortname']
+
         self.checks.append(Check(*args, **kwargs))
+
+        try:
+            self.remove_check_queue.remove(shortname)
+        except KeyError:
+            pass
 
     def remove_check(self, *args, **kwargs):
         if kwargs.get('shortname') is None:
@@ -280,12 +294,13 @@ class NRPE(object):
 
         check = Check(*args, **kwargs)
         check.remove(self.hostname)
+        self.remove_check_queue.add(kwargs['shortname'])
 
     def write(self):
         try:
             nagios_uid = pwd.getpwnam('nagios').pw_uid
             nagios_gid = grp.getgrnam('nagios').gr_gid
-        except:
+        except Exception:
             log("Nagios user not set up, nrpe checks not updated")
             return
@@ -302,12 +317,34 @@ class NRPE(object):
                     "command": nrpecheck.command,
                 }
 
-        service('restart', 'nagios-nrpe-server')
+        # update-status hooks are configured to fire every 5 minutes by
+        # default. When nagios-nrpe-server is restarted, the nagios server
+        # reports checks failing, causing unnecessary alerts. Let's not
+        # restart on update-status hooks.
+        if not hook_name() == 'update-status':
+            service('restart', 'nagios-nrpe-server')
 
         monitor_ids = relation_ids("local-monitors") + \
             relation_ids("nrpe-external-master")
         for rid in monitor_ids:
-            relation_set(relation_id=rid, monitors=yaml.dump(monitors))
+            reldata = relation_get(unit=local_unit(), rid=rid)
+            if 'monitors' in reldata:
+                # update the existing set of monitors with the new data
+                old_monitors = yaml.safe_load(reldata['monitors'])
+                old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe']
+                # remove keys that are in the remove_check_queue
+                old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items()
+                                     if k not in self.remove_check_queue}
+                # update/add nrpe_monitors
+                old_nrpe_monitors.update(nrpe_monitors)
+                old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors
+                # write back to the relation
+                relation_set(relation_id=rid, monitors=yaml.dump(old_monitors))
+            else:
+                # write a brand new set of monitors, as there are no
+                # existing ones.
+                relation_set(relation_id=rid, monitors=yaml.dump(monitors))
+
+        self.remove_check_queue.clear()
 
 
 def get_nagios_hostcontext(relation_name='nrpe-external-master'):
@@ -404,16 +441,26 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
             os.chmod(checkpath, 0o644)
 
 
-def copy_nrpe_checks():
+def copy_nrpe_checks(nrpe_files_dir=None):
     """
     Copy the nrpe checks into place
 
     """
     NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
-    nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
-                                  'charmhelpers', 'contrib', 'openstack',
-                                  'files')
-
+    if nrpe_files_dir is None:
+        # determine if "charmhelpers" is in CHARMDIR or CHARMDIR/hooks
+        for segment in ['.', 'hooks']:
+            nrpe_files_dir = os.path.abspath(os.path.join(
+                os.getenv('CHARM_DIR'),
+                segment,
+                'charmhelpers',
+                'contrib',
+                'openstack',
+                'files'))
+            if os.path.isdir(nrpe_files_dir):
+                break
+        else:
+            raise RuntimeError("Couldn't find charmhelpers directory")
     if not os.path.exists(NAGIOS_PLUGINS):
         os.makedirs(NAGIOS_PLUGINS)
     for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
@@ -437,3 +484,17 @@ def add_haproxy_checks(nrpe, unit_name):
         shortname='haproxy_queue',
         description='Check HAProxy queue depth {%s}' % unit_name,
         check_cmd='check_haproxy_queue_depth.sh')
+
+
+def remove_deprecated_check(nrpe, deprecated_services):
+    """
+    Remove checks for deprecated services in the list
+
+    :param nrpe: NRPE object to remove check from
+    :type nrpe: NRPE
+    :param deprecated_services: List of deprecated services that are removed
+    :type deprecated_services: list
+    """
+    for dep_svc in deprecated_services:
+        log('Deprecated service: {}'.format(dep_svc))
+        nrpe.remove_check(shortname=dep_svc)
```
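The interesting behavioural change in `NRPE.write()` above is that relation data is now merged rather than overwritten: checks queued for removal are dropped and new checks are overlaid onto whatever monitors were already published. A standalone sketch of that merge, using plain dicts in place of real relation data (all names illustrative):

```python
# Self-contained sketch of the merge performed in NRPE.write() above.
import yaml

existing = yaml.safe_load("""
monitors:
  remote:
    nrpe:
      check_disk: {command: check_disk}
      check_old_svc: {command: check_old_svc}
""")

new_nrpe_monitors = {'check_load': {'command': 'check_load'}}
remove_check_queue = {'check_old_svc'}

nrpe = existing['monitors']['remote']['nrpe']
# drop checks queued for removal, then overlay the freshly added ones
nrpe = {k: v for k, v in nrpe.items() if k not in remove_check_queue}
nrpe.update(new_nrpe_monitors)
existing['monitors']['remote']['nrpe'] = nrpe
print(yaml.dump(existing))  # what would be written back to the relation
```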
""" +import functools import subprocess import os +import time from socket import gethostname as get_unit_hostname @@ -45,6 +47,9 @@ from charmhelpers.core.hookenv import ( is_leader as juju_is_leader, status_set, ) +from charmhelpers.core.host import ( + modulo_distribution, +) from charmhelpers.core.decorators import ( retry_on_exception, ) @@ -219,6 +224,11 @@ def https(): return True if config_get('ssl_cert') and config_get('ssl_key'): return True + for r_id in relation_ids('certificates'): + for unit in relation_list(r_id): + ca = relation_get('ca', rid=r_id, unit=unit) + if ca: + return True for r_id in relation_ids('identity-service'): for unit in relation_list(r_id): # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN @@ -272,6 +282,10 @@ def determine_apache_port(public_port, singlenode_mode=False): return public_port - (i * 10) +determine_apache_port_single = functools.partial( + determine_apache_port, singlenode_mode=True) + + def get_hacluster_config(exclude_keys=None): ''' Obtains all relevant configuration from charm configuration required @@ -361,3 +375,77 @@ def canonical_url(configs, vip_setting='vip'): else: addr = unit_get('private-address') return '%s://%s' % (scheme, addr) + + +def distributed_wait(modulo=None, wait=None, operation_name='operation'): + ''' Distribute operations by waiting based on modulo_distribution + + If modulo and or wait are not set, check config_get for those values. + If config values are not set, default to modulo=3 and wait=30. + + :param modulo: int The modulo number creates the group distribution + :param wait: int The constant time wait value + :param operation_name: string Operation name for status message + i.e. 'restart' + :side effect: Calls config_get() + :side effect: Calls log() + :side effect: Calls status_set() + :side effect: Calls time.sleep() + ''' + if modulo is None: + modulo = config_get('modulo-nodes') or 3 + if wait is None: + wait = config_get('known-wait') or 30 + if juju_is_leader(): + # The leader should never wait + calculated_wait = 0 + else: + # non_zero_wait=True guarantees the non-leader who gets modulo 0 + # will still wait + calculated_wait = modulo_distribution(modulo=modulo, wait=wait, + non_zero_wait=True) + msg = "Waiting {} seconds for {} ...".format(calculated_wait, + operation_name) + log(msg, DEBUG) + status_set('maintenance', msg) + time.sleep(calculated_wait) + + +def get_managed_services_and_ports(services, external_ports, + external_services=None, + port_conv_f=determine_apache_port_single): + """Get the services and ports managed by this charm. + + Return only the services and corresponding ports that are managed by this + charm. This excludes haproxy when there is a relation with hacluster. This + is because this charm passes responsability for stopping and starting + haproxy to hacluster. + + Similarly, if a relation with hacluster exists then the ports returned by + this method correspond to those managed by the apache server rather than + haproxy. + + :param services: List of services. + :type services: List[str] + :param external_ports: List of ports managed by external services. + :type external_ports: List[int] + :param external_services: List of services to be removed if ha relation is + present. + :type external_services: List[str] + :param port_conv_f: Function to apply to ports to calculate the ports + managed by services controlled by this charm. + :type port_convert_func: f() + :returns: A tuple containing a list of services first followed by a list of + ports. 
```diff
+
+
+def get_managed_services_and_ports(services, external_ports,
+                                   external_services=None,
+                                   port_conv_f=determine_apache_port_single):
+    """Get the services and ports managed by this charm.
+
+    Return only the services and corresponding ports that are managed by this
+    charm. This excludes haproxy when there is a relation with hacluster. This
+    is because this charm passes responsibility for stopping and starting
+    haproxy to hacluster.
+
+    Similarly, if a relation with hacluster exists then the ports returned by
+    this method correspond to those managed by the apache server rather than
+    haproxy.
+
+    :param services: List of services.
+    :type services: List[str]
+    :param external_ports: List of ports managed by external services.
+    :type external_ports: List[int]
+    :param external_services: List of services to be removed if ha relation is
+                              present.
+    :type external_services: List[str]
+    :param port_conv_f: Function to apply to ports to calculate the ports
+                        managed by services controlled by this charm.
+    :type port_conv_f: f()
+    :returns: A tuple containing a list of services first, followed by a list
+              of ports.
+    :rtype: Tuple[List[str], List[int]]
+    """
+    if external_services is None:
+        external_services = ['haproxy']
+    if relation_ids('ha'):
+        for svc in external_services:
+            try:
+                services.remove(svc)
+            except ValueError:
+                pass
+        external_ports = [port_conv_f(p) for p in external_ports]
+    return services, external_ports
```
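An illustrative call to `get_managed_services_and_ports()`; the service and port values are assumptions for a typical API charm fronted by haproxy, and the function must run inside a hook (it calls `relation_ids`):

```python
# Hypothetical usage; values are examples only.
from charmhelpers.contrib.hahelpers.cluster import (
    get_managed_services_and_ports,
)

services, ports = get_managed_services_and_ports(
    ['apache2', 'haproxy'], [8776])
# Without an 'ha' relation: (['apache2', 'haproxy'], [8776]).
# With hacluster related: 'haproxy' is dropped and each port is shifted by
# determine_apache_port_single, e.g. (['apache2'], [8766]) when TLS is not
# in play.
```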
+ """ + return os.environ['JUJU_MODEL_UUID'] + + +def principal_unit(): + """Returns the principal unit of this unit, otherwise None""" + # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT + principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None) + # If it's empty, then this unit is the principal + if principal_unit == '': + return os.environ['JUJU_UNIT_NAME'] + elif principal_unit is not None: + return principal_unit + # For Juju 2.1 and below, let's try work out the principle unit by + # the various charms' metadata.yaml. + for reltype in relation_types(): + for rid in relation_ids(reltype): + for unit in related_units(rid): + md = _metadata_unit(unit) + if not md: + continue + subordinate = md.pop('subordinate', None) + if not subordinate: + return unit + return None + + @cached def remote_service_name(relid=None): """The remote service name for a given relation-id (or the current relation)""" @@ -263,7 +349,7 @@ class Config(dict): self.implicit_save = True self._prev_dict = None self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) - if os.path.exists(self.path): + if os.path.exists(self.path) and os.stat(self.path).st_size: self.load_previous() atexit(self._implicit_save) @@ -283,7 +369,11 @@ class Config(dict): """ self.path = path or self.path with open(self.path) as f: - self._prev_dict = json.load(f) + try: + self._prev_dict = json.load(f) + except ValueError as e: + log('Unable to parse previous config data - {}'.format(str(e)), + level=ERROR) for k, v in copy.deepcopy(self._prev_dict).items(): if k not in self: self[k] = v @@ -319,6 +409,7 @@ class Config(dict): """ with open(self.path, 'w') as f: + os.fchmod(f.fileno(), 0o600) json.dump(self, f) def _implicit_save(self): @@ -326,22 +417,40 @@ class Config(dict): self.save() -@cached +_cache_config = None + + def config(scope=None): - """Juju charm configuration""" - config_cmd_line = ['config-get'] - if scope is not None: - config_cmd_line.append(scope) - else: - config_cmd_line.append('--all') - config_cmd_line.append('--format=json') + """ + Get the juju charm configuration (scope==None) or individual key, + (scope=str). The returned value is a Python data structure loaded as + JSON from the Juju config command. + + :param scope: If set, return the value for the specified key. + :type scope: Optional[str] + :returns: Either the whole config as a Config, or a key from it. + :rtype: Any + """ + global _cache_config + config_cmd_line = ['config-get', '--all', '--format=json'] try: - config_data = json.loads( - subprocess.check_output(config_cmd_line).decode('UTF-8')) + # JSON Decode Exception for Python3.5+ + exc_json = json.decoder.JSONDecodeError + except AttributeError: + # JSON Decode Exception for Python2.7 through Python3.4 + exc_json = ValueError + try: + if _cache_config is None: + config_data = json.loads( + subprocess.check_output(config_cmd_line).decode('UTF-8')) + _cache_config = Config(config_data) if scope is not None: - return config_data - return Config(config_data) - except ValueError: + return _cache_config.get(scope) + return _cache_config + except (exc_json, UnicodeDecodeError) as e: + log('Unable to parse output from config-get: config_cmd_line="{}" ' + 'message="{}"' + .format(config_cmd_line, str(e)), level=ERROR) return None @@ -435,6 +544,67 @@ def related_units(relid=None): subprocess.check_output(units_cmd_line).decode('UTF-8')) or [] +def expected_peer_units(): + """Get a generator for units we expect to join peer relation based on + goal-state. 
```diff
@@ -435,6 +544,67 @@ def related_units(relid=None):
             subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
 
 
+def expected_peer_units():
+    """Get a generator for units we expect to join peer relation based on
+    goal-state.
+
+    The local unit is excluded from the result to make it easy to gauge
+    completion of all peers joining the relation with existing hook tools.
+
+    Example usage:
+    log('peer {} of {} joined peer relation'
+        .format(len(related_units()),
+                len(list(expected_peer_units()))))
+
+    This function will raise NotImplementedError if used with juju versions
+    without goal-state support.
+
+    :returns: iterator
+    :rtype: types.GeneratorType
+    :raises: NotImplementedError
+    """
+    if not has_juju_version("2.4.0"):
+        # goal-state first appeared in 2.4.0.
+        raise NotImplementedError("goal-state")
+    _goal_state = goal_state()
+    return (key for key in _goal_state['units']
+            if '/' in key and key != local_unit())
+
+
+def expected_related_units(reltype=None):
+    """Get a generator for units we expect to join relation based on
+    goal-state.
+
+    Note that you can not use this function for the peer relation, take a look
+    at expected_peer_units() for that.
+
+    This function will raise KeyError if you request information for a
+    relation type for which juju goal-state does not have information.  It
+    will raise NotImplementedError if used with juju versions without
+    goal-state support.
+
+    Example usage:
+    log('participant {} of {} joined relation {}'
+        .format(len(related_units()),
+                len(list(expected_related_units())),
+                relation_type()))
+
+    :param reltype: Relation type to list data for, default is to list data
+        for the relation type we are currently executing a hook for.
+    :type reltype: str
+    :returns: iterator
+    :rtype: types.GeneratorType
+    :raises: KeyError, NotImplementedError
+    """
+    if not has_juju_version("2.4.4"):
+        # goal-state existed in 2.4.0, but did not list individual units to
+        # join a relation in 2.4.1 through 2.4.3. (LP: #1794739)
+        raise NotImplementedError("goal-state relation unit count")
+    reltype = reltype or relation_type()
+    _goal_state = goal_state()
+    return (key for key in _goal_state['relations'][reltype] if '/' in key)
+
+
 @cached
 def relation_for_unit(unit=None, rid=None):
     """Get the json representation of a unit's relation"""
@@ -478,6 +648,24 @@ def metadata():
     return yaml.safe_load(md)
 
 
+def _metadata_unit(unit):
+    """Given the name of a unit (e.g. apache2/0), get the unit charm's
+    metadata.yaml. Very similar to metadata() but allows us to inspect
+    other units. Unit needs to be co-located, such as a subordinate or
+    principal/primary.
+
+    :returns: metadata.yaml as a python object.
+
+    """
+    basedir = os.sep.join(charm_dir().split(os.sep)[:-2])
+    unitdir = 'unit-{}'.format(unit.replace(os.sep, '-'))
+    joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')
+    if not os.path.exists(joineddir):
+        return None
+    with open(joineddir) as md:
+        return yaml.safe_load(md)
+
+
 @cached
 def relation_types():
     """Get a list of relation types supported by this charm"""
```
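A sketch of gating on goal-state with the new `expected_peer_units()`; only meaningful on Juju >= 2.4.0, and the fallback behaviour shown is an illustrative choice, not something this commit prescribes:

```python
# Hedged sketch: wait until all expected peers have joined.
from charmhelpers.core.hookenv import (
    expected_peer_units,
    related_units,
    log,
)


def all_peers_joined():
    try:
        expected = len(list(expected_peer_units()))
    except NotImplementedError:
        log('goal-state not supported; assuming peers are complete')
        return True
    joined = len(related_units())
    log('peer {} of {} joined peer relation'.format(joined, expected))
    return joined >= expected
```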
```diff
@@ -602,18 +790,31 @@ def is_relation_made(relation, keys='private-address'):
     return False
 
 
+def _port_op(op_name, port, protocol="TCP"):
+    """Open or close a service network port"""
+    _args = [op_name]
+    icmp = protocol.upper() == "ICMP"
+    if icmp:
+        _args.append(protocol)
+    else:
+        _args.append('{}/{}'.format(port, protocol))
+    try:
+        subprocess.check_call(_args)
+    except subprocess.CalledProcessError:
+        # Older Juju pre 2.3 doesn't support ICMP
+        # so treat it as a no-op if it fails.
+        if not icmp:
+            raise
+
+
 def open_port(port, protocol="TCP"):
     """Open a service network port"""
-    _args = ['open-port']
-    _args.append('{}/{}'.format(port, protocol))
-    subprocess.check_call(_args)
+    _port_op('open-port', port, protocol)
 
 
 def close_port(port, protocol="TCP"):
     """Close a service network port"""
-    _args = ['close-port']
-    _args.append('{}/{}'.format(port, protocol))
-    subprocess.check_call(_args)
+    _port_op('close-port', port, protocol)
 
 
 def open_ports(start, end, protocol="TCP"):
@@ -630,6 +831,17 @@ def close_ports(start, end, protocol="TCP"):
     subprocess.check_call(_args)
 
 
+def opened_ports():
+    """Get the opened ports
+
+    *Note that this will only show ports opened in a previous hook*
+
+    :returns: Opened ports as a list of strings:
+        ``['8080/tcp', '8081-8083/tcp']``
+    """
+    _args = ['opened-ports', '--format=json']
+    return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+
+
 @cached
 def unit_get(attribute):
     """Get the unit ID for the remote unit"""
```
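What the `_port_op()` refactor buys: ICMP can now be opened as a protocol of its own, with a graceful no-op on Juju releases before 2.3. A hedged sketch; the ICMP call shape is my reading of the helper, not documented API:

```python
# Illustrative calls; each maps to the hook tool shown in the comment.
from charmhelpers.core.hookenv import open_port, opened_ports

open_port(8080)            # runs: open-port 8080/TCP
open_port(8080, 'UDP')     # runs: open-port 8080/UDP
open_port(None, 'ICMP')    # runs: open-port ICMP (port is ignored);
                           # silently a no-op if Juju < 2.3 rejects it

# opened_ports() reflects ports opened by *previous* hooks, e.g.
# ['8080/tcp', '8080/udp']
```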
```diff
@@ -751,14 +963,35 @@ class Hooks(object):
         return wrapper
 
 
+class NoNetworkBinding(Exception):
+    pass
+
+
 def charm_dir():
     """Return the root directory of the current charm"""
+    d = os.environ.get('JUJU_CHARM_DIR')
+    if d is not None:
+        return d
     return os.environ.get('CHARM_DIR')
 
 
+def cmd_exists(cmd):
+    """Return True if the specified cmd exists in the path"""
+    return any(
+        os.access(os.path.join(path, cmd), os.X_OK)
+        for path in os.environ["PATH"].split(os.pathsep)
+    )
+
+
 @cached
+@deprecate("moved to function_get()", log=log)
 def action_get(key=None):
-    """Gets the value of an action parameter, or all key/value param pairs"""
+    """
+    .. deprecated:: 0.20.7
+       Alias for :func:`function_get`.
+
+    Gets the value of an action parameter, or all key/value param pairs.
+    """
     cmd = ['action-get']
     if key is not None:
         cmd.append(key)
@@ -767,52 +1000,130 @@ def action_get(key=None):
     return action_data
 
 
+@cached
+def function_get(key=None):
+    """Gets the value of an action parameter, or all key/value param pairs"""
+    cmd = ['function-get']
+    # Fallback for older charms.
+    if not cmd_exists('function-get'):
+        cmd = ['action-get']
+
+    if key is not None:
+        cmd.append(key)
+    cmd.append('--format=json')
+    function_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+    return function_data
+
+
+@deprecate("moved to function_set()", log=log)
 def action_set(values):
-    """Sets the values to be returned after the action finishes"""
+    """
+    .. deprecated:: 0.20.7
+       Alias for :func:`function_set`.
+
+    Sets the values to be returned after the action finishes.
+    """
     cmd = ['action-set']
     for k, v in list(values.items()):
         cmd.append('{}={}'.format(k, v))
     subprocess.check_call(cmd)
 
 
+def function_set(values):
+    """Sets the values to be returned after the function finishes"""
+    cmd = ['function-set']
+    # Fallback for older charms.
+    if not cmd_exists('function-get'):
+        cmd = ['action-set']
+
+    for k, v in list(values.items()):
+        cmd.append('{}={}'.format(k, v))
+    subprocess.check_call(cmd)
+
+
+@deprecate("moved to function_fail()", log=log)
 def action_fail(message):
-    """Sets the action status to failed and sets the error message.
+    """
+    .. deprecated:: 0.20.7
+       Alias for :func:`function_fail`.
+
+    Sets the action status to failed and sets the error message.
 
-    The results set by action_set are preserved."""
+    The results set by action_set are preserved.
+    """
     subprocess.check_call(['action-fail', message])
 
 
+def function_fail(message):
+    """Sets the function status to failed and sets the error message.
+
+    The results set by function_set are preserved."""
+    cmd = ['function-fail']
+    # Fallback for older charms.
+    if not cmd_exists('function-fail'):
+        cmd = ['action-fail']
+    cmd.append(message)
+
+    subprocess.check_call(cmd)
+
+
 def action_name():
     """Get the name of the currently executing action."""
     return os.environ.get('JUJU_ACTION_NAME')
 
 
+def function_name():
+    """Get the name of the currently executing function."""
+    return os.environ.get('JUJU_FUNCTION_NAME') or action_name()
+
+
 def action_uuid():
     """Get the UUID of the currently executing action."""
     return os.environ.get('JUJU_ACTION_UUID')
 
 
+def function_id():
+    """Get the ID of the currently executing function."""
+    return os.environ.get('JUJU_FUNCTION_ID') or action_uuid()
+
+
 def action_tag():
     """Get the tag for the currently executing action."""
     return os.environ.get('JUJU_ACTION_TAG')
 
 
-def status_set(workload_state, message):
+def function_tag():
+    """Get the tag for the currently executing function."""
+    return os.environ.get('JUJU_FUNCTION_TAG') or action_tag()
+
+
+def status_set(workload_state, message, application=False):
     """Set the workload state with a message
 
     Use status-set to set the workload state with a message which is visible
     to the user via juju status. If the status-set command is not found then
-    assume this is juju < 1.23 and juju-log the message unstead.
+    assume this is juju < 1.23 and juju-log the message instead.
 
-    workload_state -- valid juju workload state.
-    message        -- status update message
+    workload_state   -- valid juju workload state. str or WORKLOAD_STATES
+    message          -- status update message
+    application      -- Whether this is an application state set
     """
-    valid_states = ['maintenance', 'blocked', 'waiting', 'active']
-    if workload_state not in valid_states:
-        raise ValueError(
-            '{!r} is not a valid workload state'.format(workload_state)
-        )
-    cmd = ['status-set', workload_state, message]
+    bad_state_msg = '{!r} is not a valid workload state'
+
+    if isinstance(workload_state, str):
+        try:
+            # Convert string to enum.
+            workload_state = WORKLOAD_STATES[workload_state.upper()]
+        except KeyError:
+            raise ValueError(bad_state_msg.format(workload_state))
+
+    if workload_state not in WORKLOAD_STATES:
+        raise ValueError(bad_state_msg.format(workload_state))
+
+    cmd = ['status-set']
+    if application:
+        cmd.append('--application')
+    cmd.extend([workload_state.value, message])
     try:
         ret = subprocess.call(cmd)
         if ret == 0:
@@ -820,7 +1131,7 @@ def status_set(workload_state, message):
     except OSError as e:
         if e.errno != errno.ENOENT:
             raise
-    log_message = 'status-set failed: {} {}'.format(workload_state,
+    log_message = 'status-set failed: {} {}'.format(workload_state.value,
                                                     message)
     log(log_message, level='INFO')
 
@@ -874,6 +1185,14 @@ def application_version_set(version):
 
 
 @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+@cached
+def goal_state():
+    """Juju goal state values"""
+    cmd = ['goal-state', '--format=json']
+    return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 def is_leader():
     """Does the current unit hold the juju leadership
 
@@ -967,7 +1286,6 @@ def juju_version():
                                    universal_newlines=True).strip()
 
 
-@cached
 def has_juju_version(minimum_version):
     """Return True if the Juju version is at least the provided version"""
     return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
@@ -1027,6 +1345,8 @@ def _run_atexit():
 @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
 def network_get_primary_address(binding):
     '''
+    Deprecated since Juju 2.3; use network_get()
+
     Retrieve the primary network address for a named binding
 
     :param binding: string. The name of a relation of extra-binding
@@ -1034,7 +1354,41 @@ def network_get_primary_address(binding):
     :raise: NotImplementedError if run on Juju < 2.0
     '''
     cmd = ['network-get', '--primary-address', binding]
-    return subprocess.check_output(cmd).decode('UTF-8').strip()
+    try:
+        response = subprocess.check_output(
+            cmd,
+            stderr=subprocess.STDOUT).decode('UTF-8').strip()
+    except CalledProcessError as e:
+        if 'no network config found for binding' in e.output.decode('UTF-8'):
+            raise NoNetworkBinding("No network binding for {}"
+                                   .format(binding))
+        else:
+            raise
+    return response
+
+
+def network_get(endpoint, relation_id=None):
+    """
+    Retrieve the network details for a relation endpoint
+
+    :param endpoint: string. The name of a relation endpoint
+    :param relation_id: int. The ID of the relation for the current context.
+    :return: dict. The loaded YAML output of the network-get query.
+    :raise: NotImplementedError if request not supported by the Juju version.
+    """
+    if not has_juju_version('2.2'):
+        raise NotImplementedError(juju_version())  # earlier versions require --primary-address
+    if relation_id and not has_juju_version('2.3'):
+        raise NotImplementedError  # 2.3 added the -r option
+
+    cmd = ['network-get', endpoint, '--format', 'yaml']
+    if relation_id:
+        cmd.append('-r')
+        cmd.append(relation_id)
+    response = subprocess.check_output(
+        cmd,
+        stderr=subprocess.STDOUT).decode('UTF-8').strip()
+    return yaml.safe_load(response)
 
 
 def add_metric(*args, **kwargs):
```
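`status_set()` now accepts either the usual string or the new `WORKLOAD_STATES` enum, and can set application-level status. A usage sketch; the messages are illustrative, and the application-status call is only honoured when made from the leader unit:

```python
# Illustrative calls to the extended status_set().
from charmhelpers.core.hookenv import status_set, WORKLOAD_STATES

status_set('maintenance', 'installing packages')      # string, as before
status_set(WORKLOAD_STATES.ACTIVE, 'Unit is ready')   # enum equivalent
# With application=True the status applies to the whole application,
# i.e. it runs `status-set --application active ...`.
status_set(WORKLOAD_STATES.ACTIVE, 'Application ready', application=True)
```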
```diff
@@ -1066,3 +1420,192 @@ def meter_info():
     """Get the meter status information, if running in the meter-status-changed
     hook."""
     return os.environ.get('JUJU_METER_INFO')
+
+
+def iter_units_for_relation_name(relation_name):
+    """Iterate through all units in a relation
+
+    Generator that iterates through all the units in a relation and yields
+    a named tuple with rid and unit field names.
+
+    Usage:
+    data = [(u.rid, u.unit)
+            for u in iter_units_for_relation_name(relation_name)]
+
+    :param relation_name: string relation name
+    :yield: Named Tuple with rid and unit field names
+    """
+    RelatedUnit = namedtuple('RelatedUnit', 'rid, unit')
+    for rid in relation_ids(relation_name):
+        for unit in related_units(rid):
+            yield RelatedUnit(rid, unit)
+
+
+def ingress_address(rid=None, unit=None):
+    """
+    Retrieve the ingress-address from a relation when available.
+    Otherwise, return the private-address.
+
+    When used on the consuming side of the relation (unit is a remote
+    unit), the ingress-address is the IP address that this unit needs
+    to use to reach the provided service on the remote unit.
+
+    When used on the providing side of the relation (unit == local_unit()),
+    the ingress-address is the IP address that is advertised to remote
+    units on this relation. Remote units need to use this address to
+    reach the local provided service on this unit.
+
+    Note that charms may document some other method to use in
+    preference to the ingress_address(), such as an address provided
+    on a different relation attribute or a service discovery mechanism.
+    This allows charms to redirect inbound connections to their peers
+    or different applications such as load balancers.
+
+    Usage:
+    addresses = [ingress_address(rid=u.rid, unit=u.unit)
+                 for u in iter_units_for_relation_name(relation_name)]
+
+    :param rid: string relation id
+    :param unit: string unit name
+    :side effect: calls relation_get
+    :return: string IP address
+    """
+    settings = relation_get(rid=rid, unit=unit)
+    return (settings.get('ingress-address') or
+            settings.get('private-address'))
+
+
+def egress_subnets(rid=None, unit=None):
+    """
+    Retrieve the egress-subnets from a relation.
+
+    This function is to be used on the providing side of the
+    relation, and provides the ranges of addresses that client
+    connections may come from. The result is uninteresting on
+    the consuming side of a relation (unit == local_unit()).
+
+    Returns a stable list of subnets in CIDR format.
+    eg. ['192.168.1.0/24', '2001::F00F/128']
+
+    If egress-subnets is not available, falls back to using the published
+    ingress-address, or finally private-address.
+
+    :param rid: string relation id
+    :param unit: string unit name
+    :side effect: calls relation_get
+    :return: list of subnets in CIDR format. eg. ['192.168.1.0/24',
+        '2001::F00F/128']
+    """
+    def _to_range(addr):
+        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
+            addr += '/32'
+        elif ':' in addr and '/' not in addr:  # IPv6
+            addr += '/128'
+        return addr
+
+    settings = relation_get(rid=rid, unit=unit)
+    if 'egress-subnets' in settings:
+        return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
+    if 'ingress-address' in settings:
+        return [_to_range(settings['ingress-address'])]
+    if 'private-address' in settings:
+        return [_to_range(settings['private-address'])]
+    return []  # Should never happen
```
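The normalisation `egress_subnets()` applies when it has to fall back to a bare address, shown standalone so it can be run outside a hook (the addresses are illustrative):

```python
# Self-contained replica of the _to_range() fallback logic above.
import re


def to_range(addr):
    if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
        return addr + '/32'    # bare IPv4 -> host route
    if ':' in addr and '/' not in addr:
        return addr + '/128'   # bare IPv6 -> host route
    return addr                # already a CIDR, leave untouched


assert to_range('192.168.1.10') == '192.168.1.10/32'
assert to_range('2001:db8::f00f') == '2001:db8::f00f/128'
assert to_range('10.0.0.0/8') == '10.0.0.0/8'
```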
```diff
+
+
+def unit_doomed(unit=None):
+    """Determines if the unit is being removed from the model
+
+    Requires Juju 2.4.1.
+
+    :param unit: string unit name, defaults to local_unit
+    :side effect: calls goal_state
+    :side effect: calls local_unit
+    :side effect: calls has_juju_version
+    :return: True if the unit is being removed, already gone, or never existed
+    """
+    if not has_juju_version("2.4.1"):
+        # We cannot risk blindly returning False for 'we don't know',
+        # because that could cause data loss; if call sites don't
+        # need an accurate answer, they likely don't need this helper
+        # at all.
+        # goal-state existed in 2.4.0, but did not handle removals
+        # correctly until 2.4.1.
+        raise NotImplementedError("is_doomed")
+    if unit is None:
+        unit = local_unit()
+    gs = goal_state()
+    units = gs.get('units', {})
+    if unit not in units:
+        return True
+    # I don't think 'dead' units ever show up in the goal-state, but
+    # check anyway in addition to 'dying'.
+    return units[unit]['status'] in ('dying', 'dead')
+
+
+def env_proxy_settings(selected_settings=None):
+    """Get proxy settings from process environment variables.
+
+    Get charm proxy settings from environment variables that correspond to
+    juju-http-proxy, juju-https-proxy, juju-no-proxy (available as of 2.4.2,
+    see lp:1782236) and juju-ftp-proxy, in a format suitable for passing to
+    an application that reacts to proxy settings passed as environment
+    variables. Some applications support lowercase or uppercase notation
+    (e.g. curl), some support only lowercase (e.g. wget); there are also
+    subjectively rare cases of only uppercase notation support. no_proxy
+    CIDR and wildcard support also varies between runtimes and applications,
+    as there is no enforced standard.
+
+    Some applications may connect to multiple destinations and expose config
+    options that would affect only proxy settings for a specific destination;
+    these should be handled in charms in an application-specific manner.
+
+    :param selected_settings: format only a subset of possible settings
+    :type selected_settings: list
+    :rtype: Option(None, dict[str, str])
+    """
+    SUPPORTED_SETTINGS = {
+        'http': 'HTTP_PROXY',
+        'https': 'HTTPS_PROXY',
+        'no_proxy': 'NO_PROXY',
+        'ftp': 'FTP_PROXY'
+    }
+    if selected_settings is None:
+        selected_settings = SUPPORTED_SETTINGS
+
+    selected_vars = [v for k, v in SUPPORTED_SETTINGS.items()
+                     if k in selected_settings]
+    proxy_settings = {}
+    for var in selected_vars:
+        var_val = os.getenv(var)
+        if var_val:
+            proxy_settings[var] = var_val
+            proxy_settings[var.lower()] = var_val
+        # Now handle juju-prefixed environment variables. The legacy vs new
+        # environment variable usage is mutually exclusive
+        charm_var_val = os.getenv('JUJU_CHARM_{}'.format(var))
+        if charm_var_val:
+            proxy_settings[var] = charm_var_val
+            proxy_settings[var.lower()] = charm_var_val
+    if 'no_proxy' in proxy_settings:
+        if _contains_range(proxy_settings['no_proxy']):
+            log(RANGE_WARNING, level=WARNING)
+    return proxy_settings if proxy_settings else None
+
+
+def _contains_range(addresses):
+    """Check for cidr or wildcard domain in a string.
+
+    Given a string comprising a comma separated list of ip addresses
+    and domain names, determine whether the string contains IP ranges
+    or wildcard domains.
+
+    :param addresses: comma separated list of domains and ip addresses.
+    :type addresses: str
+    """
+    return (
+        # Test for cidr (e.g. 10.20.20.0/24)
+        "/" in addresses or
+        # Test for wildcard domains (*.foo.com or .foo.com)
+        "*" in addresses or
+        addresses.startswith(".") or
+        ",." in addresses or
+        " ." in addresses)
```
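A sketch of what `env_proxy_settings()` is for: injecting the model's Juju proxy configuration into a subprocess environment. The curl invocation and URL are illustrative assumptions:

```python
# Hypothetical use: run a download tool with Juju's proxy settings applied.
import os
import subprocess

from charmhelpers.core.hookenv import env_proxy_settings

proxy_env = dict(os.environ)
# Restrict to the schemes the tool actually honours; returns both
# UPPERCASE and lowercase variants (HTTP_PROXY / http_proxy), or None.
proxy_env.update(env_proxy_settings(['http', 'https']) or {})
subprocess.check_call(
    ['curl', '-sSO', 'https://example.com/payload.tar.gz'],
    env=proxy_env)
```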
```diff
diff --git a/charmhelpers/core/host.py b/charmhelpers/core/host.py
index b0043cb..b33ac90 100644
--- a/charmhelpers/core/host.py
+++ b/charmhelpers/core/host.py
@@ -34,21 +34,23 @@ import six
 
 from contextlib import contextmanager
 from collections import OrderedDict
-from .hookenv import log
+from .hookenv import log, INFO, DEBUG, local_unit, charm_name
 from .fstab import Fstab
 from charmhelpers.osplatform import get_platform
 
 __platform__ = get_platform()
 if __platform__ == "ubuntu":
-    from charmhelpers.core.host_factory.ubuntu import (
+    from charmhelpers.core.host_factory.ubuntu import (  # NOQA:F401
         service_available,
         add_new_group,
         lsb_release,
         cmp_pkgrevno,
         CompareHostReleases,
+        get_distrib_codename,
+        arch
     )  # flake8: noqa -- ignore F401 for this import
 elif __platform__ == "centos":
-    from charmhelpers.core.host_factory.centos import (
+    from charmhelpers.core.host_factory.centos import (  # NOQA:F401
         service_available,
         add_new_group,
         lsb_release,
@@ -58,6 +60,7 @@ elif __platform__ == "centos":
 
 UPDATEDB_PATH = '/etc/updatedb.conf'
 
+
 def service_start(service_name, **kwargs):
     """Start a system service.
 
@@ -287,8 +290,8 @@ def service_running(service_name, **kwargs):
         for key, value in six.iteritems(kwargs):
             parameter = '%s=%s' % (key, value)
             cmd.append(parameter)
-        output = subprocess.check_output(cmd,
-                                         stderr=subprocess.STDOUT).decode('UTF-8')
+        output = subprocess.check_output(
+            cmd, stderr=subprocess.STDOUT).decode('UTF-8')
     except subprocess.CalledProcessError:
         return False
     else:
@@ -441,6 +444,51 @@ def add_user_to_group(username, group):
     subprocess.check_call(cmd)
 
 
+def chage(username, lastday=None, expiredate=None, inactive=None,
+          mindays=None, maxdays=None, root=None, warndays=None):
+    """Change user password expiry information
+
+    :param str username: User to update
+    :param str lastday: Set when password was changed in YYYY-MM-DD format
+    :param str expiredate: Set when user's account will no longer be
+        accessible in YYYY-MM-DD format.
+        -1 will remove an account expiration date.
+    :param str inactive: Set the number of days of inactivity after a password
+        has expired before the account is locked.
+        -1 will remove an account's inactivity.
+    :param str mindays: Set the minimum number of days between password
+        changes to MIN_DAYS.
+        0 indicates the password can be changed anytime.
+    :param str maxdays: Set the maximum number of days during which a
+        password is valid.
+        -1 as MAX_DAYS will remove checking maxdays
+    :param str root: Apply changes in the CHROOT_DIR directory
+    :param str warndays: Set the number of days of warning before a password
+        change is required
+    :raises subprocess.CalledProcessError: if call to chage fails
+    """
+    cmd = ['chage']
+    if root:
+        cmd.extend(['--root', root])
+    if lastday:
+        cmd.extend(['--lastday', lastday])
+    if expiredate:
+        cmd.extend(['--expiredate', expiredate])
+    if inactive:
+        cmd.extend(['--inactive', inactive])
+    if mindays:
+        cmd.extend(['--mindays', mindays])
+    if maxdays:
+        cmd.extend(['--maxdays', maxdays])
+    if warndays:
+        cmd.extend(['--warndays', warndays])
+    cmd.append(username)
+    subprocess.check_call(cmd)
+
+
+remove_password_expiry = functools.partial(chage, expiredate='-1',
+                                           inactive='-1', mindays='0',
+                                           maxdays='-1')
+
+
 def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
     """Replicate the contents of a path"""
     options = options or ['--delete', '--executability']
```
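A sketch of the new `chage()` wrapper and its `remove_password_expiry` partial; the username is illustrative, and each keyword maps one-to-one onto a `chage(1)` flag:

```python
# Hypothetical usage of the helpers added above.
from charmhelpers.core.host import chage, remove_password_expiry

# Equivalent to: chage --maxdays 90 --warndays 7 ubuntu
chage('ubuntu', maxdays='90', warndays='7')

# The functools.partial defined above: clears the expiry date, the
# inactivity lock, and the min/max day checks in one call.
remove_password_expiry('ubuntu')
```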
```diff
@@ -487,13 +535,45 @@ def mkdir(path, owner='root', group='root', perms=0o555, force=False):
 
 def write_file(path, content, owner='root', group='root', perms=0o444):
     """Create or overwrite a file with the contents of a byte string."""
-    log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
     uid = pwd.getpwnam(owner).pw_uid
     gid = grp.getgrnam(group).gr_gid
-    with open(path, 'wb') as target:
-        os.fchown(target.fileno(), uid, gid)
-        os.fchmod(target.fileno(), perms)
-        target.write(content)
+    # let's see if we can grab the file and compare the content, to avoid
+    # doing a write.
+    existing_content = None
+    existing_uid, existing_gid, existing_perms = None, None, None
+    try:
+        with open(path, 'rb') as target:
+            existing_content = target.read()
+        stat = os.stat(path)
+        existing_uid, existing_gid, existing_perms = (
+            stat.st_uid, stat.st_gid, stat.st_mode
+        )
+    except Exception:
+        pass
+    if content != existing_content:
+        log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
+            level=DEBUG)
+        with open(path, 'wb') as target:
+            os.fchown(target.fileno(), uid, gid)
+            os.fchmod(target.fileno(), perms)
+            if six.PY3 and isinstance(content, six.string_types):
+                content = content.encode('UTF-8')
+            target.write(content)
+        return
+    # the contents were the same, but we might still need to change the
+    # ownership or permissions.
+    if existing_uid != uid:
+        log("Changing uid on already existing content: {} -> {}"
+            .format(existing_uid, uid), level=DEBUG)
+        os.chown(path, uid, -1)
+    if existing_gid != gid:
+        log("Changing gid on already existing content: {} -> {}"
+            .format(existing_gid, gid), level=DEBUG)
+        os.chown(path, -1, gid)
+    if existing_perms != perms:
+        log("Changing permissions on existing content: {} -> {}"
+            .format(existing_perms, perms), level=DEBUG)
+        os.chmod(path, perms)
 
 
 def fstab_remove(mp):
@@ -758,7 +838,7 @@ def list_nics(nic_type=None):
         ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
         ip_output = (line.strip() for line in ip_output if line)
 
-        key = re.compile('^[0-9]+:\s+(.+):')
+        key = re.compile(r'^[0-9]+:\s+(.+):')
         for line in ip_output:
             matched = re.search(key, line)
             if matched:
@@ -903,6 +983,20 @@ def is_container():
 
 
 def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
+    """Adds the specified path to mlocate's updatedb.conf PRUNEPATHS list.
+
+    This method has no effect if the path specified by updatedb_path does not
+    exist or is not a file.
+
+    @param path: string the path to add to the updatedb.conf PRUNEPATHS value
+    @param updatedb_path: the path to the updatedb.conf file
+    """
+    if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path):
+        # If the updatedb.conf file doesn't exist then don't attempt to update
+        # the file as the package providing mlocate may not be installed on
+        # the local system
+        return
+
     with open(updatedb_path, 'r+') as f_id:
         updatedb_text = f_id.read()
         output = updatedb(updatedb_text, path)
@@ -922,3 +1016,89 @@ def updatedb(updatedb_text, new_path):
             lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
     output = "\n".join(lines)
     return output
+
+
+def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
+    """ Modulo distribution
+
+    This helper uses the unit number, a modulo value and a constant wait time
+    to produce a calculated wait time distribution. This is useful in large
+    scale deployments to distribute load during an expensive operation such as
+    service restarts.
+
+    If you have 1000 nodes that need to restart 100 at a time, 1 minute at a
+    time:
+
+      time.wait(modulo_distribution(modulo=100, wait=60))
+      restart()
+
+    If you need restarts to happen serially, set modulo to the exact number of
+    nodes and set a high constant wait time:
+
+      time.wait(modulo_distribution(modulo=10, wait=120))
+      restart()
+
+    @param modulo: int The modulo number that creates the group distribution
+    @param wait: int The constant time wait value
+    @param non_zero_wait: boolean Override unit % modulo == 0,
+                          return modulo * wait. Used to avoid collisions with
+                          leader nodes which are often given priority.
+    @return: int Calculated time to wait for unit operation
+    """
+    unit_number = int(local_unit().split('/')[1])
+    calculated_wait_time = (unit_number % modulo) * wait
+    if non_zero_wait and calculated_wait_time == 0:
+        return modulo * wait
+    else:
+        return calculated_wait_time
+
+
+def install_ca_cert(ca_cert, name=None):
+    """
+    Install the given cert as a trusted CA.
+
+    The ``name`` is the stem of the filename where the cert is written, and if
+    not provided, it will default to ``juju-{charm_name}``.
+
+    If the cert is empty or None, or is unchanged, nothing is done.
+    """
+    if not ca_cert:
+        return
+    if not isinstance(ca_cert, bytes):
+        ca_cert = ca_cert.encode('utf8')
+    if not name:
+        name = 'juju-{}'.format(charm_name())
+    cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name)
+    new_hash = hashlib.md5(ca_cert).hexdigest()
+    if file_hash(cert_file) == new_hash:
+        return
+    log("Installing new CA cert at: {}".format(cert_file), level=INFO)
+    write_file(cert_file, ca_cert)
+    subprocess.check_call(['update-ca-certificates', '--fresh'])
```
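The arithmetic behind `modulo_distribution()` above, worked through for a concrete unit; the unit numbers are illustrative:

```python
# Worked example: unit myapp/7 (unit number 7) with modulo=3, wait=30:
# (7 % 3) * 30 == 1 * 30 == 30 seconds of sleep.
unit_number = 7
modulo, wait = 3, 30
assert (unit_number % modulo) * wait == 30

# Units 0, 3, 6, ... compute 0; with non_zero_wait=True they instead get
# modulo * wait == 90, keeping the "zero bucket" from colliding with the
# leader, which is the only participant allowed to proceed immediately.
```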
```diff
+
+
+def get_system_env(key, default=None):
+    """Get data from system environment as represented in ``/etc/environment``.
+
+    :param key: Key to look up
+    :type key: str
+    :param default: Value to return if key is not found
+    :type default: any
+    :returns: Value for key if found or contents of default parameter
+    :rtype: any
+    :raises: subprocess.CalledProcessError
+    """
+    env_file = '/etc/environment'
+    # use the shell and env(1) to parse the global environments file.  This
+    # is done to get the correct result even if the user has shell variable
+    # substitutions or other shell logic in that file.
+    output = subprocess.check_output(
+        ['env', '-i', '/bin/bash', '-c',
+         'set -a && source {} && env'.format(env_file)],
+        universal_newlines=True)
+    for k, v in (line.split('=', 1)
+                 for line in output.splitlines() if '=' in line):
+        if k == key:
+            return v
+    else:
+        return default
diff --git a/charmhelpers/core/host_factory/ubuntu.py b/charmhelpers/core/host_factory/ubuntu.py
index d8dc378..3edc068 100644
--- a/charmhelpers/core/host_factory/ubuntu.py
+++ b/charmhelpers/core/host_factory/ubuntu.py
@@ -1,5 +1,6 @@
 import subprocess
 
+from charmhelpers.core.hookenv import cached
 from charmhelpers.core.strutils import BasicStringComparator
 
 
@@ -20,6 +21,11 @@ UBUNTU_RELEASES = (
     'yakkety',
     'zesty',
     'artful',
+    'bionic',
+    'cosmic',
+    'disco',
+    'eoan',
+    'focal'
 )
 
 
@@ -70,6 +76,14 @@ def lsb_release():
     return d
 
 
+def get_distrib_codename():
+    """Return the codename of the distribution
+    :returns: The codename
+    :rtype: str
+    """
+    return lsb_release()['DISTRIB_CODENAME'].lower()
+
+
 def cmp_pkgrevno(package, revno, pkgcache=None):
     """Compare supplied revno with the revno of the installed package.
 
@@ -81,9 +95,22 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
        the pkgcache argument is None. Be sure to add charmhelpers.fetch if
        you call this function, or pass an apt_pkg.Cache() instance.
     """
-    import apt_pkg
+    from charmhelpers.fetch import apt_pkg
     if not pkgcache:
         from charmhelpers.fetch import apt_cache
         pkgcache = apt_cache()
     pkg = pkgcache[package]
     return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
+
+
+@cached
+def arch():
+    """Return the package architecture as a string.
+
+    :returns: the architecture
+    :rtype: str
+    :raises: subprocess.CalledProcessError if dpkg command fails
+    """
+    return subprocess.check_output(
+        ['dpkg', '--print-architecture']
+    ).rstrip().decode('UTF-8')
diff --git a/charmhelpers/core/kernel.py b/charmhelpers/core/kernel.py
index 2d40452..e01f4f8 100644
--- a/charmhelpers/core/kernel.py
+++ b/charmhelpers/core/kernel.py
@@ -26,12 +26,12 @@ from charmhelpers.core.hookenv import (
 
 __platform__ = get_platform()
 if __platform__ == "ubuntu":
-    from charmhelpers.core.kernel_factory.ubuntu import (
+    from charmhelpers.core.kernel_factory.ubuntu import (  # NOQA:F401
         persistent_modprobe,
         update_initramfs,
     )  # flake8: noqa -- ignore F401 for this import
 elif __platform__ == "centos":
-    from charmhelpers.core.kernel_factory.centos import (
+    from charmhelpers.core.kernel_factory.centos import (  # NOQA:F401
         persistent_modprobe,
         update_initramfs,
     )  # flake8: noqa -- ignore F401 for this import
diff --git a/charmhelpers/core/services/base.py b/charmhelpers/core/services/base.py
index ca9dc99..179ad4f 100644
--- a/charmhelpers/core/services/base.py
+++ b/charmhelpers/core/services/base.py
@@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback):
     """
     def __call__(self, manager, service_name, event_name):
         service = manager.get_service(service_name)
-        new_ports = service.get('ports', [])
+        # turn this generator into a list,
+        # as we'll be going over it multiple times
+        new_ports = list(service.get('ports', []))
        port_file = os.path.join(hookenv.charm_dir(),
                                  '.{}.ports'.format(service_name))
         if os.path.exists(port_file):
             with open(port_file) as fp:
                 old_ports = fp.read().split(',')
             for old_port in old_ports:
-                if bool(old_port):
-                    old_port = int(old_port)
-                    if old_port not in new_ports:
-                        hookenv.close_port(old_port)
+                if bool(old_port) and not self.ports_contains(old_port, new_ports):
+                    hookenv.close_port(old_port)
         with open(port_file, 'w') as fp:
             fp.write(','.join(str(port) for port in new_ports))
         for port in new_ports:
+            # A port is either a number or 'ICMP'
+            protocol = 'TCP'
+            if str(port).upper() == 'ICMP':
+                protocol = 'ICMP'
             if event_name == 'start':
-                hookenv.open_port(port)
+                hookenv.open_port(port, protocol)
             elif event_name == 'stop':
-                hookenv.close_port(port)
+                hookenv.close_port(port, protocol)
+
+    def ports_contains(self, port, ports):
+        if not bool(port):
+            return False
+        if str(port).upper() != 'ICMP':
+            port = int(port)
+        return port in ports
 
 
 def service_stop(service_name):
diff --git a/charmhelpers/core/strutils.py b/charmhelpers/core/strutils.py
index 685dabd..e8df045 100644
--- a/charmhelpers/core/strutils.py
+++ b/charmhelpers/core/strutils.py
@@ -61,13 +61,19 @@ def bytes_from_string(value):
     if isinstance(value, six.string_types):
         value = six.text_type(value)
     else:
-        msg = "Unable to interpret non-string value '%s' as boolean" % (value)
+        msg = "Unable to interpret non-string value '%s' as bytes" % (value)
         raise ValueError(msg)
     matches = re.match("([0-9]+)([a-zA-Z]+)", value)
-    if not matches:
-        msg = "Unable to interpret string value '%s' as bytes" % (value)
-        raise ValueError(msg)
-    return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+    if matches:
+        size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+    else:
+        # Assume that value passed in is bytes
+        try:
+            size = int(value)
+        except ValueError:
+            msg = "Unable to interpret string value '%s' as bytes" % (value)
+            raise ValueError(msg)
+    return size
 
 
 class BasicStringComparator(object):
```
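Behaviour of the reworked `bytes_from_string()`: suffixed values still work as before, and a bare number is now accepted as a plain byte count instead of raising. A few verifiable conversions:

```python
from charmhelpers.core.strutils import bytes_from_string

assert bytes_from_string('3K') == 3 * 1024
assert bytes_from_string('2M') == 2 * 1024 ** 2
assert bytes_from_string('1024') == 1024   # new: plain bytes accepted
# bytes_from_string('garbage') still raises ValueError
```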
```diff
diff --git a/charmhelpers/core/sysctl.py b/charmhelpers/core/sysctl.py
index 6e413e3..386428d 100644
--- a/charmhelpers/core/sysctl.py
+++ b/charmhelpers/core/sysctl.py
@@ -17,38 +17,59 @@
 
 import yaml
 
-from subprocess import check_call
+from subprocess import check_call, CalledProcessError
 
 from charmhelpers.core.hookenv import (
     log,
     DEBUG,
     ERROR,
+    WARNING,
 )
 
+from charmhelpers.core.host import is_container
+
 __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
 
 
-def create(sysctl_dict, sysctl_file):
+def create(sysctl_dict, sysctl_file, ignore=False):
     """Creates a sysctl.conf file from a YAML associative array
 
-    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
+    :param sysctl_dict: a dict or YAML-formatted string of sysctl
+        options eg "{ 'kernel.max_pid': 1337 }"
     :type sysctl_dict: str
     :param sysctl_file: path to the sysctl file to be saved
     :type sysctl_file: str or unicode
+    :param ignore: If True, ignore "unknown variable" errors.
+    :type ignore: bool
     :returns: None
     """
-    try:
-        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
-    except yaml.YAMLError:
-        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
-            level=ERROR)
-        return
+    if type(sysctl_dict) is not dict:
+        try:
+            sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
+        except yaml.YAMLError:
+            log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
+                level=ERROR)
+            return
+    else:
+        sysctl_dict_parsed = sysctl_dict
 
     with open(sysctl_file, "w") as fd:
         for key, value in sysctl_dict_parsed.items():
             fd.write("{}={}\n".format(key, value))
 
-    log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
+    log("Updating sysctl_file: {} values: {}".format(sysctl_file,
+                                                     sysctl_dict_parsed),
         level=DEBUG)
 
-    check_call(["sysctl", "-p", sysctl_file])
+    call = ["sysctl", "-p", sysctl_file]
+    if ignore:
+        call.append("-e")
+
+    try:
+        check_call(call)
+    except CalledProcessError as e:
+        if is_container():
+            log("Error setting some sysctl keys in this container: {}".format(e.output),
+                level=WARNING)
+        else:
+            raise e
diff --git a/charmhelpers/core/templating.py b/charmhelpers/core/templating.py
index 7b801a3..9014015 100644
--- a/charmhelpers/core/templating.py
+++ b/charmhelpers/core/templating.py
@@ -20,7 +20,8 @@ from charmhelpers.core import hookenv
 
 
 def render(source, target, context, owner='root', group='root',
-           perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
+           perms=0o444, templates_dir=None, encoding='UTF-8',
+           template_loader=None, config_template=None):
     """
     Render a template.
 
@@ -32,6 +33,9 @@ def render(source, target, context, owner='root', group='root',
     The context should be a dict containing the values to be replaced in the
     template.
 
+    config_template may be provided to render from a provided template instead
+    of loading from a file.
+
     The `owner`, `group`, and `perms` options will be passed to `write_file`.
 
     If omitted, `templates_dir` defaults to the `templates` folder in the charm.
@@ -65,14 +69,19 @@ def render(source, target, context, owner='root', group='root',
         if templates_dir is None:
             templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
         template_env = Environment(loader=FileSystemLoader(templates_dir))
-    try:
-        source = source
-        template = template_env.get_template(source)
-    except exceptions.TemplateNotFound as e:
-        hookenv.log('Could not load template %s from %s.' %
-                    (source, templates_dir),
-                    level=hookenv.ERROR)
-        raise e
+
+    # load from a string if provided explicitly
+    if config_template is not None:
+        template = template_env.from_string(config_template)
+    else:
+        try:
+            source = source
+            template = template_env.get_template(source)
+        except exceptions.TemplateNotFound as e:
+            hookenv.log('Could not load template %s from %s.' %
+                        (source, templates_dir),
+                        level=hookenv.ERROR)
+            raise e
     content = template.render(context)
     if target is not None:
         target_dir = os.path.dirname(target)
```
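A sketch of the new `config_template` parameter to `render()`: feed a template from charm config (or any string) instead of a file on disk. The config key, target path, and context are illustrative assumptions:

```python
# Hypothetical use of render(config_template=...); names are examples.
from charmhelpers.core import hookenv
from charmhelpers.core.templating import render

template_text = hookenv.config('custom-haproxy-template')
if template_text:
    render(source=None,   # unused when config_template is provided
           target='/etc/haproxy/haproxy.cfg',
           context={'units': ['10.0.0.1', '10.0.0.2']},
           config_template=template_text)
```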
""" def __init__(self, path=None): self.db_path = path @@ -175,6 +179,9 @@ class Storage(object): else: self.db_path = os.path.join( os.environ.get('CHARM_DIR', ''), '.unit-state.db') + if self.db_path != ':memory:': + with open(self.db_path, 'a') as f: + os.fchmod(f.fileno(), 0o600) self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None @@ -358,7 +365,7 @@ class Storage(object): try: yield self.revision self.revision = None - except: + except Exception: self.flush(False) self.revision = None raise diff --git a/charmhelpers/fetch/__init__.py b/charmhelpers/fetch/__init__.py index ec5e0fe..0cc7fc8 100644 --- a/charmhelpers/fetch/__init__.py +++ b/charmhelpers/fetch/__init__.py @@ -48,6 +48,13 @@ class AptLockError(Exception): pass +class GPGKeyError(Exception): + """Exception occurs when a GPG key cannot be fetched or used. The message + indicates what the problem is. + """ + pass + + class BaseFetchHandler(object): """Base class for FetchHandler implementations in fetch plugins""" @@ -77,22 +84,27 @@ module = "charmhelpers.fetch.%s" % __platform__ fetch = importlib.import_module(module) filter_installed_packages = fetch.filter_installed_packages -install = fetch.install -upgrade = fetch.upgrade -update = fetch.update -purge = fetch.purge +filter_missing_packages = fetch.filter_missing_packages +install = fetch.apt_install +upgrade = fetch.apt_upgrade +update = _fetch_update = fetch.apt_update +purge = fetch.apt_purge add_source = fetch.add_source if __platform__ == "ubuntu": apt_cache = fetch.apt_cache - apt_install = fetch.install - apt_update = fetch.update - apt_upgrade = fetch.upgrade - apt_purge = fetch.purge + apt_install = fetch.apt_install + apt_update = fetch.apt_update + apt_upgrade = fetch.apt_upgrade + apt_purge = fetch.apt_purge + apt_autoremove = fetch.apt_autoremove apt_mark = fetch.apt_mark apt_hold = fetch.apt_hold apt_unhold = fetch.apt_unhold + import_key = fetch.import_key get_upstream_version = fetch.get_upstream_version + apt_pkg = fetch.ubuntu_apt_pkg + get_apt_dpkg_env = fetch.get_apt_dpkg_env elif __platform__ == "centos": yum_search = fetch.yum_search @@ -135,7 +147,7 @@ def configure_sources(update=False, for source, key in zip(sources, keys): add_source(source, key) if update: - fetch.update(fatal=True) + _fetch_update(fatal=True) def install_remote(source, *args, **kwargs): diff --git a/charmhelpers/fetch/archiveurl.py b/charmhelpers/fetch/archiveurl.py index dd24f9e..d25587a 100644 --- a/charmhelpers/fetch/archiveurl.py +++ b/charmhelpers/fetch/archiveurl.py @@ -89,7 +89,7 @@ class ArchiveUrlFetchHandler(BaseFetchHandler): :param str source: URL pointing to an archive file. :param str dest: Local path location to download archive file to. """ - # propogate all exceptions + # propagate all exceptions # URLError, OSError, etc proto, netloc, path, params, query, fragment = urlparse(source) if proto in ('http', 'https'): diff --git a/charmhelpers/fetch/bzrurl.py b/charmhelpers/fetch/bzrurl.py index 07cd029..c4ab3ff 100644 --- a/charmhelpers/fetch/bzrurl.py +++ b/charmhelpers/fetch/bzrurl.py @@ -13,7 +13,7 @@ # limitations under the License. 
import os -from subprocess import check_call +from subprocess import STDOUT, check_output from charmhelpers.fetch import ( BaseFetchHandler, UnhandledSource, @@ -55,7 +55,7 @@ class BzrUrlFetchHandler(BaseFetchHandler): cmd = ['bzr', 'branch'] cmd += cmd_opts cmd += [source, dest] - check_call(cmd) + check_output(cmd, stderr=STDOUT) def install(self, source, dest=None, revno=None): url_parts = self.parse_url(source) diff --git a/charmhelpers/fetch/centos.py b/charmhelpers/fetch/centos.py index 604bbfb..a91dcff 100644 --- a/charmhelpers/fetch/centos.py +++ b/charmhelpers/fetch/centos.py @@ -132,7 +132,7 @@ def add_source(source, key=None): key_file.write(key) key_file.flush() key_file.seek(0) - subprocess.check_call(['rpm', '--import', key_file]) + subprocess.check_call(['rpm', '--import', key_file.name]) else: subprocess.check_call(['rpm', '--import', key]) diff --git a/charmhelpers/fetch/giturl.py b/charmhelpers/fetch/giturl.py index 4cf21bc..070ca9b 100644 --- a/charmhelpers/fetch/giturl.py +++ b/charmhelpers/fetch/giturl.py @@ -13,7 +13,7 @@ # limitations under the License. import os -from subprocess import check_call, CalledProcessError +from subprocess import check_output, CalledProcessError, STDOUT from charmhelpers.fetch import ( BaseFetchHandler, UnhandledSource, @@ -50,7 +50,7 @@ class GitUrlFetchHandler(BaseFetchHandler): cmd = ['git', 'clone', source, dest, '--branch', branch] if depth: cmd.extend(['--depth', depth]) - check_call(cmd) + check_output(cmd, stderr=STDOUT) def install(self, source, branch="master", dest=None, depth=None): url_parts = self.parse_url(source) diff --git a/charmhelpers/contrib/python/__init__.py b/charmhelpers/fetch/python/__init__.py index d7567b8..bff99dc 100644 --- a/charmhelpers/contrib/python/__init__.py +++ b/charmhelpers/fetch/python/__init__.py @@ -1,4 +1,4 @@ -# Copyright 2014-2015 Canonical Limited. +# Copyright 2014-2019 Canonical Limited. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/charmhelpers/fetch/python/debug.py b/charmhelpers/fetch/python/debug.py new file mode 100644 index 0000000..757135e --- /dev/null +++ b/charmhelpers/fetch/python/debug.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
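The bzrurl and giturl handlers above swap check_call for check_output(stderr=STDOUT), so a failed branch/clone surfaces the tool's own output inside the raised CalledProcessError instead of losing it to the console. The pattern, roughly (run_vcs is a hypothetical helper, not part of charm-helpers):

from subprocess import STDOUT, CalledProcessError, check_output

def run_vcs(cmd):
    # stderr=STDOUT folds error text into the captured stream, so
    # e.output carries the full message when the command fails.
    try:
        return check_output(cmd, stderr=STDOUT)
    except CalledProcessError as e:
        raise RuntimeError('{} failed: {}'.format(cmd[0], e.output))

# e.g. run_vcs(['git', 'clone', source, dest, '--branch', branch])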
+ +from __future__ import print_function + +import atexit +import sys + +from charmhelpers.fetch.python.rpdb import Rpdb +from charmhelpers.core.hookenv import ( + open_port, + close_port, + ERROR, + log +) + +__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>" + +DEFAULT_ADDR = "0.0.0.0" +DEFAULT_PORT = 4444 + + +def _error(message): + log(message, level=ERROR) + + +def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT): + """ + Set a trace point using the remote debugger + """ + atexit.register(close_port, port) + try: + log("Starting a remote python debugger session on %s:%s" % (addr, + port)) + open_port(port) + debugger = Rpdb(addr=addr, port=port) + debugger.set_trace(sys._getframe().f_back) + except Exception: + _error("Cannot start a remote debug session on %s:%s" % (addr, + port)) diff --git a/charmhelpers/contrib/python/packages.py b/charmhelpers/fetch/python/packages.py index 6e95028..6e95028 100644 --- a/charmhelpers/contrib/python/packages.py +++ b/charmhelpers/fetch/python/packages.py diff --git a/charmhelpers/fetch/python/rpdb.py b/charmhelpers/fetch/python/rpdb.py new file mode 100644 index 0000000..9b31610 --- /dev/null +++ b/charmhelpers/fetch/python/rpdb.py @@ -0,0 +1,56 @@ +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Remote Python Debugger (pdb wrapper).""" + +import pdb +import socket +import sys + +__author__ = "Bertrand Janin <b@janin.com>" +__version__ = "0.1.3" + + +class Rpdb(pdb.Pdb): + + def __init__(self, addr="127.0.0.1", port=4444): + """Initialize the socket and initialize pdb.""" + + # Backup stdin and stdout before replacing them by the socket handle + self.old_stdout = sys.stdout + self.old_stdin = sys.stdin + + # Open a 'reusable' socket to let the webapp reload on the same port + self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True) + self.skt.bind((addr, port)) + self.skt.listen(1) + (clientsocket, address) = self.skt.accept() + handle = clientsocket.makefile('rw') + pdb.Pdb.__init__(self, completekey='tab', stdin=handle, stdout=handle) + sys.stdout = sys.stdin = handle + + def shutdown(self): + """Revert stdin and stdout, close the socket.""" + sys.stdout = self.old_stdout + sys.stdin = self.old_stdin + self.skt.close() + self.set_continue() + + def do_continue(self, arg): + """Stop all operation on ``continue``.""" + self.shutdown() + return 1 + + do_EOF = do_quit = do_exit = do_c = do_cont = do_continue diff --git a/charmhelpers/fetch/python/version.py b/charmhelpers/fetch/python/version.py new file mode 100644 index 0000000..3eb4210 --- /dev/null +++ b/charmhelpers/fetch/python/version.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python +# coding: utf-8 + +# Copyright 2014-2015 Canonical Limited. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import sys + +__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>" + + +def current_version(): + """Current system python version""" + return sys.version_info + + +def current_version_string(): + """Current system python version as string major.minor.micro""" + return "{0}.{1}.{2}".format(sys.version_info.major, + sys.version_info.minor, + sys.version_info.micro) diff --git a/charmhelpers/fetch/snap.py b/charmhelpers/fetch/snap.py index 23c707b..fc70aa9 100644 --- a/charmhelpers/fetch/snap.py +++ b/charmhelpers/fetch/snap.py @@ -18,21 +18,33 @@ If writing reactive charms, use the snap layer: https://lists.ubuntu.com/archives/snapcraft/2016-September/001114.html """ import subprocess -from os import environ +import os from time import sleep from charmhelpers.core.hookenv import log __author__ = 'Joseph Borg <joseph.borg@canonical.com>' -SNAP_NO_LOCK = 1 # The return code for "couldn't acquire lock" in Snap (hopefully this will be improved). +# The return code for "couldn't acquire lock" in Snap +# (hopefully this will be improved). +SNAP_NO_LOCK = 1 SNAP_NO_LOCK_RETRY_DELAY = 10 # Wait X seconds between Snap lock checks. SNAP_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times. +SNAP_CHANNELS = [ + 'edge', + 'beta', + 'candidate', + 'stable', +] class CouldNotAcquireLockException(Exception): pass +class InvalidSnapChannel(Exception): + pass + + def _snap_exec(commands): """ Execute snap commands. @@ -47,13 +59,17 @@ def _snap_exec(commands): while return_code is None or return_code == SNAP_NO_LOCK: try: - return_code = subprocess.check_call(['snap'] + commands, env=environ) + return_code = subprocess.check_call(['snap'] + commands, + env=os.environ) except subprocess.CalledProcessError as e: retry_count += 1 if retry_count > SNAP_NO_LOCK_RETRY_COUNT: - raise CouldNotAcquireLockException('Could not acquire lock after %s attempts' % SNAP_NO_LOCK_RETRY_COUNT) + raise CouldNotAcquireLockException( + 'Could not acquire lock after {} attempts' + .format(SNAP_NO_LOCK_RETRY_COUNT)) return_code = e.returncode - log('Snap failed to acquire lock, trying again in %s seconds.' % SNAP_NO_LOCK_RETRY_DELAY, level='WARN') + log('Snap failed to acquire lock, trying again in {} seconds.' + .format(SNAP_NO_LOCK_RETRY_DELAY), level='WARN') sleep(SNAP_NO_LOCK_RETRY_DELAY) return return_code @@ -120,3 +136,15 @@ def snap_refresh(packages, *flags): log(message, level='INFO') return _snap_exec(['refresh'] + flags + packages) + + +def valid_snap_channel(channel): + """ Validate that the snap channel exists + + :raises InvalidSnapChannel: When channel does not exist + :return: Boolean + """ + if channel.lower() in SNAP_CHANNELS: + return True + else: + raise InvalidSnapChannel("Invalid Snap Channel: {}".format(channel))
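valid_snap_channel gives charms a cheap guard for channel-type config options: it returns True for a known channel and raises InvalidSnapChannel otherwise. A sketch of how a charm might use it (the snap name and fallback are illustrative):

from charmhelpers.fetch.snap import (
    snap_install,
    valid_snap_channel,
    InvalidSnapChannel,
)

channel = 'candidate'  # e.g. taken from a charm config option
try:
    valid_snap_channel(channel)   # raises InvalidSnapChannel if unknown
except InvalidSnapChannel:
    channel = 'stable'            # fall back instead of failing the hook
snap_install('core', '--channel={}'.format(channel))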
-import os +from collections import OrderedDict +import platform +import re import six -import time import subprocess +import sys +import time -from tempfile import NamedTemporaryFile -from charmhelpers.core.host import ( - lsb_release -) -from charmhelpers.core.hookenv import log -from charmhelpers.fetch import SourceConfigError +from charmhelpers.core.host import get_distrib_codename, get_system_env +from charmhelpers.core.hookenv import ( + log, + DEBUG, + WARNING, + env_proxy_settings, +) +from charmhelpers.fetch import SourceConfigError, GPGKeyError +from charmhelpers.fetch import ubuntu_apt_pkg + +PROPOSED_POCKET = ( + "# Proposed\n" + "deb http://archive.ubuntu.com/ubuntu {}-proposed main universe " + "multiverse restricted\n") +PROPOSED_PORTS_POCKET = ( + "# Proposed\n" + "deb http://ports.ubuntu.com/ubuntu-ports {}-proposed main universe " + "multiverse restricted\n") +# Only supports 64bit and ppc64 at the moment. +ARCH_TO_PROPOSED_POCKET = { + 'x86_64': PROPOSED_POCKET, + 'ppc64le': PROPOSED_PORTS_POCKET, + 'aarch64': PROPOSED_PORTS_POCKET, + 's390x': PROPOSED_PORTS_POCKET, +} +CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" +CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' CLOUD_ARCHIVE = """# Ubuntu Cloud Archive deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main """ - -PROPOSED_POCKET = """# Proposed -deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted -""" - CLOUD_ARCHIVE_POCKETS = { # Folsom 'folsom': 'precise-updates/folsom', + 'folsom/updates': 'precise-updates/folsom', 'precise-folsom': 'precise-updates/folsom', 'precise-folsom/updates': 'precise-updates/folsom', 'precise-updates/folsom': 'precise-updates/folsom', @@ -43,6 +63,7 @@ CLOUD_ARCHIVE_POCKETS = { 'precise-proposed/folsom': 'precise-proposed/folsom', # Grizzly 'grizzly': 'precise-updates/grizzly', + 'grizzly/updates': 'precise-updates/grizzly', 'precise-grizzly': 'precise-updates/grizzly', 'precise-grizzly/updates': 'precise-updates/grizzly', 'precise-updates/grizzly': 'precise-updates/grizzly', @@ -51,6 +72,7 @@ CLOUD_ARCHIVE_POCKETS = { 'precise-proposed/grizzly': 'precise-proposed/grizzly', # Havana 'havana': 'precise-updates/havana', + 'havana/updates': 'precise-updates/havana', 'precise-havana': 'precise-updates/havana', 'precise-havana/updates': 'precise-updates/havana', 'precise-updates/havana': 'precise-updates/havana', @@ -59,6 +81,7 @@ CLOUD_ARCHIVE_POCKETS = { 'precise-proposed/havana': 'precise-proposed/havana', # Icehouse 'icehouse': 'precise-updates/icehouse', + 'icehouse/updates': 'precise-updates/icehouse', 'precise-icehouse': 'precise-updates/icehouse', 'precise-icehouse/updates': 'precise-updates/icehouse', 'precise-updates/icehouse': 'precise-updates/icehouse', @@ -67,6 +90,7 @@ CLOUD_ARCHIVE_POCKETS = { 'precise-proposed/icehouse': 'precise-proposed/icehouse', # Juno 'juno': 'trusty-updates/juno', + 'juno/updates': 'trusty-updates/juno', 'trusty-juno': 'trusty-updates/juno', 'trusty-juno/updates': 'trusty-updates/juno', 'trusty-updates/juno': 'trusty-updates/juno', @@ -75,6 +99,7 @@ CLOUD_ARCHIVE_POCKETS = { 'trusty-proposed/juno': 'trusty-proposed/juno', # Kilo 'kilo': 'trusty-updates/kilo', + 'kilo/updates': 'trusty-updates/kilo', 'trusty-kilo': 'trusty-updates/kilo', 'trusty-kilo/updates': 'trusty-updates/kilo', 'trusty-updates/kilo': 'trusty-updates/kilo', @@ -83,6 +108,7 @@ CLOUD_ARCHIVE_POCKETS = { 'trusty-proposed/kilo': 'trusty-proposed/kilo', # Liberty 'liberty': 'trusty-updates/liberty', + 'liberty/updates': 
'trusty-updates/liberty', 'trusty-liberty': 'trusty-updates/liberty', 'trusty-liberty/updates': 'trusty-updates/liberty', 'trusty-updates/liberty': 'trusty-updates/liberty', @@ -91,6 +117,7 @@ CLOUD_ARCHIVE_POCKETS = { 'trusty-proposed/liberty': 'trusty-proposed/liberty', # Mitaka 'mitaka': 'trusty-updates/mitaka', + 'mitaka/updates': 'trusty-updates/mitaka', 'trusty-mitaka': 'trusty-updates/mitaka', 'trusty-mitaka/updates': 'trusty-updates/mitaka', 'trusty-updates/mitaka': 'trusty-updates/mitaka', @@ -99,6 +126,7 @@ CLOUD_ARCHIVE_POCKETS = { 'trusty-proposed/mitaka': 'trusty-proposed/mitaka', # Newton 'newton': 'xenial-updates/newton', + 'newton/updates': 'xenial-updates/newton', 'xenial-newton': 'xenial-updates/newton', 'xenial-newton/updates': 'xenial-updates/newton', 'xenial-updates/newton': 'xenial-updates/newton', @@ -107,12 +135,13 @@ CLOUD_ARCHIVE_POCKETS = { 'xenial-proposed/newton': 'xenial-proposed/newton', # Ocata 'ocata': 'xenial-updates/ocata', + 'ocata/updates': 'xenial-updates/ocata', 'xenial-ocata': 'xenial-updates/ocata', 'xenial-ocata/updates': 'xenial-updates/ocata', 'xenial-updates/ocata': 'xenial-updates/ocata', 'ocata/proposed': 'xenial-proposed/ocata', 'xenial-ocata/proposed': 'xenial-proposed/ocata', - 'xenial-ocata/newton': 'xenial-proposed/ocata', + 'xenial-proposed/ocata': 'xenial-proposed/ocata', # Pike 'pike': 'xenial-updates/pike', 'xenial-pike': 'xenial-updates/pike', @@ -120,7 +149,7 @@ CLOUD_ARCHIVE_POCKETS = { 'xenial-updates/pike': 'xenial-updates/pike', 'pike/proposed': 'xenial-proposed/pike', 'xenial-pike/proposed': 'xenial-proposed/pike', - 'xenial-pike/newton': 'xenial-proposed/pike', + 'xenial-proposed/pike': 'xenial-proposed/pike', # Queens 'queens': 'xenial-updates/queens', 'xenial-queens': 'xenial-updates/queens', @@ -128,12 +157,45 @@ CLOUD_ARCHIVE_POCKETS = { 'xenial-updates/queens': 'xenial-updates/queens', 'queens/proposed': 'xenial-proposed/queens', 'xenial-queens/proposed': 'xenial-proposed/queens', - 'xenial-queens/newton': 'xenial-proposed/queens', + 'xenial-proposed/queens': 'xenial-proposed/queens', + # Rocky + 'rocky': 'bionic-updates/rocky', + 'bionic-rocky': 'bionic-updates/rocky', + 'bionic-rocky/updates': 'bionic-updates/rocky', + 'bionic-updates/rocky': 'bionic-updates/rocky', + 'rocky/proposed': 'bionic-proposed/rocky', + 'bionic-rocky/proposed': 'bionic-proposed/rocky', + 'bionic-proposed/rocky': 'bionic-proposed/rocky', + # Stein + 'stein': 'bionic-updates/stein', + 'bionic-stein': 'bionic-updates/stein', + 'bionic-stein/updates': 'bionic-updates/stein', + 'bionic-updates/stein': 'bionic-updates/stein', + 'stein/proposed': 'bionic-proposed/stein', + 'bionic-stein/proposed': 'bionic-proposed/stein', + 'bionic-proposed/stein': 'bionic-proposed/stein', + # Train + 'train': 'bionic-updates/train', + 'bionic-train': 'bionic-updates/train', + 'bionic-train/updates': 'bionic-updates/train', + 'bionic-updates/train': 'bionic-updates/train', + 'train/proposed': 'bionic-proposed/train', + 'bionic-train/proposed': 'bionic-proposed/train', + 'bionic-proposed/train': 'bionic-proposed/train', + # Ussuri + 'ussuri': 'bionic-updates/ussuri', + 'bionic-ussuri': 'bionic-updates/ussuri', + 'bionic-ussuri/updates': 'bionic-updates/ussuri', + 'bionic-updates/ussuri': 'bionic-updates/ussuri', + 'ussuri/proposed': 'bionic-proposed/ussuri', + 'bionic-ussuri/proposed': 'bionic-proposed/ussuri', + 'bionic-proposed/ussuri': 'bionic-proposed/ussuri', } + APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT. 
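Each alias in CLOUD_ARCHIVE_POCKETS collapses to a canonical '<series>-updates/<release>' or '<series>-proposed/<release>' pocket, so add_source('cloud:...') accepts the spellings users actually type. For instance, from the table above:

from charmhelpers.fetch.ubuntu import CLOUD_ARCHIVE_POCKETS

# Several spellings resolve to the same canonical pocket:
assert CLOUD_ARCHIVE_POCKETS['train'] == 'bionic-updates/train'
assert CLOUD_ARCHIVE_POCKETS['bionic-train'] == 'bionic-updates/train'
assert CLOUD_ARCHIVE_POCKETS['train/proposed'] == 'bionic-proposed/train'
# ...which is what add_source('cloud:bionic-train') ends up looking up.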
CMD_RETRY_DELAY = 10 # Wait 10 seconds between command retries. -CMD_RETRY_COUNT = 30 # Retry a failing fatal command X times. +CMD_RETRY_COUNT = 3 # Retry a failing fatal command X times. def filter_installed_packages(packages): @@ -151,18 +213,54 @@ def filter_installed_packages(packages): return _pkgs -def apt_cache(in_memory=True, progress=None): - """Build and return an apt cache.""" - from apt import apt_pkg - apt_pkg.init() - if in_memory: - apt_pkg.config.set("Dir::Cache::pkgcache", "") - apt_pkg.config.set("Dir::Cache::srcpkgcache", "") - return apt_pkg.Cache(progress) +def filter_missing_packages(packages): + """Return a list of packages that are installed. + :param packages: list of packages to evaluate. + :returns list: Packages that are installed. + """ + return list( + set(packages) - + set(filter_installed_packages(packages)) + ) -def install(packages, options=None, fatal=False): - """Install one or more packages.""" + +def apt_cache(*_, **__): + """Shim returning an object simulating the apt_pkg Cache. + + :param _: Accept arguments for compatibility, not used. + :type _: any + :param __: Accept keyword arguments for compatibility, not used. + :type __: any + :returns: Object used to interrogate the system apt and dpkg databases. + :rtype: ubuntu_apt_pkg.Cache + """ + if 'apt_pkg' in sys.modules: + # NOTE(fnordahl): When our consumers use the upstream ``apt_pkg`` module + # in conjunction with the apt_cache helper function, they may expect us + # to call ``apt_pkg.init()`` for them. + # + # Detect this situation, log a warning and make the call to + # ``apt_pkg.init()`` to avoid the consumer Python interpreter from + # crashing with a segmentation fault. + log('Support for use of upstream ``apt_pkg`` module in conjunction ' + 'with charm-helpers is deprecated since 2019-06-25', level=WARNING) + sys.modules['apt_pkg'].init() + return ubuntu_apt_pkg.Cache() + + +def apt_install(packages, options=None, fatal=False): + """Install one or more packages. + + :param packages: Package(s) to install + :type packages: Option[str, List[str]] + :param options: Options to pass on to apt-get + :type options: Option[None, List[str]] + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool + :raises: subprocess.CalledProcessError + """ if options is None: options = ['--option=Dpkg::Options::=--force-confold'] @@ -178,8 +276,18 @@ def install(packages, options=None, fatal=False): _run_apt_command(cmd, fatal) -def upgrade(options=None, fatal=False, dist=False): - """Upgrade all packages.""" +def apt_upgrade(options=None, fatal=False, dist=False): + """Upgrade all packages. + + :param options: Options to pass on to apt-get + :type options: Option[None, List[str]] + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool + :param dist: Whether ``dist-upgrade`` should be used over ``upgrade`` + :type dist: bool + :raises: subprocess.CalledProcessError + """ if options is None: options = ['--option=Dpkg::Options::=--force-confold'] @@ -193,14 +301,22 @@ def upgrade(options=None, fatal=False, dist=False): _run_apt_command(cmd, fatal) -def update(fatal=False): +def apt_update(fatal=False): """Update local apt cache.""" cmd = ['apt-get', 'update'] _run_apt_command(cmd, fatal) -def purge(packages, fatal=False): - """Purge one or more packages.""" +def apt_purge(packages, fatal=False): + """Purge one or more packages.
+ + :param packages: Package(s) to purge + :type packages: Option[str, List[str]] + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool + :raises: subprocess.CalledProcessError + """ cmd = ['apt-get', '--assume-yes', 'purge'] if isinstance(packages, six.string_types): cmd.append(packages) @@ -210,6 +326,21 @@ def purge(packages, fatal=False): _run_apt_command(cmd, fatal) +def apt_autoremove(purge=True, fatal=False): + """Remove packages that are no longer required (``apt-get autoremove``). + :param purge: Whether the ``--purge`` option should be passed on or not. + :type purge: bool + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool + :raises: subprocess.CalledProcessError + """ + cmd = ['apt-get', '--assume-yes', 'autoremove'] + if purge: + cmd.append('--purge') + _run_apt_command(cmd, fatal) + + def apt_mark(packages, mark, fatal=False): """Flag one or more packages using apt-mark.""" log("Marking {} as {}".format(packages, mark)) @@ -233,7 +364,159 @@ def apt_unhold(packages, fatal=False): return apt_mark(packages, 'unhold', fatal=fatal) -def add_source(source, key=None): +def import_key(key): + """Import an ASCII Armor key. + + A Radix64 format keyid is also supported for backwards + compatibility. In this case the Ubuntu keyserver will be + queried for a key via HTTPS by its keyid. This method + is less preferable because https proxy servers may + require traffic decryption which is equivalent to a + man-in-the-middle attack (a proxy server impersonates + keyserver TLS certificates and has to be explicitly + trusted by the system). + + :param key: A GPG key in ASCII armor format, + including BEGIN and END markers or a keyid. + :type key: (bytes, str) + :raises: GPGKeyError if the key could not be imported + """ + key = key.strip() + if '-' in key or '\n' in key: + # Send everything not obviously a keyid to GPG to import, as + # we trust its validation better than our own. eg. handling + # comments before the key. + log("PGP key found (looks like ASCII Armor format)", level=DEBUG) + if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and + '-----END PGP PUBLIC KEY BLOCK-----' in key): + log("Writing provided PGP key in the binary format", level=DEBUG) + if six.PY3: + key_bytes = key.encode('utf-8') + else: + key_bytes = key + key_name = _get_keyid_by_gpg_key(key_bytes) + key_gpg = _dearmor_gpg_key(key_bytes) + _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg) + else: + raise GPGKeyError("ASCII armor markers missing from GPG key") + else: + log("PGP key found (looks like Radix64 format)", level=WARNING) + log("SECURELY importing PGP key from keyserver; " + "full key not provided.", level=WARNING) + # as of bionic add-apt-repository uses curl with an HTTPS keyserver URL + # to retrieve GPG keys. `apt-key adv` command is deprecated as is + # apt-key in general as noted in its manpage. See lp:1433761 for more + # history. Instead, /etc/apt/trusted.gpg.d is used directly to drop + # the gpg key + key_asc = _get_key_by_keyid(key) + # write the key in GPG format so that apt-key list shows it + key_gpg = _dearmor_gpg_key(key_asc) + _write_apt_gpg_keyfile(key_name=key, key_material=key_gpg) + + +def _get_keyid_by_gpg_key(key_material): + """Get a GPG key fingerprint by GPG key material. + Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded + or binary GPG key material. Can be used, for example, to generate file + names for keys passed via charm options.
+ + :param key_material: ASCII armor-encoded or binary GPG key material + :type key_material: bytes + :raises: GPGKeyError if invalid key material has been provided + :returns: A GPG key fingerprint + :rtype: str + """ + # Use the same gpg command for both Xenial and Bionic + cmd = 'gpg --with-colons --with-fingerprint' + ps = subprocess.Popen(cmd.split(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE) + out, err = ps.communicate(input=key_material) + if six.PY3: + out = out.decode('utf-8') + err = err.decode('utf-8') + if 'gpg: no valid OpenPGP data found.' in err: + raise GPGKeyError('Invalid GPG key material provided') + # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10) + return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1) + + +def _get_key_by_keyid(keyid): + """Get a key via HTTPS from the Ubuntu keyserver. + Different key ID formats are supported by SKS keyservers (the longer ones + are more secure, see "dead beef attack" and https://evil32.com/). Since + HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will + impersonate keyserver.ubuntu.com and generate a certificate with + keyserver.ubuntu.com in the CN field or in SubjAltName fields of a + certificate. If such proxy behavior is expected it is necessary to add the + CA certificate chain containing the intermediate CA of the SSLBump proxy to + every machine that this code runs on via ca-certs cloud-init directive (via + cloudinit-userdata model-config) or via other means (such as through a + custom charm option). Also note that DNS resolution for the hostname in a + URL is done at a proxy server - not at the client side. + + 8-digit (32 bit) key ID + https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6 + 16-digit (64 bit) key ID + https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6 + 40-digit key ID: + https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6 + + :param keyid: An 8, 16 or 40 hex digit keyid to find a key for + :type keyid: (bytes, str) + :returns: A key material for the specified GPG key id + :rtype: (str, bytes) + :raises: subprocess.CalledProcessError + """ + # options=mr - machine-readable output (disables html wrappers) + keyserver_url = ('https://keyserver.ubuntu.com' + '/pks/lookup?op=get&options=mr&exact=on&search=0x{}') + curl_cmd = ['curl', keyserver_url.format(keyid)] + # use proxy server settings in order to retrieve the key + return subprocess.check_output(curl_cmd, + env=env_proxy_settings(['https'])) + + +def _dearmor_gpg_key(key_asc): + """Converts a GPG key in the ASCII armor format to the binary format. + + :param key_asc: A GPG key in ASCII armor format. + :type key_asc: (str, bytes) + :returns: A GPG key in binary format + :rtype: (str, bytes) + :raises: GPGKeyError + """ + ps = subprocess.Popen(['gpg', '--dearmor'], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + stdin=subprocess.PIPE) + out, err = ps.communicate(input=key_asc) + # no need to decode output as it is binary (invalid utf-8), only error + if six.PY3: + err = err.decode('utf-8') + if 'gpg: no valid OpenPGP data found.' in err: + raise GPGKeyError('Invalid GPG key material. Check your network setup' + ' (MTU, routing, DNS) and/or proxy server settings' + ' as well as destination keyserver status.') + else: + return out + + +def _write_apt_gpg_keyfile(key_name, key_material): + """Writes GPG key material into a file at a provided path. 
+ + :param key_name: A key name to use for a key file (could be a fingerprint) + :type key_name: str + :param key_material: A GPG key material (binary) + :type key_material: (str, bytes) + """ + with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name), + 'wb') as keyf: + keyf.write(key_material) + + +def add_source(source, key=None, fail_invalid=False): """Add a package source to this system. @param source: a URL or sources.list entry, as supported by @@ -249,6 +532,33 @@ def add_source(source, key=None): such as 'cloud:icehouse' 'distro' may be used as a noop + The full list of source specifications supported by the function is: + + 'distro': A NOP; i.e. it has no effect. + 'proposed': the proposed deb spec [2] is written to + /etc/apt/sources.list.d/proposed.list + 'distro-proposed': adds <version>-proposed to the debs [2] + 'ppa:<ppa-name>': add-apt-repository --yes <ppa_name> + 'deb <deb-spec>': add-apt-repository --yes deb <deb-spec> + 'http://....': add-apt-repository --yes http://... + 'cloud-archive:<spec>': add-apt-repository --yes cloud-archive:<spec> + 'cloud:<release>[-staging]': specify a Cloud Archive pocket <release> with + optional staging version. If staging is used then the staging PPA [2] + will be used. If staging is NOT used then the cloud archive [3] will be + added, and the 'ubuntu-cloud-keyring' package will be added for the + current distro. + + Otherwise the source is not recognised and this is logged to the juju log. + However, no error is raised, unless fail_invalid is True. + + [1] deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main + where {} is replaced with the derived pocket name. + [2] deb http://archive.ubuntu.com/ubuntu {}-proposed \ + main universe multiverse restricted + where {} is replaced with the lsb_release codename (e.g. xenial) + [3] deb http://ubuntu-cloud.archive.canonical.com/ubuntu <pocket> + to /etc/apt/sources.list.d/cloud-archive-list + @param key: A key to be added to the system's APT keyring and used to verify the signatures on packages. Ideally, this should be an ASCII format GPG public key including the block headers. A GPG key @@ -256,67 +566,172 @@ def add_source(source, key=None): available to retrieve the actual public key from a public keyserver placing your Juju environment at risk. ppa and cloud archive keys are securely added automatically, so should not be provided. + + @param fail_invalid: (boolean) if True, then the function raises a + SourceConfigError if there is no matching installation source. + + @raises SourceConfigError() if for cloud:<pocket>, the <pocket> is not a + valid pocket in CLOUD_ARCHIVE_POCKETS """ + _mapping = OrderedDict([ + (r"^distro$", lambda: None), # This is a NOP + (r"^(?:proposed|distro-proposed)$", _add_proposed), + (r"^cloud-archive:(.*)$", _add_apt_repository), + (r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository), + (r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging), + (r"^cloud:(.*)-(.*)$", _add_cloud_distro_check), + (r"^cloud:(.*)$", _add_cloud_pocket), + (r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check), + ]) if source is None: - log('Source is not present.
Skipping') - return - - if (source.startswith('ppa:') or - source.startswith('http') or - source.startswith('deb ') or - source.startswith('cloud-archive:')): - cmd = ['add-apt-repository', '--yes', source] - _run_with_retries(cmd) - elif source.startswith('cloud:'): - install(filter_installed_packages(['ubuntu-cloud-keyring']), - fatal=True) - pocket = source.split(':')[-1] - if pocket not in CLOUD_ARCHIVE_POCKETS: - raise SourceConfigError( - 'Unsupported cloud: source option %s' % - pocket) - actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] - with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: - apt.write(CLOUD_ARCHIVE.format(actual_pocket)) - elif source == 'proposed': - release = lsb_release()['DISTRIB_CODENAME'] - with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: - apt.write(PROPOSED_POCKET.format(release)) - elif source == 'distro': - pass + source = '' + for r, fn in six.iteritems(_mapping): + m = re.match(r, source) + if m: + if key: + # Import key before adding the source which depends on it, + # as refreshing packages could fail otherwise. + try: + import_key(key) + except GPGKeyError as e: + raise SourceConfigError(str(e)) + # call the associated function with the captured groups + # raises SourceConfigError on error. + fn(*m.groups()) + break else: - log("Unknown source: {!r}".format(source)) - - if key: - if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key: - with NamedTemporaryFile('w+') as key_file: - key_file.write(key) - key_file.flush() - key_file.seek(0) - subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file) - else: - # Note that hkp: is in no way a secure protocol. Using a - # GPG key id is pointless from a security POV unless you - # absolutely trust your network and DNS. - subprocess.check_call(['apt-key', 'adv', '--keyserver', - 'hkp://keyserver.ubuntu.com:80', '--recv', - key]) + # nothing matched. Log an error and maybe raise SourceConfigError. + err = "Unknown source: {!r}".format(source) + log(err) + if fail_invalid: + raise SourceConfigError(err) + + +def _add_proposed(): + """Add the PROPOSED_POCKET as /etc/apt/sources.list.d/proposed.list + + Uses get_distrib_codename to determine the correct stanza for + the deb line. + + For Intel architectures PROPOSED_POCKET is used for the release, but for + other architectures PROPOSED_PORTS_POCKET is used for the release. + """ + release = get_distrib_codename() + arch = platform.machine() + if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET): + raise SourceConfigError("Arch {} not supported for (distro-)proposed" + .format(arch)) + with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt: + apt.write(ARCH_TO_PROPOSED_POCKET[arch].format(release)) + + +def _add_apt_repository(spec): + """Add the spec using add_apt_repository + + :param spec: the parameter to pass to add_apt_repository + :type spec: str + """ + if '{series}' in spec: + series = get_distrib_codename() + spec = spec.replace('{series}', series) + # software-properties package for bionic properly reacts to proxy settings + # passed as environment variables (See lp:1433761). This is not the case + # for LTS and non-LTS releases below bionic. + _run_with_retries(['add-apt-repository', '--yes', spec], + cmd_env=env_proxy_settings(['https'])) + + +def _add_cloud_pocket(pocket): + """Add a cloud pocket as /etc/apt/sources.list.d/cloud-archive.list + + Note that this overwrites the existing file if there is one. + + This function also converts the simple pocket into the actual pocket using + the CLOUD_ARCHIVE_POCKETS mapping.
+ + :param pocket: string representing the pocket to add a deb spec for. + :raises: SourceConfigError if the cloud pocket doesn't exist or the + requested release doesn't match the current distro version. + """ + apt_install(filter_installed_packages(['ubuntu-cloud-keyring']), + fatal=True) + if pocket not in CLOUD_ARCHIVE_POCKETS: + raise SourceConfigError( + 'Unsupported cloud: source option %s' % + pocket) + actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket] + with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt: + apt.write(CLOUD_ARCHIVE.format(actual_pocket)) + + +def _add_cloud_staging(cloud_archive_release, openstack_release): + """Add the cloud staging repository which is in + ppa:ubuntu-cloud-archive/<openstack_release>-staging + + This function checks that the cloud_archive_release matches the current + codename for the distro that the charm is being installed on. + + :param cloud_archive_release: string, codename for the release. + :param openstack_release: String, codename for the openstack release. + :raises: SourceConfigError if the cloud_archive_release doesn't match the + current version of the os. + """ + _verify_is_ubuntu_rel(cloud_archive_release, openstack_release) + ppa = 'ppa:ubuntu-cloud-archive/{}-staging'.format(openstack_release) + cmd = 'add-apt-repository -y {}'.format(ppa) + _run_with_retries(cmd.split(' ')) + + +def _add_cloud_distro_check(cloud_archive_release, openstack_release): + """Add the cloud pocket, but also check the cloud_archive_release against + the current distro, and use the openstack_release as the full lookup. + + This just calls _add_cloud_pocket() with the openstack_release as pocket + to get the correct cloud-archive.list for dpkg to work with. + + :param cloud_archive_release: String, codename for the distro release. + :param openstack_release: String, spec for the release to look up in the + CLOUD_ARCHIVE_POCKETS + :raises: SourceConfigError if this is the wrong distro, or the pocket spec + doesn't exist. + """ + _verify_is_ubuntu_rel(cloud_archive_release, openstack_release) + _add_cloud_pocket("{}-{}".format(cloud_archive_release, openstack_release)) + + +def _verify_is_ubuntu_rel(release, os_release): + """Verify that the release is the same as the current Ubuntu release. + + :param release: String, lowercase for the release. + :param os_release: String, the os_release being asked for + :raises: SourceConfigError if the release is not the same as the ubuntu + release. + """ + ubuntu_rel = get_distrib_codename() + if release != ubuntu_rel: + raise SourceConfigError( + 'Invalid Cloud Archive release specified: {}-{} on this Ubuntu ' + 'version ({})'.format(release, os_release, ubuntu_rel)) def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), retry_message="", cmd_env=None): """Run a command and retry until success or max_retries is reached. - :param: cmd: str: The apt command to run. - :param: max_retries: int: The number of retries to attempt on a fatal - command. Defaults to CMD_RETRY_COUNT. - :param: retry_exitcodes: tuple: Optional additional exit codes to retry. - Defaults to retry on exit code 1. - :param: retry_message: str: Optional log prefix emitted during retries. - :param: cmd_env: dict: Environment variables to add to the command run. + :param cmd: The apt command to run. + :type cmd: str + :param max_retries: The number of retries to attempt on a fatal + command. Defaults to CMD_RETRY_COUNT. + :type max_retries: int + :param retry_exitcodes: Optional additional exit codes to retry.
+ Defaults to retry on exit code 1. + :type retry_exitcodes: tuple + :param retry_message: Optional log prefix emitted during retries. + :type retry_message: str + :param cmd_env: Environment variables to add to the command run. + :type cmd_env: Option[None, Dict[str, str]] """ - - env = os.environ.copy() + env = get_apt_dpkg_env() if cmd_env: env.update(cmd_env) @@ -343,21 +758,18 @@ def _run_with_retries(cmd, max_retries=CMD_RETRY_COUNT, retry_exitcodes=(1,), def _run_apt_command(cmd, fatal=False): """Run an apt command with optional retries. - :param: fatal: bool: Whether the command's output should be checked and - retried. + :param cmd: The apt command to run. + :type cmd: str + :param fatal: Whether the command's output should be checked and + retried. + :type fatal: bool """ - # Provide DEBIAN_FRONTEND=noninteractive if not present in the environment. - cmd_env = { - 'DEBIAN_FRONTEND': os.environ.get('DEBIAN_FRONTEND', 'noninteractive')} - if fatal: _run_with_retries( - cmd, cmd_env=cmd_env, retry_exitcodes=(1, APT_NO_LOCK,), + cmd, retry_exitcodes=(1, APT_NO_LOCK,), retry_message="Couldn't acquire DPKG lock") else: - env = os.environ.copy() - env.update(cmd_env) - subprocess.call(cmd, env=env) + subprocess.call(cmd, env=get_apt_dpkg_env()) def get_upstream_version(package): @@ -365,11 +777,10 @@ def get_upstream_version(package): @returns None (if not installed) or the upstream version """ - import apt_pkg cache = apt_cache() try: pkg = cache[package] - except: + except Exception: # the package is unknown to the current apt cache. return None @@ -377,4 +788,18 @@ def get_upstream_version(package): # package is known, but no version is currently installed. return None - return apt_pkg.upstream_version(pkg.current_ver.ver_str) + return ubuntu_apt_pkg.upstream_version(pkg.current_ver.ver_str) + + +def get_apt_dpkg_env(): + """Get environment suitable for execution of APT and DPKG tools. + + We keep this in a helper function instead of in a global constant to + avoid execution on import of the library. + :returns: Environment suitable for execution of APT and DPKG tools. + :rtype: Dict[str, str] + """ + # The fallback is used in the event of ``/etc/environment`` not containing + # a valid PATH variable. + return {'DEBIAN_FRONTEND': 'noninteractive', + 'PATH': get_system_env('PATH', '/usr/sbin:/usr/bin:/sbin:/bin')} diff --git a/charmhelpers/fetch/ubuntu_apt_pkg.py b/charmhelpers/fetch/ubuntu_apt_pkg.py new file mode 100644 index 0000000..929a75d --- /dev/null +++ b/charmhelpers/fetch/ubuntu_apt_pkg.py @@ -0,0 +1,267 @@ +# Copyright 2019 Canonical Ltd +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Provide a subset of the ``python-apt`` module API. + +Data collection is done through subprocess calls to ``apt-cache`` and +``dpkg-query`` commands. + +The main purpose for this module is to avoid dependency on the +``python-apt`` python module.
+ +The indicated python module is a wrapper around the ``apt`` C++ library +which is tightly connected to the version of the distribution it was +shipped on. It is not developed in a backward/forward compatible manner. + +This in turn makes it incredibly hard to distribute as a wheel for a piece +of python software that supports a span of distro releases [0][1]. + +Upstream feedback like [2] does not give confidence in this ever changing, +so with this we get rid of the dependency. + +0: https://github.com/juju-solutions/layer-basic/pull/135 +1: https://bugs.launchpad.net/charm-octavia/+bug/1824112 +2: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=845330#10 +""" + +import locale +import os +import subprocess +import sys + + +class _container(dict): + """Simple container for attributes.""" + __getattr__ = dict.__getitem__ + __setattr__ = dict.__setitem__ + + +class Package(_container): + """Simple container for package attributes.""" + + +class Version(_container): + """Simple container for version attributes.""" + + +class Cache(object): + """Simulation of ``apt_pkg`` Cache object.""" + def __init__(self, progress=None): + pass + + def __contains__(self, package): + try: + pkg = self.__getitem__(package) + return pkg is not None + except KeyError: + return False + + def __getitem__(self, package): + """Get information about a package from apt and dpkg databases. + + :param package: Name of package + :type package: str + :returns: Package object + :rtype: object + :raises: KeyError, subprocess.CalledProcessError + """ + apt_result = self._apt_cache_show([package])[package] + apt_result['name'] = apt_result.pop('package') + pkg = Package(apt_result) + dpkg_result = self._dpkg_list([package]).get(package, {}) + current_ver = None + installed_version = dpkg_result.get('version') + if installed_version: + current_ver = Version({'ver_str': installed_version}) + pkg.current_ver = current_ver + pkg.architecture = dpkg_result.get('architecture') + return pkg + + def _dpkg_list(self, packages): + """Get data from system dpkg database for package. + + :param packages: Packages to get data from + :type packages: List[str] + :returns: Structured data about installed packages, keys like + ``dpkg-query --list`` + :rtype: dict + :raises: subprocess.CalledProcessError + """ + pkgs = {} + cmd = ['dpkg-query', '--list'] + cmd.extend(packages) + if locale.getlocale() == (None, None): + # subprocess calls out to locale.getpreferredencoding(False) to + # determine encoding. Workaround for Trusty where the + # environment appears to not be set up correctly. 
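+            # Note: setlocale mutates process-wide state; it is only
+            # reached when no locale is configured at all, because
+            # universal_newlines=True decodes the command output via
+            # locale.getpreferredencoding(), which needs a locale set.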
+ locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') + try: + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT, + universal_newlines=True) + except subprocess.CalledProcessError as cp: + # ``dpkg-query`` may return error and at the same time have + # produced useful output, for example when asked for multiple + # packages where some are not installed + if cp.returncode != 1: + raise + output = cp.output + headings = [] + for line in output.splitlines(): + if line.startswith('||/'): + headings = line.split() + headings.pop(0) + continue + elif (line.startswith('|') or line.startswith('+') or + line.startswith('dpkg-query:')): + continue + else: + data = line.split(None, 4) + status = data.pop(0) + if status != 'ii': + continue + pkg = {} + pkg.update({k.lower(): v for k, v in zip(headings, data)}) + if 'name' in pkg: + pkgs.update({pkg['name']: pkg}) + return pkgs + + def _apt_cache_show(self, packages): + """Get data from system apt cache for package. + + :param packages: Packages to get data from + :type packages: List[str] + :returns: Structured data about package, keys like + ``apt-cache show`` + :rtype: dict + :raises: subprocess.CalledProcessError + """ + pkgs = {} + cmd = ['apt-cache', 'show', '--no-all-versions'] + cmd.extend(packages) + if locale.getlocale() == (None, None): + # subprocess calls out to locale.getpreferredencoding(False) to + # determine encoding. Workaround for Trusty where the + # environment appears to not be set up correctly. + locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') + try: + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT, + universal_newlines=True) + previous = None + pkg = {} + for line in output.splitlines(): + if not line: + if 'package' in pkg: + pkgs.update({pkg['package']: pkg}) + pkg = {} + continue + if line.startswith(' '): + if previous and previous in pkg: + pkg[previous] += os.linesep + line.lstrip() + continue + if ':' in line: + kv = line.split(':', 1) + key = kv[0].lower() + if key == 'n': + continue + previous = key + pkg.update({key: kv[1].lstrip()}) + except subprocess.CalledProcessError as cp: + # ``apt-cache`` returns 100 if none of the packages asked for + # exist in the apt cache. + if cp.returncode != 100: + raise + return pkgs + + +class Config(_container): + def __init__(self): + super(Config, self).__init__(self._populate()) + + def _populate(self): + cfgs = {} + cmd = ['apt-config', 'dump'] + output = subprocess.check_output(cmd, + stderr=subprocess.STDOUT, + universal_newlines=True) + for line in output.splitlines(): + if not line.startswith("CommandLine"): + k, v = line.split(" ", 1) + cfgs[k] = v.strip(";").strip("\"") + + return cfgs + + +# Backwards compatibility with old apt_pkg module +sys.modules[__name__].config = Config() + + +def init(): + """Compability shim that does nothing.""" + pass + + +def upstream_version(version): + """Extracts upstream version from a version string. + + Upstream reference: https://salsa.debian.org/apt-team/apt/blob/master/ + apt-pkg/deb/debversion.cc#L259 + + :param version: Version string + :type version: str + :returns: Upstream version + :rtype: str + """ + if version: + version = version.split(':')[-1] + version = version.split('-')[0] + return version + + +def version_compare(a, b): + """Compare the given versions. + + Call out to ``dpkg`` to make sure the code doing the comparison is + compatible with what the ``apt`` library would do. Mimic the return + values. 
+ + Upstream reference: + https://apt-team.pages.debian.net/python-apt/library/apt_pkg.html + ?highlight=version_compare#apt_pkg.version_compare + + :param a: version string + :type a: str + :param b: version string + :type b: str + :returns: >0 if ``a`` is greater than ``b``, 0 if a equals b, + <0 if ``a`` is smaller than ``b`` + :rtype: int + :raises: subprocess.CalledProcessError, RuntimeError + """ + for op in ('gt', 1), ('eq', 0), ('lt', -1): + try: + subprocess.check_call(['dpkg', '--compare-versions', + a, op[0], b], + stderr=subprocess.STDOUT, + universal_newlines=True) + return op[1] + except subprocess.CalledProcessError as cp: + if cp.returncode == 1: + continue + raise + else: + raise RuntimeError('Unable to compare "{}" and "{}", according to ' + 'our logic they are neither greater, equal nor ' + 'less than each other.'.format(a, b)) diff --git a/charmhelpers/osplatform.py b/charmhelpers/osplatform.py index d9a4d5c..78c81af 100644 --- a/charmhelpers/osplatform.py +++ b/charmhelpers/osplatform.py @@ -1,4 +1,5 @@ import platform +import os def get_platform(): @@ -9,9 +10,13 @@ def get_platform(): This string is used to decide which platform module should be imported. """ # linux_distribution is deprecated and was removed in Python 3.8 - # Warings *not* disabled, as we certainly need to fix this. - tuple_platform = platform.linux_distribution() - current_platform = tuple_platform[0] + # Warnings *not* disabled, as we certainly need to fix this. + if hasattr(platform, 'linux_distribution'): + tuple_platform = platform.linux_distribution() + current_platform = tuple_platform[0] + else: + current_platform = _get_platform_from_fs() + if "Ubuntu" in current_platform: return "ubuntu" elif "CentOS" in current_platform: @@ -20,6 +25,22 @@ def get_platform(): # Stock Python does not detect Ubuntu and instead returns debian. # Or at least it does in some build environments like Travis CI return "ubuntu" + elif "elementary" in current_platform: + # ElementaryOS fails to run tests locally without this. + return "ubuntu" else: raise RuntimeError("This module is not supported on {}." .format(current_platform)) + + +def _get_platform_from_fs(): + """Get Platform from /etc/os-release.""" + with open(os.path.join(os.sep, 'etc', 'os-release')) as fin: + content = dict( + line.split('=', 1) + for line in fin.read().splitlines() + if '=' in line + ) + for k, v in content.items(): + content[k] = v.strip('"') + return content["NAME"]
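With the os-release fallback in place, platform detection keeps working on Python 3.8 and later, where platform.linux_distribution() no longer exists. A quick sketch of the behaviour (the package name is illustrative):

from charmhelpers.osplatform import get_platform

platform_name = get_platform()   # 'ubuntu' or 'centos'
if platform_name == 'ubuntu':
    # charmhelpers.fetch maps to the apt implementation on Ubuntu
    from charmhelpers.fetch import apt_install
    apt_install(['jq'], fatal=True)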