| field | value | date |
|---|---|---|
| author | Mario Splivalo <mario.splivalo@canonical.com> | 2016-11-03 14:26:40 +0100 |
| committer | Mario Splivalo <mario.splivalo@canonical.com> | 2016-11-03 14:26:40 +0100 |
| commit | 4e4b48ac1a0201056c5f39e25ce354eec03c78b9 | |
| tree | 87d886b60e5229cc74823d01ce530afbbb69137d | |
| parent | 9e3a8e056ac0acec8c57e983c8e49c13e78614af | |
| parent | 0b7b328a92c521d6115019b1ea02108cd7ca4ecb | |
Merged latest lp:charms/trusty/mongodb
| mode | path | changes |
|---|---|---|
| -rw-r--r-- | charmhelpers/contrib/python/packages.py | 29 |
| -rw-r--r-- | charmhelpers/core/hookenv.py | 31 |
| -rw-r--r-- | charmhelpers/core/host.py | 212 |
| -rw-r--r-- | charmhelpers/fetch/__init__.py | 8 |
| -rw-r--r-- | charmhelpers/fetch/giturl.py | 4 |
| -rwxr-xr-x | tests/03_deploy_replicaset.py | 96 |
6 files changed, 282 insertions, 98 deletions
diff --git a/charmhelpers/contrib/python/packages.py b/charmhelpers/contrib/python/packages.py
index 8dcd6dd..a2411c3 100644
--- a/charmhelpers/contrib/python/packages.py
+++ b/charmhelpers/contrib/python/packages.py
@@ -19,20 +19,35 @@
 import os
 import subprocess
+import sys

 from charmhelpers.fetch import apt_install, apt_update
 from charmhelpers.core.hookenv import charm_dir, log

-try:
-    from pip import main as pip_execute
-except ImportError:
-    apt_update()
-    apt_install('python-pip')
-    from pip import main as pip_execute
-
 __author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"


+def pip_execute(*args, **kwargs):
+    """Overriden pip_execute() to stop sys.path being changed.
+
+    The act of importing main from the pip module seems to cause add wheels
+    from the /usr/share/python-wheels which are installed by various tools.
+    This function ensures that sys.path remains the same after the call is
+    executed.
+    """
+    try:
+        _path = sys.path
+        try:
+            from pip import main as _pip_execute
+        except ImportError:
+            apt_update()
+            apt_install('python-pip')
+            from pip import main as _pip_execute
+        _pip_execute(*args, **kwargs)
+    finally:
+        sys.path = _path
+
+
 def parse_options(given, available):
     """Given a set of options, check if available"""
     for key, value in sorted(given.items()):
diff --git a/charmhelpers/core/hookenv.py b/charmhelpers/core/hookenv.py
index 2dd70bc..0132129 100644
--- a/charmhelpers/core/hookenv.py
+++ b/charmhelpers/core/hookenv.py
@@ -912,6 +912,24 @@ def payload_status_set(klass, pid, status):
     subprocess.check_call(cmd)


+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def resource_get(name):
+    """used to fetch the resource path of the given name.
+
+    <name> must match a name of defined resource in metadata.yaml
+
+    returns either a path or False if resource not available
+    """
+    if not name:
+        return False
+
+    cmd = ['resource-get', name]
+    try:
+        return subprocess.check_output(cmd).decode('UTF-8')
+    except subprocess.CalledProcessError:
+        return False
+
+
 @cached
 def juju_version():
     """Full version string (eg. '1.23.3.1-trusty-amd64')"""
@@ -976,3 +994,16 @@ def _run_atexit():
     for callback, args, kwargs in reversed(_atexit):
         callback(*args, **kwargs)
     del _atexit[:]
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def network_get_primary_address(binding):
+    '''
+    Retrieve the primary network address for a named binding
+
+    :param binding: string. The name of a relation of extra-binding
+    :return: string. The primary IP address for the named binding
+    :raise: NotImplementedError if run on Juju < 2.0
+    '''
+    cmd = ['network-get', '--primary-address', binding]
+    return subprocess.check_output(cmd).strip()
diff --git a/charmhelpers/core/host.py b/charmhelpers/core/host.py
index 710fdab..e367e45 100644
--- a/charmhelpers/core/host.py
+++ b/charmhelpers/core/host.py
@@ -30,6 +30,8 @@ import random
 import string
 import subprocess
 import hashlib
+import functools
+import itertools

 from contextlib import contextmanager
 from collections import OrderedDict
@@ -126,22 +128,31 @@ def service(action, service_name):
     return subprocess.call(cmd) == 0


+_UPSTART_CONF = "/etc/init/{}.conf"
+_INIT_D_CONF = "/etc/init.d/{}"
+
+
 def service_running(service_name):
     """Determine whether a system service is running"""
     if init_is_systemd():
         return service('is-active', service_name)
     else:
-        try:
-            output = subprocess.check_output(
-                ['service', service_name, 'status'],
-                stderr=subprocess.STDOUT).decode('UTF-8')
-        except subprocess.CalledProcessError:
-            return False
-        else:
-            if ("start/running" in output or "is running" in output):
-                return True
-            else:
+        if os.path.exists(_UPSTART_CONF.format(service_name)):
+            try:
+                output = subprocess.check_output(
+                    ['status', service_name],
+                    stderr=subprocess.STDOUT).decode('UTF-8')
+            except subprocess.CalledProcessError:
                 return False
+            else:
+                # This works for upstart scripts where the 'service' command
+                # returns a consistent string to represent running 'start/running'
+                if "start/running" in output:
+                    return True
+        elif os.path.exists(_INIT_D_CONF.format(service_name)):
+            # Check System V scripts init script return codes
+            return service('status', service_name)
+        return False


 def service_available(service_name):
@@ -160,13 +171,13 @@ SYSTEMD_SYSTEM = '/run/systemd/system'


 def init_is_systemd():
+    """Return True if the host system uses systemd, False otherwise."""
     return os.path.isdir(SYSTEMD_SYSTEM)


 def adduser(username, password=None, shell='/bin/bash', system_user=False,
-            primary_group=None, secondary_groups=None):
-    """
-    Add a user to the system.
+            primary_group=None, secondary_groups=None, uid=None):
+    """Add a user to the system.

     Will log but otherwise succeed if the user already exists.

@@ -174,17 +185,23 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False,
     :param str password: Password for user; if ``None``, create a system user
     :param str shell: The default shell for the user
     :param bool system_user: Whether to create a login or system user
-    :param str primary_group: Primary group for user; defaults to their username
+    :param str primary_group: Primary group for user; defaults to username
     :param list secondary_groups: Optional list of additional groups
+    :param int uid: UID for user being created

     :returns: The password database entry struct, as returned by `pwd.getpwnam`
     """
     try:
         user_info = pwd.getpwnam(username)
         log('user {0} already exists!'.format(username))
+        if uid:
+            user_info = pwd.getpwuid(int(uid))
+            log('user with uid {0} already exists!'.format(uid))
     except KeyError:
         log('creating user {0}'.format(username))
         cmd = ['useradd']
+        if uid:
+            cmd.extend(['--uid', str(uid)])
         if system_user or password is None:
             cmd.append('--system')
         else:
@@ -219,14 +236,58 @@ def user_exists(username):
     return user_exists


-def add_group(group_name, system_group=False):
-    """Add a group to the system"""
+def uid_exists(uid):
+    """Check if a uid exists"""
+    try:
+        pwd.getpwuid(uid)
+        uid_exists = True
+    except KeyError:
+        uid_exists = False
+    return uid_exists
+
+
+def group_exists(groupname):
+    """Check if a group exists"""
+    try:
+        grp.getgrnam(groupname)
+        group_exists = True
+    except KeyError:
+        group_exists = False
+    return group_exists
+
+
+def gid_exists(gid):
+    """Check if a gid exists"""
+    try:
+        grp.getgrgid(gid)
+        gid_exists = True
+    except KeyError:
+        gid_exists = False
+    return gid_exists
+
+
+def add_group(group_name, system_group=False, gid=None):
+    """Add a group to the system
+
+    Will log but otherwise succeed if the group already exists.
+
+    :param str group_name: group to create
+    :param bool system_group: Create system group
+    :param int gid: GID for user being created
+
+    :returns: The password database entry struct, as returned by `grp.getgrnam`
+    """
     try:
         group_info = grp.getgrnam(group_name)
         log('group {0} already exists!'.format(group_name))
+        if gid:
+            group_info = grp.getgrgid(gid)
+            log('group with gid {0} already exists!'.format(gid))
     except KeyError:
         log('creating group {0}'.format(group_name))
         cmd = ['addgroup']
+        if gid:
+            cmd.extend(['--gid', str(gid)])
         if system_group:
             cmd.append('--system')
         else:
@@ -300,14 +361,12 @@ def write_file(path, content, owner='root', group='root', perms=0o444):


 def fstab_remove(mp):
-    """Remove the given mountpoint entry from /etc/fstab
-    """
+    """Remove the given mountpoint entry from /etc/fstab"""
     return Fstab.remove_by_mountpoint(mp)


 def fstab_add(dev, mp, fs, options=None):
-    """Adds the given device entry to the /etc/fstab file
-    """
+    """Adds the given device entry to the /etc/fstab file"""
     return Fstab.add(dev, mp, fs, options=options)


@@ -363,8 +422,7 @@ def fstab_mount(mountpoint):


 def file_hash(path, hash_type='md5'):
-    """
-    Generate a hash checksum of the contents of 'path' or None if not found.
+    """Generate a hash checksum of the contents of 'path' or None if not found.

     :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
                           such as md5, sha1, sha256, sha512, etc.
@@ -379,10 +437,9 @@ def file_hash(path, hash_type='md5'):


 def path_hash(path):
-    """
-    Generate a hash checksum of all files matching 'path'. Standard wildcards
-    like '*' and '?' are supported, see documentation for the 'glob' module for
-    more information.
+    """Generate a hash checksum of all files matching 'path'. Standard
+    wildcards like '*' and '?' are supported, see documentation for the 'glob'
+    module for more information.

     :return: dict: A { filename: hash } dictionary for all matched files.
         Empty if none found.
@@ -394,8 +451,7 @@ def path_hash(path):


 def check_hash(path, checksum, hash_type='md5'):
-    """
-    Validate a file using a cryptographic checksum.
+    """Validate a file using a cryptographic checksum.

     :param str checksum: Value of the checksum used to validate the file.
     :param str hash_type: Hash algorithm used to generate `checksum`.
@@ -410,10 +466,11 @@ def check_hash(path, checksum, hash_type='md5'):


 class ChecksumError(ValueError):
+    """A class derived from Value error to indicate the checksum failed."""
     pass


-def restart_on_change(restart_map, stopstart=False):
+def restart_on_change(restart_map, stopstart=False, restart_functions=None):
     """Restart services based on configuration files changing

     This function is used a decorator, for example::
@@ -431,27 +488,58 @@ def restart_on_change(restart_map, stopstart=False):
     restarted if any file matching the pattern got changed, created
     or removed. Standard wildcards are supported, see documentation
     for the 'glob' module for more information.
+
+    @param restart_map: {path_file_name: [service_name, ...]
+    @param stopstart: DEFAULT false; whether to stop, start OR restart
+    @param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    @returns result from decorated function
     """
     def wrap(f):
+        @functools.wraps(f)
         def wrapped_f(*args, **kwargs):
-            checksums = {path: path_hash(path) for path in restart_map}
-            f(*args, **kwargs)
-            restarts = []
-            for path in restart_map:
-                if path_hash(path) != checksums[path]:
-                    restarts += restart_map[path]
-            services_list = list(OrderedDict.fromkeys(restarts))
-            if not stopstart:
-                for service_name in services_list:
-                    service('restart', service_name)
-            else:
-                for action in ['stop', 'start']:
-                    for service_name in services_list:
-                        service(action, service_name)
+            return restart_on_change_helper(
+                (lambda: f(*args, **kwargs)), restart_map, stopstart,
+                restart_functions)
         return wrapped_f
     return wrap


+def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
+                             restart_functions=None):
+    """Helper function to perform the restart_on_change function.
+
+    This is provided for decorators to restart services if files described
+    in the restart_map have changed after an invocation of lambda_f().
+
+    @param lambda_f: function to call.
+    @param restart_map: {file: [service, ...]}
+    @param stopstart: whether to stop, start or restart a service
+    @param restart_functions: nonstandard functions to use to restart services
+                              {svc: func, ...}
+    @returns result of lambda_f()
+    """
+    if restart_functions is None:
+        restart_functions = {}
+    checksums = {path: path_hash(path) for path in restart_map}
+    r = lambda_f()
+    # create a list of lists of the services to restart
+    restarts = [restart_map[path]
+                for path in restart_map
+                if path_hash(path) != checksums[path]]
+    # create a flat list of ordered services without duplicates from lists
+    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
+    if services_list:
+        actions = ('stop', 'start') if stopstart else ('restart',)
+        for service_name in services_list:
+            if service_name in restart_functions:
+                restart_functions[service_name](service_name)
+            else:
+                for action in actions:
+                    service(action, service_name)
+    return r
+
+
 def lsb_release():
     """Return /etc/lsb-release in a dict"""
     d = {}
@@ -515,7 +603,7 @@ def get_bond_master(interface):


 def list_nics(nic_type=None):
-    '''Return a list of nics of given type(s)'''
+    """Return a list of nics of given type(s)"""
     if isinstance(nic_type, six.string_types):
         int_types = [nic_type]
     else:
@@ -557,12 +645,13 @@ def list_nics(nic_type=None):


 def set_nic_mtu(nic, mtu):
-    '''Set MTU on a network interface'''
+    """Set the Maximum Transmission Unit (MTU) on a network interface."""
     cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
     subprocess.check_call(cmd)


 def get_nic_mtu(nic):
+    """Return the Maximum Transmission Unit (MTU) for a network interface."""
     cmd = ['ip', 'addr', 'show', nic]
     ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
     mtu = ""
@@ -574,6 +663,7 @@ def get_nic_mtu(nic):


 def get_nic_hwaddr(nic):
+    """Return the Media Access Control (MAC) for a network interface."""
     cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
     ip_output = subprocess.check_output(cmd).decode('UTF-8')
     hwaddr = ""
@@ -584,7 +674,7 @@ def get_nic_hwaddr(nic):


 def cmp_pkgrevno(package, revno, pkgcache=None):
-    '''Compare supplied revno with the revno of the installed package
+    """Compare supplied revno with the revno of the installed package

     * 1 => Installed revno is greater than supplied arg
     * 0 => Installed revno is the same as supplied arg
@@ -593,7 +683,7 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
     This function imports apt_cache function from charmhelpers.fetch if
     the pkgcache argument is None. Be sure to add charmhelpers.fetch if
     you call this function, or pass an apt_pkg.Cache() instance.
-    '''
+    """
     import apt_pkg
     if not pkgcache:
         from charmhelpers.fetch import apt_cache
@@ -603,19 +693,27 @@ def cmp_pkgrevno(package, revno, pkgcache=None):


 @contextmanager
-def chdir(d):
+def chdir(directory):
+    """Change the current working directory to a different directory for a code
+    block and return the previous directory after the block exits. Useful to
+    run commands from a specificed directory.
+
+    :param str directory: The directory path to change to for this context.
+    """
     cur = os.getcwd()
     try:
-        yield os.chdir(d)
+        yield os.chdir(directory)
     finally:
         os.chdir(cur)


 def chownr(path, owner, group, follow_links=True, chowntopdir=False):
-    """
-    Recursively change user and group ownership of files and directories
+    """Recursively change user and group ownership of files and directories
     in given path. Doesn't chown path itself by default, only its children.

+    :param str path: The string path to start changing ownership.
+    :param str owner: The owner string to use when looking up the uid.
+    :param str group: The group string to use when looking up the gid.
     :param bool follow_links: Also Chown links if True
     :param bool chowntopdir: Also chown path itself if True
     """
@@ -639,15 +737,23 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False):


 def lchownr(path, owner, group):
+    """Recursively change user and group ownership of files and directories
+    in a given path, not following symbolic links. See the documentation for
+    'os.lchown' for more information.
+
+    :param str path: The string path to start changing ownership.
+    :param str owner: The owner string to use when looking up the uid.
+    :param str group: The group string to use when looking up the gid.
+    """
     chownr(path, owner, group, follow_links=False)


 def get_total_ram():
-    '''The total amount of system RAM in bytes.
+    """The total amount of system RAM in bytes.

     This is what is reported by the OS, and may be overcommitted when
     there are multiple containers hosted on the same machine.
-    '''
+    """
     with open('/proc/meminfo', 'r') as f:
         for line in f.readlines():
             if line:
diff --git a/charmhelpers/fetch/__init__.py b/charmhelpers/fetch/__init__.py
index db0d86a..ad485ec 100644
--- a/charmhelpers/fetch/__init__.py
+++ b/charmhelpers/fetch/__init__.py
@@ -106,6 +106,14 @@ CLOUD_ARCHIVE_POCKETS = {
     'mitaka/proposed': 'trusty-proposed/mitaka',
     'trusty-mitaka/proposed': 'trusty-proposed/mitaka',
     'trusty-proposed/mitaka': 'trusty-proposed/mitaka',
+    # Newton
+    'newton': 'xenial-updates/newton',
+    'xenial-newton': 'xenial-updates/newton',
+    'xenial-newton/updates': 'xenial-updates/newton',
+    'xenial-updates/newton': 'xenial-updates/newton',
+    'newton/proposed': 'xenial-proposed/newton',
+    'xenial-newton/proposed': 'xenial-proposed/newton',
+    'xenial-proposed/newton': 'xenial-proposed/newton',
 }

 # The order of this list is very important. Handlers should be listed in from
diff --git a/charmhelpers/fetch/giturl.py b/charmhelpers/fetch/giturl.py
index 9ad8dc6..65ed531 100644
--- a/charmhelpers/fetch/giturl.py
+++ b/charmhelpers/fetch/giturl.py
@@ -15,7 +15,7 @@
 # along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

 import os
-from subprocess import check_call
+from subprocess import check_call, CalledProcessError
 from charmhelpers.fetch import (
     BaseFetchHandler,
     UnhandledSource,
@@ -63,6 +63,8 @@ class GitUrlFetchHandler(BaseFetchHandler):
                        branch_name)
         try:
             self.clone(source, dest_dir, branch, depth)
+        except CalledProcessError as e:
+            raise UnhandledSource(e)
         except OSError as e:
             raise UnhandledSource(e.strerror)
         return dest_dir
diff --git a/tests/03_deploy_replicaset.py b/tests/03_deploy_replicaset.py
index cae5930..d7120a3 100755
--- a/tests/03_deploy_replicaset.py
+++ b/tests/03_deploy_replicaset.py
@@ -46,39 +46,12 @@ sentry_dict = {


 #############################################################
-# Check presence of MongoDB GUI HEALTH Status
+# Test Utilities
 #############################################################
-def validate_status_interface():
-    pubaddy = d.sentry['mongodb'][0].info['public-address']
-    fmt = "http://{}:28017"
-    if ":" in pubaddy:
-        fmt = "http://[{}]:28017"
-    r = requests.get(fmt.format(pubaddy), verify=False)
-    r.raise_for_status
-
-
-#############################################################
-# Validate that each unit has an active mongo service
-#############################################################
-def validate_running_services():
-    for service in sentry_dict:
-        output = sentry_dict[service].run('service mongodb status')
-        service_active = str(output).find('active (running)')
-        if service_active == -1:
-            message = "Failed to find running MongoDB on host {}".format(
-                service)
-            amulet.raise_status(amulet.SKIP, msg=message)
-
-
-#############################################################
-# Validate proper replicaset setup
-#############################################################
-def validate_replicaset_setup():
-
-    d.sentry.wait(seconds)
-
+def _expect_replicaset_counts(primaries_count,
+                              secondaries_count,
+                              time_between=10):
     unit_status = []
-    time_between = 10
     tries = wait_for_replicaset / time_between

     for service in sentry_dict:
@@ -107,19 +80,67 @@ def validate_replicaset_setup():
             client.close()

     primaries = Counter(unit_status)[1]
-    if primaries != 1:
-        message = "Only one PRIMARY unit allowed! Found: %s %s" % (primaries,
-                                                                   unit_status)
+    if primaries != primaries_count:
+        message = "Expected %d PRIMARY unit(s)! Found: %s %s" % (
+            primaries_count,
+            primaries,
+            unit_status)
         amulet.raise_status(amulet.FAIL, message)

     secondrs = Counter(unit_status)[2]
-    if secondrs != 2:
-        message = ("Only two SECONDARY units allowed! (Found %s) %s" %
-                   (secondrs, unit_status))
+    if secondrs != secondaries_count:
+        message = ("Expected %d secondary units! (Found %s) %s" %
+                   (secondaries_count, secondrs, unit_status))
         amulet.raise_status(amulet.FAIL, message)


 #############################################################
+# Check presence of MongoDB GUI HEALTH Status
+#############################################################
+def validate_status_interface():
+    pubaddy = d.sentry['mongodb'][0].info['public-address']
+    fmt = "http://{}:28017"
+    if ":" in pubaddy:
+        fmt = "http://[{}]:28017"
+    r = requests.get(fmt.format(pubaddy), verify=False)
+    r.raise_for_status
+
+
+#############################################################
+# Validate that each unit has an active mongo service
+#############################################################
+def validate_running_services():
+    for service in sentry_dict:
+        output = sentry_dict[service].run('service mongodb status')
+        service_active = str(output).find('mongodb start/running')
+        if series == 'xenial':
+            service_active = str(output).find('active (running)')
+        if service_active == -1:
+            message = "Failed to find running MongoDB on host {}".format(
+                service)
+            amulet.raise_status(amulet.SKIP, msg=message)
+
+
+#############################################################
+# Validate proper replicaset setup
+#############################################################
+def validate_replicaset_setup():
+    d.sentry.wait(seconds)
+    _expect_replicaset_counts(1, 2)
+
+
+#############################################################
+# Validate replicaset joined
+#############################################################
+def validate_replicaset_relation_joined():
+    d.add_unit('mongodb', units=2)
+    d.sentry.wait(wait_for_replicaset)
+    sentry_dict.update({'mongodb3-sentry': d.sentry['mongodb'][3],
+                        'mongodb4-sentry': d.sentry['mongodb'][4]})
+    _expect_replicaset_counts(1, 4)
+
+
+#############################################################
 # Validate connectivity from $WORLD
 #############################################################
 def validate_world_connectivity():
@@ -165,4 +186,5 @@ def validate_world_connectivity():
 validate_status_interface()
 validate_running_services()
 validate_replicaset_setup()
+validate_replicaset_relation_joined()
 validate_world_connectivity()
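
The charmhelpers sync above introduces several new helpers: `resource_get()` and `network_get_primary_address()` in hookenv, the `uid`/`gid` arguments to `adduser()`/`add_group()`, and the `restart_functions` hook behind `restart_on_change()`. The sketch below is illustrative only and is not part of the commit: it shows how charm hook code might call these helpers, assuming a Juju hook environment where the `resource-get` and `network-get` tools exist. The resource name `mongodb-snap`, the binding name `database`, the uid/gid value `500`, and the config path are made-up placeholders.

```python
# Hypothetical charm hook sketch exercising the helpers added in this sync.
from charmhelpers.core.hookenv import (
    log,
    resource_get,
    network_get_primary_address,
)
from charmhelpers.core.host import (
    add_group,
    adduser,
    restart_on_change,
    service_running,
)


def _reload_mongod(service_name):
    """Custom restart hook passed via restart_functions (illustrative)."""
    # A real charm might send SIGHUP or drive a rolling restart here.
    log('reloading {} instead of restarting it'.format(service_name))


@restart_on_change({'/etc/mongodb.conf': ['mongodb']},
                   restart_functions={'mongodb': _reload_mongod})
def write_config():
    """Render config; mongodb is only 'restarted' if the file hash changes."""
    # Fixed ids via the new gid/uid keyword arguments (500 is illustrative).
    add_group('mongodb', system_group=True, gid=500)
    adduser('mongodb', system_user=True, primary_group='mongodb', uid=500)

    # resource_get() returns a path string, or False when the resource is
    # missing or the controller has no resource support.
    bundle = resource_get('mongodb-snap')
    if not bundle:
        log('no mongodb-snap resource attached; falling back to the archive')

    try:
        # Raises NotImplementedError on Juju < 2.0 (no network-get tool).
        # Note: the helper does not decode the output, so this is bytes on py3.
        addr = network_get_primary_address('database')
    except NotImplementedError:
        addr = None

    with open('/etc/mongodb.conf', 'w') as f:
        f.write('# bind_ip = {}\n'.format(addr))


if __name__ == '__main__':
    write_config()
    log('mongodb running: {}'.format(service_running('mongodb')))
```

Because `write_config()` is wrapped by `restart_on_change`, the decorator hashes `/etc/mongodb.conf` before and after the call and, if it changed, invokes `_reload_mongod('mongodb')` in place of the default `service restart` action.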
