summaryrefslogtreecommitdiff
diff options
authorCharles Butler <chuck@dasroot.net>2014-07-08 14:53:07 -0400
committerCharles Butler <chuck@dasroot.net>2014-07-08 14:53:07 -0400
commit0b761f954ca0579c3426c99342a44fdcd187ef25 (patch)
treeb87d37c49144a341a2b7ad6a9bb7b096e40719f5
parent5f28826e1c1d30a5af4480277396d2b82bb6b8a9 (diff)
Adds charmhelpers, and refactors to green light deployments
-rw-r--r--.bzrignore1
-rw-r--r--Makefile10
-rw-r--r--__init__.py0
-rw-r--r--bin/charm_helpers_sync.py225
-rw-r--r--hooks/charmhelpers/core/fstab.py116
-rw-r--r--hooks/charmhelpers/core/hookenv.py108
-rw-r--r--hooks/charmhelpers/core/host.py46
-rw-r--r--hooks/charmhelpers/fetch/__init__.py211
-rw-r--r--hooks/charmhelpers/fetch/bzrurl.py3
-rwxr-xr-xhooks/hooks.py308
-rw-r--r--metadata.yaml7
-rwxr-xr-xscripts/charm-helpers-sync225
12 files changed, 920 insertions, 340 deletions
diff --git a/.bzrignore b/.bzrignore
new file mode 100644
index 0000000..6b8710a
--- /dev/null
+++ b/.bzrignore
@@ -0,0 +1 @@
+.git
diff --git a/Makefile b/Makefile
index 84be4d0..2dee964 100644
--- a/Makefile
+++ b/Makefile
@@ -13,9 +13,17 @@
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
+PYTHON := /usr/bin/env python
unittest:
tests/10-unit.test
sync:
- @charm-helper-sync -c charm-helpers-sync.yaml
+ @mkdir -p bin
+ @bzr cat lp:charm-helpers/tools/charm_helpers_sync/charm_helpers_sync.py > bin/charm_helpers_sync.py
+ @$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-sync.yaml
+
+clean:
+ @find . -name \*.pyc -delete
+ @find . -name '*.bak' -delete
+
diff --git a/__init__.py b/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/__init__.py
diff --git a/bin/charm_helpers_sync.py b/bin/charm_helpers_sync.py
new file mode 100644
index 0000000..03bf64d
--- /dev/null
+++ b/bin/charm_helpers_sync.py
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+#
+# Copyright 2013 Canonical Ltd.
+
+# Authors:
+# Adam Gandelman <adamg@ubuntu.com>
+#
+
+import logging
+import optparse
+import os
+import subprocess
+import shutil
+import sys
+import tempfile
+import yaml
+
+from fnmatch import fnmatch
+
+CHARM_HELPERS_BRANCH = 'lp:charm-helpers'
+
+
+def parse_config(conf_file):
+ if not os.path.isfile(conf_file):
+ logging.error('Invalid config file: %s.' % conf_file)
+ return False
+ return yaml.load(open(conf_file).read())
+
+
+def clone_helpers(work_dir, branch):
+ dest = os.path.join(work_dir, 'charm-helpers')
+ logging.info('Checking out %s to %s.' % (branch, dest))
+ cmd = ['bzr', 'checkout', '--lightweight', branch, dest]
+ subprocess.check_call(cmd)
+ return dest
+
+
+def _module_path(module):
+ return os.path.join(*module.split('.'))
+
+
+def _src_path(src, module):
+ return os.path.join(src, 'charmhelpers', _module_path(module))
+
+
+def _dest_path(dest, module):
+ return os.path.join(dest, _module_path(module))
+
+
+def _is_pyfile(path):
+ return os.path.isfile(path + '.py')
+
+
+def ensure_init(path):
+ '''
+ ensure directories leading up to path are importable, omitting
+ parent directory, eg path='/hooks/helpers/foo'/:
+ hooks/
+ hooks/helpers/__init__.py
+ hooks/helpers/foo/__init__.py
+ '''
+ for d, dirs, files in os.walk(os.path.join(*path.split('/')[:2])):
+ _i = os.path.join(d, '__init__.py')
+ if not os.path.exists(_i):
+ logging.info('Adding missing __init__.py: %s' % _i)
+ open(_i, 'wb').close()
+
+
+def sync_pyfile(src, dest):
+ src = src + '.py'
+ src_dir = os.path.dirname(src)
+ logging.info('Syncing pyfile: %s -> %s.' % (src, dest))
+ if not os.path.exists(dest):
+ os.makedirs(dest)
+ shutil.copy(src, dest)
+ if os.path.isfile(os.path.join(src_dir, '__init__.py')):
+ shutil.copy(os.path.join(src_dir, '__init__.py'),
+ dest)
+ ensure_init(dest)
+
+
+def get_filter(opts=None):
+ opts = opts or []
+ if 'inc=*' in opts:
+ # do not filter any files, include everything
+ return None
+
+ def _filter(dir, ls):
+ incs = [opt.split('=').pop() for opt in opts if 'inc=' in opt]
+ _filter = []
+ for f in ls:
+ _f = os.path.join(dir, f)
+
+ if not os.path.isdir(_f) and not _f.endswith('.py') and incs:
+ if True not in [fnmatch(_f, inc) for inc in incs]:
+ logging.debug('Not syncing %s, does not match include '
+ 'filters (%s)' % (_f, incs))
+ _filter.append(f)
+ else:
+ logging.debug('Including file, which matches include '
+ 'filters (%s): %s' % (incs, _f))
+ elif (os.path.isfile(_f) and not _f.endswith('.py')):
+ logging.debug('Not syncing file: %s' % f)
+ _filter.append(f)
+ elif (os.path.isdir(_f) and not
+ os.path.isfile(os.path.join(_f, '__init__.py'))):
+ logging.debug('Not syncing directory: %s' % f)
+ _filter.append(f)
+ return _filter
+ return _filter
+
+
+def sync_directory(src, dest, opts=None):
+ if os.path.exists(dest):
+ logging.debug('Removing existing directory: %s' % dest)
+ shutil.rmtree(dest)
+ logging.info('Syncing directory: %s -> %s.' % (src, dest))
+
+ shutil.copytree(src, dest, ignore=get_filter(opts))
+ ensure_init(dest)
+
+
+def sync(src, dest, module, opts=None):
+ if os.path.isdir(_src_path(src, module)):
+ sync_directory(_src_path(src, module), _dest_path(dest, module), opts)
+ elif _is_pyfile(_src_path(src, module)):
+ sync_pyfile(_src_path(src, module),
+ os.path.dirname(_dest_path(dest, module)))
+ else:
+ logging.warn('Could not sync: %s. Neither a pyfile or directory, '
+ 'does it even exist?' % module)
+
+
+def parse_sync_options(options):
+ if not options:
+ return []
+ return options.split(',')
+
+
+def extract_options(inc, global_options=None):
+ global_options = global_options or []
+ if global_options and isinstance(global_options, basestring):
+ global_options = [global_options]
+ if '|' not in inc:
+ return (inc, global_options)
+ inc, opts = inc.split('|')
+ return (inc, parse_sync_options(opts) + global_options)
+
+
+def sync_helpers(include, src, dest, options=None):
+ if not os.path.isdir(dest):
+ os.makedirs(dest)
+
+ global_options = parse_sync_options(options)
+
+ for inc in include:
+ if isinstance(inc, str):
+ inc, opts = extract_options(inc, global_options)
+ sync(src, dest, inc, opts)
+ elif isinstance(inc, dict):
+ # could also do nested dicts here.
+ for k, v in inc.iteritems():
+ if isinstance(v, list):
+ for m in v:
+ inc, opts = extract_options(m, global_options)
+ sync(src, dest, '%s.%s' % (k, inc), opts)
+
+if __name__ == '__main__':
+ parser = optparse.OptionParser()
+ parser.add_option('-c', '--config', action='store', dest='config',
+ default=None, help='helper config file')
+ parser.add_option('-D', '--debug', action='store_true', dest='debug',
+ default=False, help='debug')
+ parser.add_option('-b', '--branch', action='store', dest='branch',
+ help='charm-helpers bzr branch (overrides config)')
+ parser.add_option('-d', '--destination', action='store', dest='dest_dir',
+ help='sync destination dir (overrides config)')
+ (opts, args) = parser.parse_args()
+
+ if opts.debug:
+ logging.basicConfig(level=logging.DEBUG)
+ else:
+ logging.basicConfig(level=logging.INFO)
+
+ if opts.config:
+ logging.info('Loading charm helper config from %s.' % opts.config)
+ config = parse_config(opts.config)
+ if not config:
+ logging.error('Could not parse config from %s.' % opts.config)
+ sys.exit(1)
+ else:
+ config = {}
+
+ if 'branch' not in config:
+ config['branch'] = CHARM_HELPERS_BRANCH
+ if opts.branch:
+ config['branch'] = opts.branch
+ if opts.dest_dir:
+ config['destination'] = opts.dest_dir
+
+ if 'destination' not in config:
+ logging.error('No destination dir. specified as option or config.')
+ sys.exit(1)
+
+ if 'include' not in config:
+ if not args:
+ logging.error('No modules to sync specified as option or config.')
+ sys.exit(1)
+ config['include'] = []
+ [config['include'].append(a) for a in args]
+
+ sync_options = None
+ if 'options' in config:
+ sync_options = config['options']
+ tmpd = tempfile.mkdtemp()
+ try:
+ checkout = clone_helpers(tmpd, config['branch'])
+ sync_helpers(config['include'], checkout, config['destination'],
+ options=sync_options)
+ except Exception, e:
+ logging.error("Could not sync: %s" % e)
+ raise e
+ finally:
+ logging.debug('Cleaning up %s' % tmpd)
+ shutil.rmtree(tmpd)
diff --git a/hooks/charmhelpers/core/fstab.py b/hooks/charmhelpers/core/fstab.py
new file mode 100644
index 0000000..cfaf0a6
--- /dev/null
+++ b/hooks/charmhelpers/core/fstab.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
+
+import os
+
+
+class Fstab(file):
+ """This class extends file in order to implement a file reader/writer
+ for file `/etc/fstab`
+ """
+
+ class Entry(object):
+ """Entry class represents a non-comment line on the `/etc/fstab` file
+ """
+ def __init__(self, device, mountpoint, filesystem,
+ options, d=0, p=0):
+ self.device = device
+ self.mountpoint = mountpoint
+ self.filesystem = filesystem
+
+ if not options:
+ options = "defaults"
+
+ self.options = options
+ self.d = d
+ self.p = p
+
+ def __eq__(self, o):
+ return str(self) == str(o)
+
+ def __str__(self):
+ return "{} {} {} {} {} {}".format(self.device,
+ self.mountpoint,
+ self.filesystem,
+ self.options,
+ self.d,
+ self.p)
+
+ DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
+
+ def __init__(self, path=None):
+ if path:
+ self._path = path
+ else:
+ self._path = self.DEFAULT_PATH
+ file.__init__(self, self._path, 'r+')
+
+ def _hydrate_entry(self, line):
+ # NOTE: use split with no arguments to split on any
+ # whitespace including tabs
+ return Fstab.Entry(*filter(
+ lambda x: x not in ('', None),
+ line.strip("\n").split()))
+
+ @property
+ def entries(self):
+ self.seek(0)
+ for line in self.readlines():
+ try:
+ if not line.startswith("#"):
+ yield self._hydrate_entry(line)
+ except ValueError:
+ pass
+
+ def get_entry_by_attr(self, attr, value):
+ for entry in self.entries:
+ e_attr = getattr(entry, attr)
+ if e_attr == value:
+ return entry
+ return None
+
+ def add_entry(self, entry):
+ if self.get_entry_by_attr('device', entry.device):
+ return False
+
+ self.write(str(entry) + '\n')
+ self.truncate()
+ return entry
+
+ def remove_entry(self, entry):
+ self.seek(0)
+
+ lines = self.readlines()
+
+ found = False
+ for index, line in enumerate(lines):
+ if not line.startswith("#"):
+ if self._hydrate_entry(line) == entry:
+ found = True
+ break
+
+ if not found:
+ return False
+
+ lines.remove(line)
+
+ self.seek(0)
+ self.write(''.join(lines))
+ self.truncate()
+ return True
+
+ @classmethod
+ def remove_by_mountpoint(cls, mountpoint, path=None):
+ fstab = cls(path=path)
+ entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
+ if entry:
+ return fstab.remove_entry(entry)
+ return False
+
+ @classmethod
+ def add(cls, device, mountpoint, filesystem, options=None, path=None):
+ return cls(path=path).add_entry(Fstab.Entry(device,
+ mountpoint, filesystem,
+ options=options))
diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py
index 505c202..c953043 100644
--- a/hooks/charmhelpers/core/hookenv.py
+++ b/hooks/charmhelpers/core/hookenv.py
@@ -25,7 +25,7 @@ cache = {}
def cached(func):
"""Cache return values for multiple executions of func + args
- For example:
+ For example::
@cached
def unit_get(attribute):
@@ -155,6 +155,100 @@ def hook_name():
return os.path.basename(sys.argv[0])
+class Config(dict):
+ """A Juju charm config dictionary that can write itself to
+ disk (as json) and track which values have changed since
+ the previous hook invocation.
+
+ Do not instantiate this object directly - instead call
+ ``hookenv.config()``
+
+ Example usage::
+
+ >>> # inside a hook
+ >>> from charmhelpers.core import hookenv
+ >>> config = hookenv.config()
+ >>> config['foo']
+ 'bar'
+ >>> config['mykey'] = 'myval'
+ >>> config.save()
+
+
+ >>> # user runs `juju set mycharm foo=baz`
+ >>> # now we're inside subsequent config-changed hook
+ >>> config = hookenv.config()
+ >>> config['foo']
+ 'baz'
+ >>> # test to see if this val has changed since last hook
+ >>> config.changed('foo')
+ True
+ >>> # what was the previous value?
+ >>> config.previous('foo')
+ 'bar'
+ >>> # keys/values that we add are preserved across hooks
+ >>> config['mykey']
+ 'myval'
+ >>> # don't forget to save at the end of hook!
+ >>> config.save()
+
+ """
+ CONFIG_FILE_NAME = '.juju-persistent-config'
+
+ def __init__(self, *args, **kw):
+ super(Config, self).__init__(*args, **kw)
+ self._prev_dict = None
+ self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
+ if os.path.exists(self.path):
+ self.load_previous()
+
+ def load_previous(self, path=None):
+ """Load previous copy of config from disk so that current values
+ can be compared to previous values.
+
+ :param path:
+
+ File path from which to load the previous config. If `None`,
+ config is loaded from the default location. If `path` is
+ specified, subsequent `save()` calls will write to the same
+ path.
+
+ """
+ self.path = path or self.path
+ with open(self.path) as f:
+ self._prev_dict = json.load(f)
+
+ def changed(self, key):
+ """Return true if the value for this key has changed since
+ the last save.
+
+ """
+ if self._prev_dict is None:
+ return True
+ return self.previous(key) != self.get(key)
+
+ def previous(self, key):
+ """Return previous value for this key, or None if there
+ is no "previous" value.
+
+ """
+ if self._prev_dict:
+ return self._prev_dict.get(key)
+ return None
+
+ def save(self):
+ """Save this config to disk.
+
+ Preserves items in _prev_dict that do not exist in self.
+
+ """
+ if self._prev_dict:
+ for k, v in self._prev_dict.iteritems():
+ if k not in self:
+ self[k] = v
+ with open(self.path, 'w') as f:
+ json.dump(self, f)
+
+
@cached
def config(scope=None):
"""Juju charm configuration"""
@@ -163,7 +257,10 @@ def config(scope=None):
config_cmd_line.append(scope)
config_cmd_line.append('--format=json')
try:
- return json.loads(subprocess.check_output(config_cmd_line))
+ config_data = json.loads(subprocess.check_output(config_cmd_line))
+ if scope is not None:
+ return config_data
+ return Config(config_data)
except ValueError:
return None
@@ -348,18 +445,19 @@ class UnregisteredHookError(Exception):
class Hooks(object):
"""A convenient handler for hook functions.
- Example:
+ Example::
+
hooks = Hooks()
# register a hook, taking its name from the function name
@hooks.hook()
def install():
- ...
+ pass # your code here
# register a hook, providing a custom hook name
@hooks.hook("config-changed")
def config_changed():
- ...
+ pass # your code here
if __name__ == "__main__":
# execute a hook based on the name the program is called by
diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py
index cfd2684..8b617a4 100644
--- a/hooks/charmhelpers/core/host.py
+++ b/hooks/charmhelpers/core/host.py
@@ -16,6 +16,7 @@ import hashlib
from collections import OrderedDict
from hookenv import log
+from fstab import Fstab
def service_start(service_name):
@@ -34,7 +35,8 @@ def service_restart(service_name):
def service_reload(service_name, restart_on_failure=False):
- """Reload a system service, optionally falling back to restart if reload fails"""
+ """Reload a system service, optionally falling back to restart if
+ reload fails"""
service_result = service('reload', service_name)
if not service_result and restart_on_failure:
service_result = service('restart', service_name)
@@ -143,7 +145,19 @@ def write_file(path, content, owner='root', group='root', perms=0444):
target.write(content)
-def mount(device, mountpoint, options=None, persist=False):
+def fstab_remove(mp):
+ """Remove the given mountpoint entry from /etc/fstab
+ """
+ return Fstab.remove_by_mountpoint(mp)
+
+
+def fstab_add(dev, mp, fs, options=None):
+ """Adds the given device entry to the /etc/fstab file
+ """
+ return Fstab.add(dev, mp, fs, options=options)
+
+
+def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
"""Mount a filesystem at a particular mountpoint"""
cmd_args = ['mount']
if options is not None:
@@ -154,9 +168,9 @@ def mount(device, mountpoint, options=None, persist=False):
except subprocess.CalledProcessError, e:
log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
return False
+
if persist:
- # TODO: update fstab
- pass
+ return fstab_add(device, mountpoint, filesystem, options=options)
return True
@@ -168,9 +182,9 @@ def umount(mountpoint, persist=False):
except subprocess.CalledProcessError, e:
log('Error unmounting {}\n{}'.format(mountpoint, e.output))
return False
+
if persist:
- # TODO: update fstab
- pass
+ return fstab_remove(mountpoint)
return True
@@ -197,13 +211,13 @@ def file_hash(path):
def restart_on_change(restart_map, stopstart=False):
"""Restart services based on configuration files changing
- This function is used a decorator, for example
+ This function is used a decorator, for example::
@restart_on_change({
'/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
})
def ceph_client_changed():
- ...
+ pass # your code here
In this example, the cinder-api and cinder-volume services
would be restarted if /etc/ceph/ceph.conf is changed by the
@@ -295,3 +309,19 @@ def get_nic_hwaddr(nic):
if 'link/ether' in words:
hwaddr = words[words.index('link/ether') + 1]
return hwaddr
+
+
+def cmp_pkgrevno(package, revno, pkgcache=None):
+ '''Compare supplied revno with the revno of the installed package
+
+ * 1 => Installed revno is greater than supplied arg
+ * 0 => Installed revno is the same as supplied arg
+ * -1 => Installed revno is less than supplied arg
+
+ '''
+ import apt_pkg
+ if not pkgcache:
+ apt_pkg.init()
+ pkgcache = apt_pkg.Cache()
+ pkg = pkgcache[package]
+ return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py
index 97a1991..5be512c 100644
--- a/hooks/charmhelpers/fetch/__init__.py
+++ b/hooks/charmhelpers/fetch/__init__.py
@@ -1,4 +1,5 @@
import importlib
+import time
from yaml import safe_load
from charmhelpers.core.host import (
lsb_release
@@ -12,9 +13,9 @@ from charmhelpers.core.hookenv import (
config,
log,
)
-import apt_pkg
import os
+
CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""
@@ -54,12 +55,74 @@ CLOUD_ARCHIVE_POCKETS = {
'icehouse/proposed': 'precise-proposed/icehouse',
'precise-icehouse/proposed': 'precise-proposed/icehouse',
'precise-proposed/icehouse': 'precise-proposed/icehouse',
+ # Juno
+ 'juno': 'trusty-updates/juno',
+ 'trusty-juno': 'trusty-updates/juno',
+ 'trusty-juno/updates': 'trusty-updates/juno',
+ 'trusty-updates/juno': 'trusty-updates/juno',
+ 'juno/proposed': 'trusty-proposed/juno',
+ 'juno/proposed': 'trusty-proposed/juno',
+ 'trusty-juno/proposed': 'trusty-proposed/juno',
+ 'trusty-proposed/juno': 'trusty-proposed/juno',
}
+# The order of this list is very important. Handlers should be listed in from
+# least- to most-specific URL matching.
+FETCH_HANDLERS = (
+ 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
+ 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
+)
+
+APT_NO_LOCK = 100 # The return code for "couldn't acquire lock" in APT.
+APT_NO_LOCK_RETRY_DELAY = 10 # Wait 10 seconds between apt lock checks.
+APT_NO_LOCK_RETRY_COUNT = 30 # Retry to acquire the lock X times.
+
+
+class SourceConfigError(Exception):
+ pass
+
+
+class UnhandledSource(Exception):
+ pass
+
+
+class AptLockError(Exception):
+ pass
+
+
+class BaseFetchHandler(object):
+
+ """Base class for FetchHandler implementations in fetch plugins"""
+
+ def can_handle(self, source):
+ """Returns True if the source can be handled. Otherwise returns
+ a string explaining why it cannot"""
+ return "Wrong source type"
+
+ def install(self, source):
+ """Try to download and unpack the source. Return the path to the
+ unpacked files or raise UnhandledSource."""
+ raise UnhandledSource("Wrong source type {}".format(source))
+
+ def parse_url(self, url):
+ return urlparse(url)
+
+ def base_url(self, url):
+ """Return url without querystring or fragment"""
+ parts = list(self.parse_url(url))
+ parts[4:] = ['' for i in parts[4:]]
+ return urlunparse(parts)
+
def filter_installed_packages(packages):
"""Returns a list of packages that require installation"""
+ import apt_pkg
apt_pkg.init()
+
+ # Tell apt to build an in-memory cache to prevent race conditions (if
+ # another process is already building the cache).
+ apt_pkg.config.set("Dir::Cache::pkgcache", "")
+
cache = apt_pkg.Cache()
_pkgs = []
for package in packages:
@@ -87,14 +150,7 @@ def apt_install(packages, options=None, fatal=False):
cmd.extend(packages)
log("Installing {} with options: {}".format(packages,
options))
- env = os.environ.copy()
- if 'DEBIAN_FRONTEND' not in env:
- env['DEBIAN_FRONTEND'] = 'noninteractive'
-
- if fatal:
- subprocess.check_call(cmd, env=env)
- else:
- subprocess.call(cmd, env=env)
+ _run_apt_command(cmd, fatal)
def apt_upgrade(options=None, fatal=False, dist=False):
@@ -109,24 +165,13 @@ def apt_upgrade(options=None, fatal=False, dist=False):
else:
cmd.append('upgrade')
log("Upgrading with options: {}".format(options))
-
- env = os.environ.copy()
- if 'DEBIAN_FRONTEND' not in env:
- env['DEBIAN_FRONTEND'] = 'noninteractive'
-
- if fatal:
- subprocess.check_call(cmd, env=env)
- else:
- subprocess.call(cmd, env=env)
+ _run_apt_command(cmd, fatal)
def apt_update(fatal=False):
"""Update local apt cache"""
cmd = ['apt-get', 'update']
- if fatal:
- subprocess.check_call(cmd)
- else:
- subprocess.call(cmd)
+ _run_apt_command(cmd, fatal)
def apt_purge(packages, fatal=False):
@@ -137,10 +182,7 @@ def apt_purge(packages, fatal=False):
else:
cmd.extend(packages)
log("Purging {}".format(packages))
- if fatal:
- subprocess.check_call(cmd)
- else:
- subprocess.call(cmd)
+ _run_apt_command(cmd, fatal)
def apt_hold(packages, fatal=False):
@@ -151,6 +193,7 @@ def apt_hold(packages, fatal=False):
else:
cmd.extend(packages)
log("Holding {}".format(packages))
+
if fatal:
subprocess.check_call(cmd)
else:
@@ -184,56 +227,49 @@ def add_source(source, key=None):
apt.write(PROPOSED_POCKET.format(release))
if key:
subprocess.check_call(['apt-key', 'adv', '--keyserver',
- 'keyserver.ubuntu.com', '--recv',
+ 'hkp://keyserver.ubuntu.com:80', '--recv',
key])
-class SourceConfigError(Exception):
- pass
-
-
def configure_sources(update=False,
sources_var='install_sources',
keys_var='install_keys'):
"""
- Configure multiple sources from charm configuration
+ Configure multiple sources from charm configuration.
+
+ The lists are encoded as yaml fragments in the configuration.
+ The frament needs to be included as a string.
Example config:
- install_sources:
+ install_sources: |
- "ppa:foo"
- "http://example.com/repo precise main"
- install_keys:
+ install_keys: |
- null
- "a1b2c3d4"
Note that 'null' (a.k.a. None) should not be quoted.
"""
- sources = safe_load(config(sources_var))
- keys = config(keys_var)
- if keys is not None:
- keys = safe_load(keys)
- if isinstance(sources, basestring) and (
- keys is None or isinstance(keys, basestring)):
- add_source(sources, keys)
- else:
- if not len(sources) == len(keys):
- msg = 'Install sources and keys lists are different lengths'
- raise SourceConfigError(msg)
- for src_num in range(len(sources)):
- add_source(sources[src_num], keys[src_num])
- if update:
- apt_update(fatal=True)
+ sources = safe_load((config(sources_var) or '').strip()) or []
+ keys = safe_load((config(keys_var) or '').strip()) or None
-# The order of this list is very important. Handlers should be listed in from
-# least- to most-specific URL matching.
-FETCH_HANDLERS = (
- 'charmhelpers.fetch.archiveurl.ArchiveUrlFetchHandler',
- 'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
-)
+ if isinstance(sources, basestring):
+ sources = [sources]
+ if keys is None:
+ for source in sources:
+ add_source(source, None)
+ else:
+ if isinstance(keys, basestring):
+ keys = [keys]
-class UnhandledSource(Exception):
- pass
+ if len(sources) != len(keys):
+ raise SourceConfigError(
+ 'Install sources and keys lists are different lengths')
+ for source, key in zip(sources, keys):
+ add_source(source, key)
+ if update:
+ apt_update(fatal=True)
def install_remote(source):
@@ -265,30 +301,6 @@ def install_from_config(config_var_name):
return install_remote(source)
-class BaseFetchHandler(object):
-
- """Base class for FetchHandler implementations in fetch plugins"""
-
- def can_handle(self, source):
- """Returns True if the source can be handled. Otherwise returns
- a string explaining why it cannot"""
- return "Wrong source type"
-
- def install(self, source):
- """Try to download and unpack the source. Return the path to the
- unpacked files or raise UnhandledSource."""
- raise UnhandledSource("Wrong source type {}".format(source))
-
- def parse_url(self, url):
- return urlparse(url)
-
- def base_url(self, url):
- """Return url without querystring or fragment"""
- parts = list(self.parse_url(url))
- parts[4:] = ['' for i in parts[4:]]
- return urlunparse(parts)
-
-
def plugins(fetch_handlers=None):
if not fetch_handlers:
fetch_handlers = FETCH_HANDLERS
@@ -306,3 +318,40 @@ def plugins(fetch_handlers=None):
log("FetchHandler {} not found, skipping plugin".format(
handler_name))
return plugin_list
+
+
+def _run_apt_command(cmd, fatal=False):
+ """
+ Run an APT command, checking output and retrying if the fatal flag is set
+ to True.
+
+ :param: cmd: str: The apt command to run.
+ :param: fatal: bool: Whether the command's output should be checked and
+ retried.
+ """
+ env = os.environ.copy()
+
+ if 'DEBIAN_FRONTEND' not in env:
+ env['DEBIAN_FRONTEND'] = 'noninteractive'
+
+ if fatal:
+ retry_count = 0
+ result = None
+
+ # If the command is considered "fatal", we need to retry if the apt
+ # lock was not acquired.
+
+ while result is None or result == APT_NO_LOCK:
+ try:
+ result = subprocess.check_call(cmd, env=env)
+ except subprocess.CalledProcessError, e:
+ retry_count = retry_count + 1
+ if retry_count > APT_NO_LOCK_RETRY_COUNT:
+ raise
+ result = e.returncode
+ log("Couldn't acquire DPKG lock. Will retry in {} seconds."
+ "".format(APT_NO_LOCK_RETRY_DELAY))
+ time.sleep(APT_NO_LOCK_RETRY_DELAY)
+
+ else:
+ subprocess.call(cmd, env=env)
diff --git a/hooks/charmhelpers/fetch/bzrurl.py b/hooks/charmhelpers/fetch/bzrurl.py
index db5dd9a..0e580e4 100644
--- a/hooks/charmhelpers/fetch/bzrurl.py
+++ b/hooks/charmhelpers/fetch/bzrurl.py
@@ -39,7 +39,8 @@ class BzrUrlFetchHandler(BaseFetchHandler):
def install(self, source):
url_parts = self.parse_url(source)
branch_name = url_parts.path.strip("/").split("/")[-1]
- dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name)
+ dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
+ branch_name)
if not os.path.exists(dest_dir):
mkdir(dest_dir, perms=0755)
try:
diff --git a/hooks/hooks.py b/hooks/hooks.py
index d8f7c99..6fc469e 100755
--- a/hooks/hooks.py
+++ b/hooks/hooks.py
@@ -29,8 +29,22 @@ from charmhelpers.fetch import (
apt_update,
apt_install
)
+
from charmhelpers.core.hookenv import (
- config
+ config,
+ unit_get,
+ relation_get,
+ relation_set,
+ relations_for_id,
+ relations_of_type,
+ open_port,
+ close_port,
+)
+
+from charmhelpers.core.hookenv import log as juju_log
+
+from charmhelpers.core.host import (
+ service,
)
@@ -47,201 +61,6 @@ default_max_tries = 20
# Supporting functions
###############################################################################
-
-#------------------------------------------------------------------------------
-# juju_log: calls juju-log and records the message defined by the message
-# variable
-#------------------------------------------------------------------------------
-def juju_log(message=None):
- return (subprocess.call(['juju-log', str(message)]) == 0)
-
-
-#------------------------------------------------------------------------------
-# service: Analogous to calling service on the command line to start/stop
-# and get status of a service/daemon.
-# Parameters:
-# service_name: The name of the service to act on.
-# service_action: The action (start, stop, status, etc.)
-# Returns: True if the command was successfully executed or False on
-# error.
-#------------------------------------------------------------------------------
-def service(service_name=None, service_action=None):
- juju_log("service: %s, action: %s" % (service_name, service_action))
- if service_name is not None and service_action is not None:
- retVal = subprocess.call(
- ["service", service_name, service_action]) == 0
- else:
- retVal = False
- juju_log("service %s %s returns: %s" %
- (service_name, service_action, retVal))
- return(retVal)
-
-
-#------------------------------------------------------------------------------
-# unit_get: Convenience function wrapping the juju command unit-get
-# Parameter:
-# setting_name: The setting to get out of unit_get
-# Returns: The requested information or None on error
-#------------------------------------------------------------------------------
-def unit_get(setting_name=None):
- juju_log("unit_get: %s" % setting_name)
- try:
- cmd_line = ['unit-get', '--format=json']
- if setting_name is not None:
- cmd_line.append(setting_name)
- unit_data = json.loads(subprocess.check_output(cmd_line))
- except Exception, e:
- subprocess.call(['juju-log', str(e)])
- unit_data = None
- finally:
- juju_log("unit_get %s returns: %s" % (setting_name, unit_data))
- return(unit_data)
-
-
-#------------------------------------------------------------------------------
-# config_get: Returns a dictionary containing all of the config information
-# Optional parameter: scope
-# scope: limits the scope of the returned configuration to the
-# desired config item.
-#------------------------------------------------------------------------------
-def config_get(scope=None):
- juju_log("config_get: %s" % scope)
- try:
- config_cmd_line = ['config-get']
- if scope is not None:
- config_cmd_line.append(scope)
- config_cmd_line.append('--format=json')
- config_data = json.loads(subprocess.check_output(config_cmd_line))
- except Exception, e:
- juju_log(str(e))
- config_data = None
- finally:
- juju_log("config_get: %s returns: %s" % (scope, config_data))
- return(config_data)
-
-
-#------------------------------------------------------------------------------
-# relation_get: Returns a dictionary containing the relation information
-# Optional parameters: scope, relation_id
-# scope: limits the scope of the returned data to the
-# desired item.
-# unit_name: limits the data ( and optionally the scope )
-# to the specified unit
-# relation_id: specify relation id for out of context usage.
-#------------------------------------------------------------------------------
-def relation_get(scope=None, unit_name=None, relation_id=None,
- wait_for=default_wait_for, max_tries=default_max_tries):
- juju_log("relation_get: scope: %s, unit_name: %s, relation_id: %s" %
- (scope, unit_name, relation_id))
- current_try = 0
- try:
- relation_cmd_line = ['relation-get', '--format=json']
- if relation_id is not None:
- relation_cmd_line.extend(('-r', relation_id))
- if scope is not None:
- relation_cmd_line.append(scope)
- else:
- relation_cmd_line.append('')
- if unit_name is not None:
- relation_cmd_line.append(unit_name)
- relation_data = json.loads(subprocess.check_output(relation_cmd_line))
-
-# while relation_data is None and current_try < max_tries:
-# time.sleep(wait_for)
-# relation_data = json.loads(subprocess.check_output(relation_cmd_line))
-# current_try += 1
-
- except Exception, e:
- juju_log(str(e))
- relation_data = None
- finally:
- juju_log("relation_get returns: %s" % relation_data)
- return(relation_data)
-
-
-#------------------------------------------------------------------------------
-# relation_set: Convenience function wrapping the juju command relation-set
-# Parameters:
-# key_value_pairs: A dictionary containing the key/value pairs
-# to be set.
-# Optional Parameter:
-# relation_id: The relation id to use
-# Returns: True on success or False on failure
-#------------------------------------------------------------------------------
-def relation_set(key_value_pairs=None, relation_id=None):
- juju_log("relation_set: kv: %s, relation_id: %s" %
- (key_value_pairs, relation_id))
- if key_value_pairs is None or not isinstance(key_value_pairs, dict):
- juju_log("relation_set: Invalid key_value_pais.")
- return(False)
- try:
- relation_cmd_line = ['relation-set', '--format=json']
- if relation_id is not None:
- relation_cmd_line.append('-r %s' % relation_id)
- for (key, value) in key_value_pairs.items():
- relation_cmd_line.append('%s=%s' % (key, value))
- retVal = (subprocess.call(relation_cmd_line) == 0)
- except Exception, e:
- juju_log(str(e))
- retVal = False
- finally:
- juju_log("relation_set returns: %s" % retVal)
- return(retVal)
-
-
-def relation_list(relation_id=None, wait_for=default_wait_for,
- max_tries=default_max_tries):
- juju_log("relation_list: relation_id: %s" % relation_id)
- current_try = 0
- try:
- relation_cmd_line = ['relation-list', '--format=json']
- if relation_id is not None:
- relation_cmd_line.append('-r %s' % relation_id)
- relation_data = json.loads(subprocess.check_output(relation_cmd_line))
-
-# while relation_data is None and current_try < max_tries:
-# time.sleep(wait_for)
-# relation_data = json.loads(subprocess.check_output(relation_cmd_line))
-# current_try += 1
-
- except Exception, e:
- juju_log(str(e))
- relation_data = None
- finally:
- juju_log("relation_id %s returns: %s" % (relation_id, relation_data))
- return(relation_data)
-
-
-#------------------------------------------------------------------------------
-# open_port: Convenience function to open a port in juju to
-# expose a service
-#------------------------------------------------------------------------------
-def open_port(port=None, protocol="TCP"):
- juju_log("open_port: port: %d protocol: %s" % (int(port), protocol))
- if port is None:
- retVal = False
- else:
- retVal = subprocess.call(['open-port', "%d/%s" %
- (int(port), protocol)]) == 0
- juju_log("open_port %d/%s returns: %s" % (int(port), protocol, retVal))
- return(retVal)
-
-
-#------------------------------------------------------------------------------
-# close_port: Convenience function to close a port in juju to
-# unexpose a service
-#------------------------------------------------------------------------------
-def close_port(port=None, protocol="TCP"):
- juju_log("close_port: port: %d protocol: %s" % (int(port), protocol))
- if port is None:
- retVal = False
- else:
- retVal = subprocess.call(['close-port', "%d/%s" %
- (int(port), protocol)]) == 0
- juju_log("close_port %d/%s returns: %s" % (int(port), protocol, retVal))
- return(retVal)
-
-
def port_check(host=None, port=None, protocol='TCP'):
if host is None or port is None:
juju_log("port_check: host and port must be defined.")
@@ -643,7 +462,7 @@ def enable_arbiter(master_node=None, host=None):
def configsvr_status(wait_for=default_wait_for, max_tries=default_max_tries):
- config_data = config_get()
+ config_data = config()
current_try = 0
while (process_check_pidfile('/var/run/mongodb/configsvr.pid') !=
(None, None)) and not port_check(
@@ -671,7 +490,7 @@ def disable_configsvr(port=None):
juju_log("disable_configsvr: port not defined.")
return(False)
try:
- config_server_port = config_get('config_server_port')
+ config_server_port = config('config_server_port')
pid = open('/var/run/mongodb/configsvr.pid').read()
os.kill(int(pid), signal.SIGTERM)
os.unlink('/var/run/mongodb/configsvr.pid')
@@ -733,7 +552,7 @@ max_tries=default_max_tries):
def mongos_status(wait_for=default_wait_for, max_tries=default_max_tries):
- config_data = config_get()
+ config_data = config()
current_try = 0
while (process_check_pidfile('/var/run/mongodb/mongos.pid') !=
(None, None)) and not port_check(
@@ -825,17 +644,17 @@ def load_config_servers(mongos_list=None):
def restart_mongod(wait_for=default_wait_for, max_tries=default_max_tries):
my_hostname = unit_get('public-address')
- my_port = config_get('port')
+ my_port = config('port')
current_try = 0
- service('mongodb', 'stop')
+ service('stop', 'mongodb')
if os.path.exists('/var/lib/mongodb/mongod.lock'):
os.remove('/var/lib/mongodb/mongod.lock')
- if not service('mongodb', 'start'):
+ if not service('start', 'mongodb'):
return False
- while (service('mongodb', 'status') and
+ while (service('status', 'mongodb') and
not port_check(my_hostname, my_port) and
current_try < max_tries):
juju_log(
@@ -845,14 +664,14 @@ def restart_mongod(wait_for=default_wait_for, max_tries=default_max_tries):
current_try += 1
return(
- (service('mongodb', 'status') == port_check(my_hostname, my_port))
+ (service('status', 'mongodb') == port_check(my_hostname, my_port))
is True)
def backup_cronjob(disable=False):
"""Generate the cronjob to backup with mongodbump."""
juju_log('Setting up cronjob')
- config_data = config_get()
+ config_data = config()
backupdir = config_data['backup_directory']
bind_ip = config_data['bind_ip']
cron_file = '/etc/cron.d/mongodb'
@@ -912,7 +731,7 @@ def install_hook():
def config_changed():
juju_log("Entering config_changed")
print "Entering config_changed"
- config_data = config_get()
+ config_data = config()
print "config_data: ", config_data
mongodb_config = open(default_mongodb_config).read()
@@ -1054,7 +873,7 @@ def start_hook():
def stop_hook():
juju_log("stop_hook")
try:
- retVal = service('mongodb', 'stop')
+ retVal = service('stop', 'mongodb')
os.remove('/var/lib/mongodb/mongod.lock')
#FIXME Need to check if this is still needed
except Exception, e:
@@ -1068,14 +887,14 @@ def stop_hook():
def database_relation_joined():
juju_log("database_relation_joined")
my_hostname = unit_get('public-address')
- my_port = config_get('port')
- my_replset = config_get('replicaset')
+ my_port = config('port')
+ my_replset = config('replicaset')
juju_log("my_hostname: %s" % my_hostname)
juju_log("my_port: %s" % my_port)
juju_log("my_replset: %s" % my_replset)
return(relation_set(
{
- 'hostname': my_hostname,
+            'hostname': my_hostname,
'port': my_port,
'replset': my_replset,
'type': 'database',
@@ -1085,31 +904,35 @@ def database_relation_joined():
def replica_set_relation_joined():
juju_log("replica_set_relation_joined")
my_hostname = unit_get('public-address')
- my_port = config_get('port')
- my_replset = config_get('replicaset')
+ my_port = config('port')
+ my_replset = config('replicaset')
my_install_order = os.environ['JUJU_UNIT_NAME'].split('/')[1]
juju_log("my_hostname: %s" % my_hostname)
juju_log("my_port: %s" % my_port)
juju_log("my_replset: %s" % my_replset)
juju_log("my_install_order: %s" % my_install_order)
- return(enable_replset(my_replset) ==
- restart_mongod() ==
- relation_set(
- {
- 'hostname': my_hostname,
- 'port': my_port,
- 'replset': my_replset,
- 'install-order': my_install_order,
- 'type': 'replset',
- }))
+ enabled = enable_replset(my_replset)
+ restarted = restart_mongod()
+
+ relation_set(None, {
+ 'hostname': my_hostname,
+ 'port': my_port,
+ 'replset': my_replset,
+ 'install-order': my_install_order,
+ 'type': 'replset',
+ })
+
+ if enabled and restarted:
+ return True
+ return False
def replica_set_relation_changed():
juju_log("replica_set_relation_changed")
my_hostname = unit_get('public-address')
- my_port = config_get('port')
+ my_port = config('port')
my_install_order = os.environ['JUJU_UNIT_NAME'].split('/')[1]
- my_replicaset_master = config_get('replicaset_master')
+ my_replicaset_master = config('replicaset_master')
# If we are joining an existing replicaset cluster, just join and leave.
if my_replicaset_master != "auto":
@@ -1121,11 +944,11 @@ def replica_set_relation_changed():
master_install_order = my_install_order
# Check the nodes in the relation to find the master
- for member in relation_list():
- juju_log("replica_set_relation_changed: member: %s" % member)
- hostname = relation_get('hostname', member)
- port = relation_get('port', member)
- install_order = relation_get('install-order', member)
+ for member in relations_of_type('replica-set'):
+ juju_log("replica_set_relation_changed: member: %s" % member['__unit__'])
+ hostname = relation_get('hostname', member['__unit__'])
+ port = relation_get('port', member['__unit__'])
+ install_order = relation_get('install-order', member['__unit__'])
juju_log("replica_set_relation_changed: install_order: %s" % install_order)
if install_order is None:
juju_log("replica_set_relation_changed: install_order is None. relation is not ready")
@@ -1139,9 +962,9 @@ def replica_set_relation_changed():
init_replset("%s:%s" % (master_hostname, master_port))
# Add the rest of the nodes to the replset
- for member in relation_list():
- hostname = relation_get('hostname', member)
- port = relation_get('port', member)
+ for member in relations_of_type('replica-set'):
+ hostname = relation_get('hostname', member['__unit__'])
+ port = relation_get('port', member['__unit__'])
if master_hostname != hostname:
if hostname == my_hostname:
subprocess.call(['mongo',
@@ -1156,13 +979,14 @@ def replica_set_relation_changed():
join_replset("%s:%s" % (master_hostname, master_port),
"%s:%s" % (my_hostname, my_port))
+ # should this always return true?
return(True)
def configsvr_relation_joined():
juju_log("configsvr_relation_joined")
my_hostname = unit_get('public-address')
- my_port = config_get('config_server_port')
+ my_port = config('config_server_port')
my_install_order = os.environ['JUJU_UNIT_NAME'].split('/')[1]
return(relation_set(
{
@@ -1175,7 +999,7 @@ def configsvr_relation_joined():
def configsvr_relation_changed():
juju_log("configsvr_relation_changed")
- config_data = config_get()
+ config_data = config()
my_port = config_data['config_server_port']
disable_configsvr(my_port)
retVal = enable_configsvr(config_data)
@@ -1186,7 +1010,7 @@ def configsvr_relation_changed():
def mongos_relation_joined():
juju_log("mongos_relation_joined")
my_hostname = unit_get('public-address')
- my_port = config_get('mongos_port')
+ my_port = config('mongos_port')
my_install_order = os.environ['JUJU_UNIT_NAME'].split('/')[1]
return(relation_set(
{
@@ -1199,9 +1023,9 @@ def mongos_relation_joined():
def mongos_relation_changed():
juju_log("mongos_relation_changed")
- config_data = config_get()
+ config_data = config()
retVal = False
- for member in relation_list():
+    for member in relations_for_id():
hostname = relation_get('hostname', member)
port = relation_get('port', member)
rel_type = relation_get('type', member)
@@ -1226,7 +1050,7 @@ def mongos_relation_changed():
if mongos_ready():
mongos_host = "%s:%s" % (
unit_get('public-address'),
- config_get('mongos_port'))
+ config('mongos_port'))
shard_command1 = "sh.addShard(\"%s:%s\")" % (hostname, port)
retVal1 = mongo_client(mongos_host, shard_command1)
replicaset = relation_get('replset', member)
@@ -1244,7 +1068,7 @@ def mongos_relation_changed():
def mongos_relation_broken():
# config_servers = load_config_servers(default_mongos_list)
-# for member in relation_list():
+# for member in relation_for_id():
# hostname = relation_get('hostname', member)
# port = relation_get('port', member)
# if '%s:%s' % (hostname, port) in config_servers:
@@ -1277,7 +1101,7 @@ def run(command, exit_on_error=True):
#
#------------------------------
def volume_get_volid_from_volume_map():
- config_data = config_get()
+ config_data = config()
volume_map = {}
try:
volume_map = yaml.load(config_data['volume-map'].strip())
@@ -1316,7 +1140,7 @@ def volume_mount_point_from_volid(volid):
# @returns volid
# None config state is invalid - we should not serve
def volume_get_volume_id():
- config_data = config_get()
+ config_data = config()
ephemeral_storage = config_data['volume-ephemeral-storage']
volid = volume_get_volid_from_volume_map()
juju_unit_name = os.environ['JUJU_UNIT_NAME']
@@ -1367,7 +1191,7 @@ def volume_get_all_mounted():
# - manipulate /var/lib/mongodb/VERSION/CLUSTER symlink
#------------------------------------------------------------------------------
def config_changed_volume_apply():
- config_data = config_get()
+ config_data = config()
data_directory_path = config_data["dbpath"]
assert(data_directory_path)
volid = volume_get_volume_id()
diff --git a/metadata.yaml b/metadata.yaml
index f62077b..8ace6c0 100644
--- a/metadata.yaml
+++ b/metadata.yaml
@@ -1,6 +1,9 @@
name: mongodb
-maintainer: Juan Negron <juan.negron@canonical.com>
-summary: MongoDB (from humongous) is an open-source document database
+summary: An open-source document database, and the leading NoSQL database
+maintainers:
+ - Juan Negron <juan.negron@canonical.com>
+ - Marco Ceppi <marco@ceppi.net>
+ - Charles Butler <chuck@dasroot.net>
description: |
MongoDB is a high-performance, open source, schema-free document-
oriented data store that's easy to deploy, manage and use. It's
diff --git a/scripts/charm-helpers-sync b/scripts/charm-helpers-sync
new file mode 100755
index 0000000..03bf64d
--- /dev/null
+++ b/scripts/charm-helpers-sync
@@ -0,0 +1,225 @@
+#!/usr/bin/python
+#
+# Copyright 2013 Canonical Ltd.
+
+# Authors:
+# Adam Gandelman <adamg@ubuntu.com>
+#
+
+import logging
+import optparse
+import os
+import subprocess
+import shutil
+import sys
+import tempfile
+import yaml
+
+from fnmatch import fnmatch
+
+CHARM_HELPERS_BRANCH = 'lp:charm-helpers'
+
+
+def parse_config(conf_file):
+ if not os.path.isfile(conf_file):
+ logging.error('Invalid config file: %s.' % conf_file)
+ return False
+ return yaml.load(open(conf_file).read())
+
+
+def clone_helpers(work_dir, branch):
+ dest = os.path.join(work_dir, 'charm-helpers')
+ logging.info('Checking out %s to %s.' % (branch, dest))
+ cmd = ['bzr', 'checkout', '--lightweight', branch, dest]
+ subprocess.check_call(cmd)
+ return dest
+
+
+def _module_path(module):
+ return os.path.join(*module.split('.'))
+
+
+def _src_path(src, module):
+ return os.path.join(src, 'charmhelpers', _module_path(module))
+
+
+def _dest_path(dest, module):
+ return os.path.join(dest, _module_path(module))
+
+
+def _is_pyfile(path):
+ return os.path.isfile(path + '.py')
+
+
+def ensure_init(path):
+ '''
+ ensure directories leading up to path are importable, omitting
+ parent directory, eg path='/hooks/helpers/foo'/:
+ hooks/
+ hooks/helpers/__init__.py
+ hooks/helpers/foo/__init__.py
+ '''
+ for d, dirs, files in os.walk(os.path.join(*path.split('/')[:2])):
+ _i = os.path.join(d, '__init__.py')
+ if not os.path.exists(_i):
+ logging.info('Adding missing __init__.py: %s' % _i)
+ open(_i, 'wb').close()
+
+
+def sync_pyfile(src, dest):
+ src = src + '.py'
+ src_dir = os.path.dirname(src)
+ logging.info('Syncing pyfile: %s -> %s.' % (src, dest))
+ if not os.path.exists(dest):
+ os.makedirs(dest)
+ shutil.copy(src, dest)
+ if os.path.isfile(os.path.join(src_dir, '__init__.py')):
+ shutil.copy(os.path.join(src_dir, '__init__.py'),
+ dest)
+ ensure_init(dest)
+
+
+def get_filter(opts=None):
+ opts = opts or []
+ if 'inc=*' in opts:
+ # do not filter any files, include everything
+ return None
+
+ def _filter(dir, ls):
+ incs = [opt.split('=').pop() for opt in opts if 'inc=' in opt]
+ _filter = []
+ for f in ls:
+ _f = os.path.join(dir, f)
+
+ if not os.path.isdir(_f) and not _f.endswith('.py') and incs:
+ if True not in [fnmatch(_f, inc) for inc in incs]:
+ logging.debug('Not syncing %s, does not match include '
+ 'filters (%s)' % (_f, incs))
+ _filter.append(f)
+ else:
+ logging.debug('Including file, which matches include '
+ 'filters (%s): %s' % (incs, _f))
+ elif (os.path.isfile(_f) and not _f.endswith('.py')):
+ logging.debug('Not syncing file: %s' % f)
+ _filter.append(f)
+ elif (os.path.isdir(_f) and not
+ os.path.isfile(os.path.join(_f, '__init__.py'))):
+ logging.debug('Not syncing directory: %s' % f)
+ _filter.append(f)
+ return _filter
+ return _filter
+
+
+def sync_directory(src, dest, opts=None):
+ if os.path.exists(dest):
+ logging.debug('Removing existing directory: %s' % dest)
+ shutil.rmtree(dest)
+ logging.info('Syncing directory: %s -> %s.' % (src, dest))
+
+ shutil.copytree(src, dest, ignore=get_filter(opts))
+ ensure_init(dest)
+
+
+def sync(src, dest, module, opts=None):
+ if os.path.isdir(_src_path(src, module)):
+ sync_directory(_src_path(src, module), _dest_path(dest, module), opts)
+ elif _is_pyfile(_src_path(src, module)):
+ sync_pyfile(_src_path(src, module),
+ os.path.dirname(_dest_path(dest, module)))
+ else:
+ logging.warn('Could not sync: %s. Neither a pyfile or directory, '
+ 'does it even exist?' % module)
+
+
+def parse_sync_options(options):
+ if not options:
+ return []
+ return options.split(',')
+
+
+def extract_options(inc, global_options=None):
+ global_options = global_options or []
+ if global_options and isinstance(global_options, basestring):
+ global_options = [global_options]
+ if '|' not in inc:
+ return (inc, global_options)
+ inc, opts = inc.split('|')
+ return (inc, parse_sync_options(opts) + global_options)
+
+
+def sync_helpers(include, src, dest, options=None):
+ if not os.path.isdir(dest):
+ os.makedirs(dest)
+
+ global_options = parse_sync_options(options)
+
+ for inc in include:
+ if isinstance(inc, str):
+ inc, opts = extract_options(inc, global_options)
+ sync(src, dest, inc, opts)
+ elif isinstance(inc, dict):
+ # could also do nested dicts here.
+ for k, v in inc.iteritems():
+ if isinstance(v, list):
+ for m in v:
+ inc, opts = extract_options(m, global_options)
+ sync(src, dest, '%s.%s' % (k, inc), opts)
+
+if __name__ == '__main__':
+ parser = optparse.OptionParser()
+ parser.add_option('-c', '--config', action='store', dest='config',
+ default=None, help='helper config file')
+ parser.add_option('-D', '--debug', action='store_true', dest='debug',
+ default=False, help='debug')
+ parser.add_option('-b', '--branch', action='store', dest='branch',
+ help='charm-helpers bzr branch (overrides config)')
+ parser.add_option('-d', '--destination', action='store', dest='dest_dir',
+ help='sync destination dir (overrides config)')
+ (opts, args) = parser.parse_args()
+
+ if opts.debug:
+ logging.basicConfig(level=logging.DEBUG)
+ else:
+ logging.basicConfig(level=logging.INFO)
+
+ if opts.config:
+ logging.info('Loading charm helper config from %s.' % opts.config)
+ config = parse_config(opts.config)
+ if not config:
+ logging.error('Could not parse config from %s.' % opts.config)
+ sys.exit(1)
+ else:
+ config = {}
+
+ if 'branch' not in config:
+ config['branch'] = CHARM_HELPERS_BRANCH
+ if opts.branch:
+ config['branch'] = opts.branch
+ if opts.dest_dir:
+ config['destination'] = opts.dest_dir
+
+ if 'destination' not in config:
+ logging.error('No destination dir. specified as option or config.')
+ sys.exit(1)
+
+ if 'include' not in config:
+ if not args:
+ logging.error('No modules to sync specified as option or config.')
+ sys.exit(1)
+ config['include'] = []
+ [config['include'].append(a) for a in args]
+
+ sync_options = None
+ if 'options' in config:
+ sync_options = config['options']
+ tmpd = tempfile.mkdtemp()
+ try:
+ checkout = clone_helpers(tmpd, config['branch'])
+ sync_helpers(config['include'], checkout, config['destination'],
+ options=sync_options)
+ except Exception, e:
+ logging.error("Could not sync: %s" % e)
+ raise e
+ finally:
+ logging.debug('Cleaning up %s' % tmpd)
+ shutil.rmtree(tmpd)