Merge lp:~openstack-charmers/charms/precise/keystone/ha-support into lp:~charmers/charms/precise/keystone/trunk

Proposed by Adam Gandelman
Status: Merged
Merged at revision: 42
Proposed branch: lp:~openstack-charmers/charms/precise/keystone/ha-support
Merge into: lp:~charmers/charms/precise/keystone/trunk
Diff against target: 2957 lines (+2068/-364)
16 files modified
config.yaml (+41/-0)
hooks/keystone_hooks.py (+363/-92)
hooks/keystone_ssl.py (+301/-0)
hooks/keystone_utils.py (+296/-234)
hooks/lib/apache_utils.py (+196/-0)
hooks/lib/cluster_utils.py (+130/-0)
hooks/lib/haproxy_utils.py (+55/-0)
hooks/lib/openstack_common.py (+74/-37)
hooks/lib/unison.py (+220/-0)
hooks/lib/utils.py (+332/-0)
hooks/manager.py (+1/-0)
metadata.yaml (+6/-0)
revision (+1/-1)
scripts/add_to_cluster (+13/-0)
scripts/remove_from_cluster (+4/-0)
templates/haproxy.cfg (+35/-0)
To merge this branch: bzr merge lp:~openstack-charmers/charms/precise/keystone/ha-support
Reviewer Review Type Date Requested Status
charmers Pending
Review via email: mp+166344@code.launchpad.net

Description of the change

* Updated for Grizzly.

* HA support via hacluster charm.

* Allows remote services to request roles be created, required for ceilometer and swift.

To post a comment you must log in.

Preview Diff

[H/L] Next/Prev Comment, [J/K] Next/Prev File, [N/P] Next/Prev Hunk
1=== modified file 'config.yaml'
2--- config.yaml 2012-10-12 17:26:48 +0000
3+++ config.yaml 2013-05-29 18:13:26 +0000
4@@ -26,6 +26,10 @@
5 default: "/etc/keystone/keystone.conf"
6 type: string
7 description: "Location of keystone configuration file"
8+ log-level:
9+ default: WARNING
10+ type: string
11+ description: Log level (WARNING, INFO, DEBUG, ERROR)
12 service-port:
13 default: 5000
14 type: int
15@@ -75,3 +79,40 @@
16 default: "keystone"
17 type: string
18 description: "Database username"
19+ region:
20+ default: RegionOne
21+ type: string
22+ description: "OpenStack Region(s) - separate multiple regions with a single space"
23+ # HA configuration settings
24+ vip:
25+ type: string
26+ description: "Virtual IP to use to front keystone in ha configuration"
27+ vip_iface:
28+ type: string
29+ default: eth0
30+ description: "Network Interface where to place the Virtual IP"
31+ vip_cidr:
32+ type: int
33+ default: 24
34+ description: "Netmask that will be used for the Virtual IP"
35+ ha-bindiface:
36+ type: string
37+ default: eth0
38+ description: |
39+ Default network interface on which HA cluster will bind to communicate
40+ with the other members of the HA Cluster.
41+ ha-mcastport:
42+ type: int
43+ default: 5403
44+ description: |
45+ Default multicast port number that will be used to communicate between
46+ HA Cluster nodes.
47+ # PKI enablement and configuration (Grizzly and beyond)
48+ enable-pki:
49+ default: "false"
50+ type: string
51+ description: "Enable PKI token signing (Grizzly and beyond)"
52+ https-service-endpoints:
53+ default: "False"
54+ type: string
55+ description: "Manage SSL certificates for all service endpoints."
56
57=== added symlink 'hooks/cluster-relation-changed'
58=== target is u'keystone_hooks.py'
59=== added symlink 'hooks/cluster-relation-departed'
60=== target is u'keystone_hooks.py'
61=== added symlink 'hooks/cluster-relation-joined'
62=== target is u'keystone_hooks.py'
63=== modified symlink 'hooks/config-changed'
64=== target changed u'keystone-hooks' => u'keystone_hooks.py'
65=== added symlink 'hooks/ha-relation-changed'
66=== target is u'keystone_hooks.py'
67=== added symlink 'hooks/ha-relation-joined'
68=== target is u'keystone_hooks.py'
69=== modified symlink 'hooks/identity-service-relation-changed'
70=== target changed u'keystone-hooks' => u'keystone_hooks.py'
71=== modified symlink 'hooks/identity-service-relation-joined'
72=== target changed u'keystone-hooks' => u'keystone_hooks.py'
73=== modified symlink 'hooks/install'
74=== target changed u'keystone-hooks' => u'keystone_hooks.py'
75=== renamed file 'hooks/keystone-hooks' => 'hooks/keystone_hooks.py'
76--- hooks/keystone-hooks 2012-12-12 03:52:01 +0000
77+++ hooks/keystone_hooks.py 2013-05-29 18:13:26 +0000
78@@ -1,14 +1,53 @@
79 #!/usr/bin/python
80
81-import sys
82 import time
83-
84-from utils import *
85-from lib.openstack_common import *
86+import urlparse
87+
88+from base64 import b64encode
89+
90+from keystone_utils import (
91+ config_dirty,
92+ config_get,
93+ execute,
94+ update_config_block,
95+ set_admin_token,
96+ ensure_initial_admin,
97+ create_service_entry,
98+ create_endpoint_template,
99+ create_role,
100+ get_admin_token,
101+ get_service_password,
102+ create_user,
103+ grant_role,
104+ get_ca,
105+ synchronize_service_credentials,
106+ do_openstack_upgrade,
107+ configure_pki_tokens,
108+ SSH_USER,
109+ SSL_DIR,
110+ CLUSTER_RES,
111+ https
112+ )
113+
114+from lib.openstack_common import (
115+ get_os_codename_install_source,
116+ get_os_codename_package,
117+ get_os_version_codename,
118+ get_os_version_package,
119+ save_script_rc
120+ )
121+import lib.unison as unison
122+import lib.utils as utils
123+import lib.cluster_utils as cluster
124+import lib.haproxy_utils as haproxy
125
126 config = config_get()
127
128-packages = "keystone python-mysqldb pwgen"
129+packages = [
130+ "keystone", "python-mysqldb", "pwgen",
131+ "haproxy", "python-jinja2", "openssl", "unison",
132+ "python-sqlalchemy"
133+ ]
134 service = "keystone"
135
136 # used to verify joined services are valid openstack components.
137@@ -46,16 +85,25 @@
138 "quantum": {
139 "type": "network",
140 "desc": "Quantum Networking Service"
141+ },
142+ "oxygen": {
143+ "type": "oxygen",
144+ "desc": "Oxygen Cloud Image Service"
145+ },
146+ "ceilometer": {
147+ "type": "metering",
148+ "desc": "Ceilometer Metering Service"
149 }
150 }
151
152+
153 def install_hook():
154- if config["openstack-origin"] != "distro":
155- configure_installation_source(config["openstack-origin"])
156- execute("apt-get update", die=True)
157- execute("apt-get -y install %s" % packages, die=True, echo=True)
158- update_config_block('DEFAULT', public_port=config["service-port"])
159- update_config_block('DEFAULT', admin_port=config["admin-port"])
160+ utils.configure_source()
161+ utils.install(*packages)
162+ update_config_block('DEFAULT',
163+ public_port=cluster.determine_api_port(config["service-port"]))
164+ update_config_block('DEFAULT',
165+ admin_port=cluster.determine_api_port(config["admin-port"]))
166 set_admin_token(config['admin-token'])
167
168 # set all backends to use sql+sqlite, if they are not already by default
169@@ -69,90 +117,141 @@
170 driver='keystone.token.backends.sql.Token')
171 update_config_block('ec2',
172 driver='keystone.contrib.ec2.backends.sql.Ec2')
173- execute("service keystone stop", echo=True)
174+
175+ utils.stop('keystone')
176 execute("keystone-manage db_sync")
177- execute("service keystone start", echo=True)
178+ utils.start('keystone')
179+
180+ # ensure user + permissions for peer relations that
181+ # may be syncing data there via SSH_USER.
182+ unison.ensure_user(user=SSH_USER, group='keystone')
183+ execute("chmod -R g+wrx /var/lib/keystone/")
184+
185 time.sleep(5)
186 ensure_initial_admin(config)
187
188+
189 def db_joined():
190- relation_data = { "database": config["database"],
191- "username": config["database-user"],
192- "hostname": config["hostname"] }
193- relation_set(relation_data)
194+ relation_data = {
195+ "database": config["database"],
196+ "username": config["database-user"],
197+ "hostname": config["hostname"]
198+ }
199+ utils.relation_set(**relation_data)
200+
201
202 def db_changed():
203- relation_data = relation_get_dict()
204+ relation_data = utils.relation_get_dict()
205 if ('password' not in relation_data or
206- 'private-address' not in relation_data):
207- juju_log("private-address or password not set. Peer not ready, exit 0")
208- exit(0)
209+ 'db_host' not in relation_data):
210+ utils.juju_log('INFO',
211+ "db_host or password not set. Peer not ready, exit 0")
212+ return
213+
214 update_config_block('sql', connection="mysql://%s:%s@%s/%s" %
215 (config["database-user"],
216 relation_data["password"],
217- relation_data["private-address"],
218+ relation_data["db_host"],
219 config["database"]))
220- execute("service keystone stop", echo=True)
221- execute("keystone-manage db_sync", echo=True)
222- execute("service keystone start")
223+
224+ if cluster.eligible_leader(CLUSTER_RES):
225+ utils.juju_log('INFO',
226+ 'Cluster leader, performing db-sync')
227+ execute("keystone-manage db_sync", echo=True)
228+
229+ if config_dirty():
230+ utils.restart('keystone')
231+
232 time.sleep(5)
233- ensure_initial_admin(config)
234-
235- # If the backend database has been switched to something new and there
236- # are existing identity-service relations,, service entries need to be
237- # recreated in the new database. Re-executing identity-service-changed
238- # will do this.
239- for id in relation_ids(relation_name='identity-service'):
240- for unit in relation_list(relation_id=id):
241- juju_log("Re-exec'ing identity-service-changed for: %s - %s" %
242- (id, unit))
243- identity_changed(relation_id=id, remote_unit=unit)
244+
245+ if cluster.eligible_leader(CLUSTER_RES):
246+ ensure_initial_admin(config)
247+ # If the backend database has been switched to something new and there
249+ # are existing identity-service relations, service entries need to be
249+ # recreated in the new database. Re-executing identity-service-changed
250+ # will do this.
251+ for rid in utils.relation_ids('identity-service'):
252+ for unit in utils.relation_list(rid=rid):
253+ utils.juju_log('INFO',
254+ "Re-exec'ing identity-service-changed"
255+ " for: %s - %s" % (rid, unit))
256+ identity_changed(relation_id=rid, remote_unit=unit)
257+
258+
259+def ensure_valid_service(service):
260+ if service not in valid_services.keys():
261+ utils.juju_log('WARNING',
262+ "Invalid service requested: '%s'" % service)
263+ utils.relation_set(admin_token=-1)
264+ return
265+
266+
267+def add_endpoint(region, service, publicurl, adminurl, internalurl):
268+ desc = valid_services[service]["desc"]
269+ service_type = valid_services[service]["type"]
270+ create_service_entry(service, service_type, desc)
271+ create_endpoint_template(region=region, service=service,
272+ publicurl=publicurl,
273+ adminurl=adminurl,
274+ internalurl=internalurl)
275+
276
277 def identity_joined():
278 """ Do nothing until we get information about requested service """
279 pass
280
281+
282 def identity_changed(relation_id=None, remote_unit=None):
283 """ A service has advertised its API endpoints, create an entry in the
284 service catalog.
285 Optionally allow this hook to be re-fired for an existing
286 relation+unit, for context see see db_changed().
287 """
288- def ensure_valid_service(service):
289- if service not in valid_services.keys():
290- juju_log("WARN: Invalid service requested: '%s'" % service)
291- realtion_set({ "admin_token": -1 })
292- return
293-
294- def add_endpoint(region, service, public_url, admin_url, internal_url):
295- desc = valid_services[service]["desc"]
296- service_type = valid_services[service]["type"]
297- create_service_entry(service, service_type, desc)
298- create_endpoint_template(region=region, service=service,
299- public_url=public_url,
300- admin_url=admin_url,
301- internal_url=internal_url)
302-
303- settings = relation_get_dict(relation_id=relation_id,
304- remote_unit=remote_unit)
305+ if not cluster.eligible_leader(CLUSTER_RES):
306+ utils.juju_log('INFO',
307+ 'Deferring identity_changed() to service leader.')
308+ return
309+
310+ settings = utils.relation_get_dict(relation_id=relation_id,
311+ remote_unit=remote_unit)
312
313 # the minimum settings needed per endpoint
314 single = set(['service', 'region', 'public_url', 'admin_url',
315 'internal_url'])
316 if single.issubset(settings):
317 # other end of relation advertised only one endpoint
318-
319- if 'None' in [v for k,v in settings.iteritems()]:
320+ if 'None' in [v for k, v in settings.iteritems()]:
321 # Some backend services advertise no endpoint but require a
322 # hook execution to update auth strategy.
323+ relation_data = {}
324+ # Check if clustered and use vip + haproxy ports if so
325+ if cluster.is_clustered():
326+ relation_data["auth_host"] = config['vip']
327+ relation_data["service_host"] = config['vip']
328+ else:
329+ relation_data["auth_host"] = config['hostname']
330+ relation_data["service_host"] = config['hostname']
331+ relation_data["auth_port"] = config['admin-port']
332+ relation_data["service_port"] = config['service-port']
333+ if config['https-service-endpoints'] in ['True', 'true']:
334+ # Pass CA cert as client will need it to
335+ # verify https connections
336+ ca = get_ca(user=SSH_USER)
337+ ca_bundle = ca.get_ca_bundle()
338+ relation_data['https_keystone'] = 'True'
339+ relation_data['ca_cert'] = b64encode(ca_bundle)
340+ utils.relation_set(**relation_data)
341 return
342
343 ensure_valid_service(settings['service'])
344+
345 add_endpoint(region=settings['region'], service=settings['service'],
346- public_url=settings['public_url'],
347- admin_url=settings['admin_url'],
348- internal_url=settings['internal_url'])
349+ publicurl=settings['public_url'],
350+ adminurl=settings['admin_url'],
351+ internalurl=settings['internal_url'])
352 service_username = settings['service']
353+ https_cn = urlparse.urlparse(settings['internal_url'])
354+ https_cn = https_cn.hostname
355 else:
356 # assemble multiple endpoints from relation data. service name
357 # should be prepended to setting name, ie:
358@@ -171,13 +270,14 @@
359 # }
360 # }
361 endpoints = {}
362- for k,v in settings.iteritems():
363+ for k, v in settings.iteritems():
364 ep = k.split('_')[0]
365 x = '_'.join(k.split('_')[1:])
366- if ep not in endpoints:
367+ if ep not in endpoints:
368 endpoints[ep] = {}
369 endpoints[ep][x] = v
370 services = []
371+ https_cn = None
372 for ep in endpoints:
373 # weed out any unrelated relation stuff Juju might have added
374 # by ensuring each possible endpiont has appropriate fields
375@@ -186,40 +286,39 @@
376 ep = endpoints[ep]
377 ensure_valid_service(ep['service'])
378 add_endpoint(region=ep['region'], service=ep['service'],
379- public_url=ep['public_url'],
380- admin_url=ep['admin_url'],
381- internal_url=ep['internal_url'])
382+ publicurl=ep['public_url'],
383+ adminurl=ep['admin_url'],
384+ internalurl=ep['internal_url'])
385 services.append(ep['service'])
386+ if not https_cn:
387+ https_cn = urlparse.urlparse(ep['internal_url'])
388+ https_cn = https_cn.hostname
389 service_username = '_'.join(services)
390
391- if 'None' in [v for k,v in settings.iteritems()]:
392+ if 'None' in [v for k, v in settings.iteritems()]:
393 return
394
395 if not service_username:
396 return
397
398 token = get_admin_token()
399- juju_log("Creating service credentials for '%s'" % service_username)
400-
401- stored_passwd = '/var/lib/keystone/%s.passwd' % service_username
402- if os.path.isfile(stored_passwd):
403- juju_log("Loading stored service passwd from %s" % stored_passwd)
404- service_password = open(stored_passwd, 'r').readline().strip('\n')
405- else:
406- juju_log("Generating a new service password for %s" % service_username)
407- service_password = execute('pwgen -c 32 1', die=True)[0].strip()
408- open(stored_passwd, 'w+').writelines("%s\n" % service_password)
409-
410+ utils.juju_log('INFO',
411+ "Creating service credentials for '%s'" % service_username)
412+
413+ service_password = get_service_password(service_username)
414 create_user(service_username, service_password, config['service-tenant'])
415- grant_role(service_username, config['admin-role'], config['service-tenant'])
416+ grant_role(service_username, config['admin-role'],
417+ config['service-tenant'])
418
419 # Allow the remote service to request creation of any additional roles.
420- # Currently used by Swift.
421- if 'requested_roles' in settings:
422+ # Currently used by Swift and Ceilometer.
423+ if 'requested_roles' in settings and settings['requested_roles'] != 'None':
424 roles = settings['requested_roles'].split(',')
425- juju_log("Creating requested roles: %s" % roles)
426+ utils.juju_log('INFO',
427+ "Creating requested roles: %s" % roles)
428 for role in roles:
429- create_role(role, user=config['admin-user'], tenant='admin')
430+ create_role(role, service_username, config['service-tenant'])
431+ grant_role(service_username, role, config['service-tenant'])
432
433 # As of https://review.openstack.org/#change,4675, all nodes hosting
434 # an endpoint(s) needs a service username and password assigned to
435@@ -235,22 +334,193 @@
436 "auth_port": config["admin-port"],
437 "service_username": service_username,
438 "service_password": service_password,
439- "service_tenant": config['service-tenant']
440+ "service_tenant": config['service-tenant'],
441+ "https_keystone": "False",
442+ "ssl_cert": "",
443+ "ssl_key": "",
444+ "ca_cert": ""
445 }
446- relation_set(relation_data)
447+
448+ if relation_id:
449+ relation_data['rid'] = relation_id
450+
451+ # Check if clustered and use vip + haproxy ports if so
452+ if cluster.is_clustered():
453+ relation_data["auth_host"] = config['vip']
454+ relation_data["service_host"] = config['vip']
455+
456+ # generate or get a new cert/key for service if set to manage certs.
457+ if config['https-service-endpoints'] in ['True', 'true']:
458+ ca = get_ca(user=SSH_USER)
459+ cert, key = ca.get_cert_and_key(common_name=https_cn)
460+ ca_bundle = ca.get_ca_bundle()
461+ relation_data['ssl_cert'] = b64encode(cert)
462+ relation_data['ssl_key'] = b64encode(key)
463+ relation_data['ca_cert'] = b64encode(ca_bundle)
464+ relation_data['https_keystone'] = 'True'
465+ unison.sync_to_peers(peer_interface='cluster',
466+ paths=[SSL_DIR], user=SSH_USER, verbose=True)
467+ utils.relation_set(**relation_data)
468+ synchronize_service_credentials()
469+
470
471 def config_changed():
472+ unison.ensure_user(user=SSH_USER, group='keystone')
473+ execute("chmod -R g+wrx /var/lib/keystone/")
474
475 # Determine whether or not we should do an upgrade, based on the
476 # the version offered in keyston-release.
477 available = get_os_codename_install_source(config['openstack-origin'])
478 installed = get_os_codename_package('keystone')
479
480- if get_os_version_codename(available) > get_os_version_codename(installed):
481- do_openstack_upgrade(config['openstack-origin'], packages)
482+ if (available and
483+ get_os_version_codename(available) > \
484+ get_os_version_codename(installed)):
485+ # TODO: fixup this call to work like utils.install()
486+ do_openstack_upgrade(config['openstack-origin'], ' '.join(packages))
487+ # Ensure keystone group permissions
488+ execute("chmod -R g+wrx /var/lib/keystone/")
489+
490+ env_vars = {'OPENSTACK_SERVICE_KEYSTONE': 'keystone',
491+ 'OPENSTACK_PORT_ADMIN': cluster.determine_api_port(
492+ config['admin-port']),
493+ 'OPENSTACK_PORT_PUBLIC': cluster.determine_api_port(
494+ config['service-port'])}
495+ save_script_rc(**env_vars)
496
497 set_admin_token(config['admin-token'])
498- ensure_initial_admin(config)
499+
500+ if cluster.eligible_leader(CLUSTER_RES):
501+ utils.juju_log('INFO',
502+ 'Cluster leader - ensuring endpoint configuration'
503+ ' is up to date')
504+ ensure_initial_admin(config)
505+
506+ update_config_block('logger_root', level=config['log-level'],
507+ file='/etc/keystone/logging.conf')
508+ if get_os_version_package('keystone') >= '2013.1':
509+ # PKI introduced in Grizzly
510+ configure_pki_tokens(config)
511+
512+ if config_dirty():
513+ utils.restart('keystone')
514+
515+ if cluster.eligible_leader(CLUSTER_RES):
516+ utils.juju_log('INFO',
517+ 'Firing identity_changed hook'
518+ ' for all related services.')
519+ # HTTPS may have been set - so fire all identity relations
520+ # again
521+ for r_id in utils.relation_ids('identity-service'):
522+ for unit in utils.relation_list(r_id):
523+ identity_changed(relation_id=r_id,
524+ remote_unit=unit)
525+
526+
527+def upgrade_charm():
528+ # Ensure all required packages are installed
529+ utils.install(*packages)
530+ cluster_changed()
531+ if cluster.eligible_leader(CLUSTER_RES):
532+ utils.juju_log('INFO',
533+ 'Cluster leader - ensuring endpoint configuration'
534+ ' is up to date')
535+ ensure_initial_admin(config)
536+
537+
538+def cluster_joined():
539+ unison.ssh_authorized_peers(user=SSH_USER,
540+ group='keystone',
541+ peer_interface='cluster',
542+ ensure_local_user=True)
543+ update_config_block('DEFAULT',
544+ public_port=cluster.determine_api_port(config["service-port"]))
545+ update_config_block('DEFAULT',
546+ admin_port=cluster.determine_api_port(config["admin-port"]))
547+ if config_dirty():
548+ utils.restart('keystone')
549+ service_ports = {
550+ "keystone_admin": [
551+ cluster.determine_haproxy_port(config['admin-port']),
552+ cluster.determine_api_port(config["admin-port"])
553+ ],
554+ "keystone_service": [
555+ cluster.determine_haproxy_port(config['service-port']),
556+ cluster.determine_api_port(config["service-port"])
557+ ]
558+ }
559+ haproxy.configure_haproxy(service_ports)
560+
561+
562+def cluster_changed():
563+ unison.ssh_authorized_peers(user=SSH_USER,
564+ group='keystone',
565+ peer_interface='cluster',
566+ ensure_local_user=True)
567+ synchronize_service_credentials()
568+ service_ports = {
569+ "keystone_admin": [
570+ cluster.determine_haproxy_port(config['admin-port']),
571+ cluster.determine_api_port(config["admin-port"])
572+ ],
573+ "keystone_service": [
574+ cluster.determine_haproxy_port(config['service-port']),
575+ cluster.determine_api_port(config["service-port"])
576+ ]
577+ }
578+ haproxy.configure_haproxy(service_ports)
579+
580+
581+def ha_relation_changed():
582+ relation_data = utils.relation_get_dict()
583+ if ('clustered' in relation_data and
584+ cluster.is_leader(CLUSTER_RES)):
585+ utils.juju_log('INFO',
586+ 'Cluster configured, notifying other services'
587+ ' and updating keystone endpoint configuration')
588+ # Update keystone endpoint to point at VIP
589+ ensure_initial_admin(config)
590+ # Tell all related services to start using
591+ # the VIP and haproxy ports instead
592+ for r_id in utils.relation_ids('identity-service'):
593+ utils.relation_set(rid=r_id,
594+ auth_host=config['vip'],
595+ service_host=config['vip'])
596+
597+
598+def ha_relation_joined():
599+ # Obtain the config values necessary for the cluster config. These
600+ # include multicast port and interface to bind to.
601+ corosync_bindiface = config['ha-bindiface']
602+ corosync_mcastport = config['ha-mcastport']
603+ vip = config['vip']
604+ vip_cidr = config['vip_cidr']
605+ vip_iface = config['vip_iface']
606+
607+ # Obtain resources
608+ resources = {
609+ 'res_ks_vip': 'ocf:heartbeat:IPaddr2',
610+ 'res_ks_haproxy': 'lsb:haproxy'
611+ }
612+ resource_params = {
613+ 'res_ks_vip': 'params ip="%s" cidr_netmask="%s" nic="%s"' % \
614+ (vip, vip_cidr, vip_iface),
615+ 'res_ks_haproxy': 'op monitor interval="5s"'
616+ }
617+ init_services = {
618+ 'res_ks_haproxy': 'haproxy'
619+ }
620+ clones = {
621+ 'cl_ks_haproxy': 'res_ks_haproxy'
622+ }
623+
624+ utils.relation_set(init_services=init_services,
625+ corosync_bindiface=corosync_bindiface,
626+ corosync_mcastport=corosync_mcastport,
627+ resources=resources,
628+ resource_params=resource_params,
629+ clones=clones)
630+
631
632 hooks = {
633 "install": install_hook,
634@@ -258,12 +528,13 @@
635 "shared-db-relation-changed": db_changed,
636 "identity-service-relation-joined": identity_joined,
637 "identity-service-relation-changed": identity_changed,
638- "config-changed": config_changed
639+ "config-changed": config_changed,
640+ "cluster-relation-joined": cluster_joined,
641+ "cluster-relation-changed": cluster_changed,
642+ "cluster-relation-departed": cluster_changed,
643+ "ha-relation-joined": ha_relation_joined,
644+ "ha-relation-changed": ha_relation_changed,
645+ "upgrade-charm": upgrade_charm
646 }
647
648-# keystone-hooks gets called by symlink corresponding to the requested relation
649-# hook.
650-arg0 = sys.argv[0].split("/").pop()
651-if arg0 not in hooks.keys():
652- error_out("Unsupported hook: %s" % arg0)
653-hooks[arg0]()
654+utils.do_hooks(hooks)
655
656=== added file 'hooks/keystone_ssl.py'
657--- hooks/keystone_ssl.py 1970-01-01 00:00:00 +0000
658+++ hooks/keystone_ssl.py 2013-05-29 18:13:26 +0000
659@@ -0,0 +1,301 @@
660+#!/usr/bin/python
661+
662+import os
663+import shutil
664+import subprocess
665+import tarfile
666+import tempfile
667+
668+CA_EXPIRY = '365'
669+ORG_NAME = 'Ubuntu'
670+ORG_UNIT = 'Ubuntu Cloud'
671+CA_BUNDLE = '/usr/local/share/ca-certificates/juju_ca_cert.crt'
672+
673+CA_CONFIG = """
674+[ ca ]
675+default_ca = CA_default
676+
677+[ CA_default ]
678+dir = %(ca_dir)s
679+policy = policy_match
680+database = $dir/index.txt
681+serial = $dir/serial
682+certs = $dir/certs
683+crl_dir = $dir/crl
684+new_certs_dir = $dir/newcerts
685+certificate = $dir/cacert.pem
686+private_key = $dir/private/cacert.key
687+RANDFILE = $dir/private/.rand
688+default_md = default
689+
690+[ req ]
691+default_bits = 1024
692+default_md = sha1
693+
694+prompt = no
695+distinguished_name = ca_distinguished_name
696+
697+x509_extensions = ca_extensions
698+
699+[ ca_distinguished_name ]
700+organizationName = %(org_name)s
701+organizationalUnitName = %(org_unit_name)s Certificate Authority
702+commonName = %(common_name)s
703+
704+[ policy_match ]
705+countryName = optional
706+stateOrProvinceName = optional
707+organizationName = match
708+organizationalUnitName = optional
709+commonName = supplied
710+
711+[ ca_extensions ]
712+basicConstraints = critical,CA:true
713+subjectKeyIdentifier = hash
714+authorityKeyIdentifier = keyid:always, issuer
715+keyUsage = cRLSign, keyCertSign
716+"""
717+
718+SIGNING_CONFIG = """
719+[ ca ]
720+default_ca = CA_default
721+
722+[ CA_default ]
723+dir = %(ca_dir)s
724+policy = policy_match
725+database = $dir/index.txt
726+serial = $dir/serial
727+certs = $dir/certs
728+crl_dir = $dir/crl
729+new_certs_dir = $dir/newcerts
730+certificate = $dir/cacert.pem
731+private_key = $dir/private/cacert.key
732+RANDFILE = $dir/private/.rand
733+default_md = default
734+
735+[ req ]
736+default_bits = 1024
737+default_md = sha1
738+
739+prompt = no
740+distinguished_name = req_distinguished_name
741+
742+x509_extensions = req_extensions
743+
744+[ req_distinguished_name ]
745+organizationName = %(org_name)s
746+organizationalUnitName = %(org_unit_name)s Server Farm
747+
748+[ policy_match ]
749+countryName = optional
750+stateOrProvinceName = optional
751+organizationName = match
752+organizationalUnitName = optional
753+commonName = supplied
754+
755+[ req_extensions ]
756+basicConstraints = CA:false
757+subjectKeyIdentifier = hash
758+authorityKeyIdentifier = keyid:always, issuer
759+keyUsage = digitalSignature, keyEncipherment, keyAgreement
760+extendedKeyUsage = serverAuth, clientAuth
761+"""
762+
763+
764+def init_ca(ca_dir, common_name, org_name=ORG_NAME, org_unit_name=ORG_UNIT):
765+ print 'Ensuring certificate authority exists at %s.' % ca_dir
766+ if not os.path.exists(ca_dir):
767+ print 'Initializing new certificate authority at %s' % ca_dir
768+ os.mkdir(ca_dir)
769+
770+ for i in ['certs', 'crl', 'newcerts', 'private']:
771+ d = os.path.join(ca_dir, i)
772+ if not os.path.exists(d):
773+ print 'Creating %s.' % d
774+ os.mkdir(d)
775+ os.chmod(os.path.join(ca_dir, 'private'), 0710)
776+
777+ if not os.path.isfile(os.path.join(ca_dir, 'serial')):
778+ with open(os.path.join(ca_dir, 'serial'), 'wb') as out:
779+ out.write('01\n')
780+
781+ if not os.path.isfile(os.path.join(ca_dir, 'index.txt')):
782+ with open(os.path.join(ca_dir, 'index.txt'), 'wb') as out:
783+ out.write('')
784+ if not os.path.isfile(os.path.join(ca_dir, 'ca.cnf')):
785+ print 'Creating new CA config in %s' % ca_dir
786+ with open(os.path.join(ca_dir, 'ca.cnf'), 'wb') as out:
787+ out.write(CA_CONFIG % locals())
788+
789+
790+def root_ca_crt_key(ca_dir):
791+ init = False
792+ crt = os.path.join(ca_dir, 'cacert.pem')
793+ key = os.path.join(ca_dir, 'private', 'cacert.key')
794+ for f in [crt, key]:
795+ if not os.path.isfile(f):
796+ print 'Missing %s, will re-initialize cert+key.' % f
797+ init = True
798+ else:
799+ print 'Found %s.' % f
800+ if init:
801+ cmd = ['openssl', 'req', '-config', os.path.join(ca_dir, 'ca.cnf'),
802+ '-x509', '-nodes', '-newkey', 'rsa', '-days', '21360',
803+ '-keyout', key, '-out', crt, '-outform', 'PEM']
804+ subprocess.check_call(cmd)
805+ return crt, key
806+
807+
808+def intermediate_ca_csr_key(ca_dir):
809+ print 'Creating new intermediate CSR.'
810+ key = os.path.join(ca_dir, 'private', 'cacert.key')
811+ csr = os.path.join(ca_dir, 'cacert.csr')
812+ cmd = ['openssl', 'req', '-config', os.path.join(ca_dir, 'ca.cnf'),
813+ '-sha1', '-newkey', 'rsa', '-nodes', '-keyout', key, '-out',
814+ csr, '-outform',
815+ 'PEM']
816+ subprocess.check_call(cmd)
817+ return csr, key
818+
819+
820+def sign_int_csr(ca_dir, csr, common_name):
821+ print 'Signing certificate request %s.' % csr
822+ crt = os.path.join(ca_dir, 'certs',
823+ '%s.crt' % os.path.basename(csr).split('.')[0])
824+ subj = '/O=%s/OU=%s/CN=%s' % (ORG_NAME, ORG_UNIT, common_name)
825+ cmd = ['openssl', 'ca', '-batch', '-config',
826+ os.path.join(ca_dir, 'ca.cnf'),
827+ '-extensions', 'ca_extensions', '-days', CA_EXPIRY, '-notext',
828+ '-in', csr, '-out', crt, '-subj', subj, '-batch']
829+ print ' '.join(cmd)
830+ subprocess.check_call(cmd)
831+ return crt
832+
833+
834+def init_root_ca(ca_dir, common_name):
835+ init_ca(ca_dir, common_name)
836+ return root_ca_crt_key(ca_dir)
837+
838+
839+def init_intermediate_ca(ca_dir, common_name, root_ca_dir,
840+ org_name=ORG_NAME, org_unit_name=ORG_UNIT):
841+ init_ca(ca_dir, common_name)
842+ if not os.path.isfile(os.path.join(ca_dir, 'cacert.pem')):
843+ csr, key = intermediate_ca_csr_key(ca_dir)
844+ crt = sign_int_csr(root_ca_dir, csr, common_name)
845+ shutil.copy(crt, os.path.join(ca_dir, 'cacert.pem'))
846+ else:
847+ print 'Intermediate CA certificate already exists.'
848+
849+ if not os.path.isfile(os.path.join(ca_dir, 'signing.cnf')):
850+ print 'Creating new signing config in %s' % ca_dir
851+ with open(os.path.join(ca_dir, 'signing.cnf'), 'wb') as out:
852+ out.write(SIGNING_CONFIG % locals())
853+
854+
855+def create_certificate(ca_dir, service):
856+ common_name = service
857+ subj = '/O=%s/OU=%s/CN=%s' % (ORG_NAME, ORG_UNIT, common_name)
858+ csr = os.path.join(ca_dir, 'certs', '%s.csr' % service)
859+ key = os.path.join(ca_dir, 'certs', '%s.key' % service)
860+ cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa', '-nodes', '-keyout',
861+ key, '-out', csr, '-subj', subj]
862+ subprocess.check_call(cmd)
863+ crt = sign_int_csr(ca_dir, csr, common_name)
864+ print 'Signed new CSR, crt @ %s' % crt
865+ return
866+
867+
868+def update_bundle(bundle_file, new_bundle):
869+ return
870+ if os.path.isfile(bundle_file):
871+ current = open(bundle_file, 'r').read().strip()
872+ if new_bundle == current:
873+ print 'CA Bundle @ %s is up to date.' % bundle_file
874+ return
875+ else:
876+ print 'Updating CA bundle @ %s.' % bundle_file
877+
878+ with open(bundle_file, 'wb') as out:
879+ out.write(new_bundle)
880+ subprocess.check_call(['update-ca-certificates'])
881+
882+
883+def tar_directory(path):
884+ cwd = os.getcwd()
885+ parent = os.path.dirname(path)
886+ directory = os.path.basename(path)
887+ tmp = tempfile.TemporaryFile()
888+ os.chdir(parent)
889+ tarball = tarfile.TarFile(fileobj=tmp, mode='w')
890+ tarball.add(directory)
891+ tarball.close()
892+ tmp.seek(0)
893+ out = tmp.read()
894+ tmp.close()
895+ os.chdir(cwd)
896+ return out
897+
898+
class JujuCA(object):
    """Charm-managed certificate authority.

    Wraps a two-level openssl CA (root + intermediate) living on disk
    under root_ca_dir / ca_dir and hands out per-service certificates
    signed by the intermediate CA.
    """

    def __init__(self, name, ca_dir, root_ca_dir, user, group):
        # Create-or-load both CAs on disk (helpers are idempotent).
        # NOTE(review): root_crt/root_key are never used after this call.
        root_crt, root_key = init_root_ca(root_ca_dir,
                                          '%s Certificate Authority' % name)
        init_intermediate_ca(ca_dir,
                             '%s Intermediate Certificate Authority' % name,
                             root_ca_dir)
        # 'user.group' chown separator is the legacy form of 'user:group'.
        cmd = ['chown', '-R', '%s.%s' % (user, group), ca_dir]
        subprocess.check_call(cmd)
        cmd = ['chown', '-R', '%s.%s' % (user, group), root_ca_dir]
        subprocess.check_call(cmd)
        self.ca_dir = ca_dir
        self.root_ca_dir = root_ca_dir
        self.user = user
        self.group = group
        # Refresh the system-wide CA bundle with our root+intermediate.
        update_bundle(CA_BUNDLE, self.get_ca_bundle())

    def _sign_csr(self, csr, service, common_name):
        """Sign *csr* with the intermediate CA; returns the cert path."""
        subj = '/O=%s/OU=%s/CN=%s' % (ORG_NAME, ORG_UNIT, common_name)
        crt = os.path.join(self.ca_dir, 'certs', '%s.crt' % common_name)
        cmd = ['openssl', 'ca', '-config',
               os.path.join(self.ca_dir, 'signing.cnf'), '-extensions',
               'req_extensions', '-days', '365', '-notext', '-in', csr,
               '-out', crt, '-batch', '-subj', subj]
        subprocess.check_call(cmd)
        return crt

    def _create_certificate(self, service, common_name):
        """Create key + CSR for *service*, sign it, and fix ownership.

        Returns (crt_path, key_path).
        """
        subj = '/O=%s/OU=%s/CN=%s' % (ORG_NAME, ORG_UNIT, common_name)
        csr = os.path.join(self.ca_dir, 'certs', '%s.csr' % service)
        key = os.path.join(self.ca_dir, 'certs', '%s.key' % service)
        # Unencrypted RSA key + CSR in one openssl invocation.
        cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa', '-nodes',
               '-keyout', key, '-out', csr, '-subj', subj]
        subprocess.check_call(cmd)
        crt = self._sign_csr(csr, service, common_name)
        # Re-chown: openssl runs as root during hooks, so new files would
        # otherwise be unreadable by the keystone user.
        cmd = ['chown', '-R', '%s.%s' % (self.user, self.group), self.ca_dir]
        subprocess.check_call(cmd)
        print 'Signed new CSR, crt @ %s' % crt
        return crt, key

    def get_cert_and_key(self, common_name):
        """Return (cert_text, key_text) for *common_name*, creating and
        signing a new pair on first request.
        """
        print 'Getting certificate and key for %s.' % common_name
        key = os.path.join(self.ca_dir, 'certs', '%s.key' % common_name)
        crt = os.path.join(self.ca_dir, 'certs', '%s.crt' % common_name)
        if os.path.isfile(crt):
            print 'Found existing certificate for %s.' % common_name
            # NOTE(review): 'crt'/'key' are rebound from paths to file
            # contents here; a cert without its key is fatal (exit(1)),
            # and the bare except hides the underlying IOError.
            crt = open(crt, 'r').read()
            try:
                key = open(key, 'r').read()
            except:
                print 'Could not load ssl private key for %s from %s' %\
                    (common_name, key)
                exit(1)
            return crt, key
        crt, key = self._create_certificate(common_name, common_name)
        return open(crt, 'r').read(), open(key, 'r').read()

    def get_ca_bundle(self):
        """Return intermediate + root certs concatenated as one bundle."""
        int_cert = open(os.path.join(self.ca_dir, 'cacert.pem')).read()
        root_cert = open(os.path.join(self.root_ca_dir, 'cacert.pem')).read()
        # NOTE: ordering of certs in bundle matters!
        return int_cert + root_cert
961
962=== renamed file 'hooks/utils.py' => 'hooks/keystone_utils.py'
963--- hooks/utils.py 2012-12-12 03:52:41 +0000
964+++ hooks/keystone_utils.py 2013-05-29 18:13:26 +0000
965@@ -1,18 +1,37 @@
966 #!/usr/bin/python
967-import subprocess
968+import ConfigParser
969 import sys
970 import json
971+import time
972+import subprocess
973 import os
974-import time
975-
976-from lib.openstack_common import *
977+
978+from lib.openstack_common import(
979+ get_os_codename_install_source,
980+ get_os_codename_package,
981+ error_out,
982+ configure_installation_source
983+ )
984+
985+import keystone_ssl as ssl
986+import lib.unison as unison
987+import lib.utils as utils
988+import lib.cluster_utils as cluster
989+
990
991 keystone_conf = "/etc/keystone/keystone.conf"
992 stored_passwd = "/var/lib/keystone/keystone.passwd"
993 stored_token = "/var/lib/keystone/keystone.token"
994+SERVICE_PASSWD_PATH = '/var/lib/keystone/services.passwd'
995+
996+SSL_DIR = '/var/lib/keystone/juju_ssl/'
997+SSL_CA_NAME = 'Ubuntu Cloud'
998+CLUSTER_RES = 'res_ks_vip'
999+SSH_USER = 'juju_keystone'
1000+
1001
1002 def execute(cmd, die=False, echo=False):
1003- """ Executes a command
1004+ """ Executes a command
1005
1006 if die=True, script will exit(1) if command does not return 0
1007 if echo=True, output of command will be printed to stdout
1008@@ -23,8 +42,8 @@
1009 stdout=subprocess.PIPE,
1010 stdin=subprocess.PIPE,
1011 stderr=subprocess.PIPE)
1012- stdout=""
1013- stderr=""
1014+ stdout = ""
1015+ stderr = ""
1016
1017 def print_line(l):
1018 if echo:
1019@@ -47,7 +66,7 @@
1020
1021
1022 def config_get():
1023- """ Obtain the units config via 'config-get'
1024+ """ Obtain the units config via 'config-get'
1025 Returns a dict representing current config.
1026 private-address and IP of the unit is also tacked on for
1027 convienence
1028@@ -56,75 +75,38 @@
1029 config = json.loads(output)
1030 # make sure no config element is blank after config-get
1031 for c in config.keys():
1032- if not config[c]:
1033+ if not config[c]:
1034 error_out("ERROR: Config option has no paramter: %s" % c)
1035 # tack on our private address and ip
1036- hostname = execute("unit-get private-address")[0].strip()
1037- config["hostname"] = execute("unit-get private-address")[0].strip()
1038+ config["hostname"] = utils.unit_get('private-address')
1039 return config
1040
1041-def relation_ids(relation_name=None):
1042- j = execute('relation-ids --format=json %s' % relation_name)[0]
1043- return json.loads(j)
1044-
1045-def relation_list(relation_id=None):
1046- cmd = 'relation-list --format=json'
1047- if relation_id:
1048- cmd += ' -r %s' % relation_id
1049- j = execute(cmd)[0]
1050- return json.loads(j)
1051-
1052-def relation_set(relation_data):
1053- """ calls relation-set for all key=values in dict """
1054- for k in relation_data:
1055- execute("relation-set %s=%s" % (k, relation_data[k]), die=True)
1056-
1057-def relation_get(relation_data):
1058- """ Obtain all current relation data
1059- relation_data is a list of options to query from the relation
1060- Returns a k,v dict of the results.
1061- Leave empty responses out of the results as they haven't yet been
1062- set on the other end.
1063- Caller can then "len(results.keys()) == len(relation_data)" to find out if
1064- all relation values have been set on the other side
1065- """
1066- results = {}
1067- for r in relation_data:
1068- result = execute("relation-get %s" % r, die=True)[0].strip('\n')
1069- if result != "":
1070- results[r] = result
1071- return results
1072-
1073-def relation_get_dict(relation_id=None, remote_unit=None):
1074- """Obtain all relation data as dict by way of JSON"""
1075- cmd = 'relation-get --format=json'
1076- if relation_id:
1077- cmd += ' -r %s' % relation_id
1078- if remote_unit:
1079- remote_unit_orig = os.getenv('JUJU_REMOTE_UNIT', None)
1080- os.environ['JUJU_REMOTE_UNIT'] = remote_unit
1081- j = execute(cmd, die=True)[0]
1082- if remote_unit and remote_unit_orig:
1083- os.environ['JUJU_REMOTE_UNIT'] = remote_unit_orig
1084- d = json.loads(j)
1085- settings = {}
1086- # convert unicode to strings
1087- for k, v in d.iteritems():
1088- settings[str(k)] = str(v)
1089- return settings
1090+
@utils.cached
def get_local_endpoint():
    """ Returns the URL for the local end-point bypassing haproxy/ssl """
    api_port = cluster.determine_api_port(utils.config_get('admin-port'))
    return 'http://localhost:{}/v2.0/'.format(api_port)
1098+
1099
1100 def set_admin_token(admin_token):
1101 """Set admin token according to deployment config or use a randomly
1102 generated token if none is specified (default).
1103 """
1104 if admin_token != 'None':
1105- juju_log('Configuring Keystone to use a pre-configured admin token.')
1106+ utils.juju_log('INFO',
1107+ 'Configuring Keystone to use'
1108+ ' a pre-configured admin token.')
1109 token = admin_token
1110 else:
1111- juju_log('Configuring Keystone to use a random admin token.')
1112+ utils.juju_log('INFO',
1113+ 'Configuring Keystone to use a random admin token.')
1114 if os.path.isfile(stored_token):
1115- msg = 'Loading a previously generated admin token from %s' % stored_token
1116- juju_log(msg)
1117+ msg = 'Loading a previously generated' \
1118+ ' admin token from %s' % stored_token
1119+ utils.juju_log('INFO', msg)
1120 f = open(stored_token, 'r')
1121 token = f.read().strip()
1122 f.close()
1123@@ -135,174 +117,130 @@
1124 out.close()
1125 update_config_block('DEFAULT', admin_token=token)
1126
1127+
def get_admin_token():
    """Temporary utility to grab the admin token as configured in
    keystone.conf.

    Scans the config line-by-line for an ``admin_token`` assignment and
    returns its (stripped) value; calls error_out() — which exits — when
    the option is missing or malformed.
    """
    with open(keystone_conf, 'r') as f:
        for l in f:
            # partition('=') handles both 'admin_token = x' and
            # 'admin_token=x'; the original split(' ') missed the latter.
            key, sep, value = l.partition('=')
            if key.strip() == 'admin_token':
                if sep and value.strip():
                    return value.strip()
                error_out('Could not parse admin_token line from %s' %
                          keystone_conf)
    error_out('Could not find admin_token line in %s' % keystone_conf)
1149
1150-def update_config_block(block, **kwargs):
1151+
1152+# Track all updated config settings.
1153+_config_dirty = [False]
1154+
1155+def config_dirty():
1156+ return True in _config_dirty
1157+
1158+def update_config_block(section, **kwargs):
1159 """ Updates keystone.conf blocks given kwargs.
1160- Can be used to update driver settings for a particular backend,
1161- setting the sql connection, etc.
1162-
1163- Parses block heading as '[block]'
1164-
1165- If block does not exist, a new block will be created at end of file with
1166- given kwargs
1167- """
1168- f = open(keystone_conf, "r+")
1169- orig = f.readlines()
1170- new = []
1171- found_block = ""
1172- heading = "[%s]\n" % block
1173-
1174- lines = len(orig)
1175- ln = 0
1176-
1177- def update_block(block):
1178- for k, v in kwargs.iteritems():
1179- for l in block:
1180- if l.strip().split(" ")[0] == k:
1181- block[block.index(l)] = "%s = %s\n" % (k, v)
1182- return
1183- block.append('%s = %s\n' % (k, v))
1184- block.append('\n')
1185-
1186- try:
1187- found = False
1188- while ln < lines:
1189- if orig[ln] != heading:
1190- new.append(orig[ln])
1191- ln += 1
1192- else:
1193- new.append(orig[ln])
1194- ln += 1
1195- block = []
1196- while orig[ln].strip() != '':
1197- block.append(orig[ln])
1198- ln += 1
1199- update_block(block)
1200- new += block
1201- found = True
1202-
1203- if not found:
1204- if new[(len(new) - 1)].strip() != '':
1205- new.append('\n')
1206- new.append('%s' % heading)
1207- for k, v in kwargs.iteritems():
1208- new.append('%s = %s\n' % (k, v))
1209- new.append('\n')
1210- except:
1211- error_out('Error while attempting to update config block. '\
1212- 'Refusing to overwite existing config.')
1213-
1214- return
1215-
1216- # backup original config
1217- backup = open(keystone_conf + '.juju-back', 'w+')
1218- for l in orig:
1219- backup.write(l)
1220- backup.close()
1221-
1222- # update config
1223- f.seek(0)
1224- f.truncate()
1225- for l in new:
1226- f.write(l)
1227-
1228-
1229-def keystone_conf_update(opt, val):
1230- """ Updates keystone.conf values
1231- If option exists, it is reset to new value
1232- If it does not, it added to the top of the config file after the [DEFAULT]
1233- heading to keep it out of the paste deploy config
1234- """
1235- f = open(keystone_conf, "r+")
1236- orig = f.readlines()
1237- new = ""
1238- found = False
1239- for l in orig:
1240- if l.split(' ')[0] == opt:
1241- juju_log("Updating %s, setting %s = %s" % (keystone_conf, opt, val))
1242- new += "%s = %s\n" % (opt, val)
1243- found = True
1244- else:
1245- new += l
1246- new = new.split('\n')
1247- # insert a new value at the top of the file, after the 'DEFAULT' header so
1248- # as not to muck up paste deploy configuration later in the file
1249- if not found:
1250- juju_log("Adding new config option %s = %s" % (opt, val))
1251- header = new.index("[DEFAULT]")
1252- new.insert((header+1), "%s = %s" % (opt, val))
1253- f.seek(0)
1254- f.truncate()
1255- for l in new:
1256- f.write("%s\n" % l)
1257- f.close
1258+ Update a config setting in a specific setting of a config
1259+ file (/etc/keystone/keystone.conf, by default)
1260+ """
1261+ if 'file' in kwargs:
1262+ conf_file = kwargs['file']
1263+ del kwargs['file']
1264+ else:
1265+ conf_file = keystone_conf
1266+ config = ConfigParser.RawConfigParser()
1267+ config.read(conf_file)
1268+
1269+ if section != 'DEFAULT' and not config.has_section(section):
1270+ config.add_section(section)
1271+ _config_dirty[0] = True
1272+
1273+ for k, v in kwargs.iteritems():
1274+ try:
1275+ cur = config.get(section, k)
1276+ if cur != v:
1277+ _config_dirty[0] = True
1278+ except (ConfigParser.NoSectionError,
1279+ ConfigParser.NoOptionError):
1280+ _config_dirty[0] = True
1281+ config.set(section, k, v)
1282+ with open(conf_file, 'wb') as out:
1283+ config.write(out)
1284+
1285
1286 def create_service_entry(service_name, service_type, service_desc, owner=None):
1287 """ Add a new service entry to keystone if one does not already exist """
1288 import manager
1289- manager = manager.KeystoneManager(endpoint='http://localhost:35357/v2.0/',
1290+ manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
1291 token=get_admin_token())
1292 for service in [s._info for s in manager.api.services.list()]:
1293 if service['name'] == service_name:
1294- juju_log("Service entry for '%s' already exists." % service_name)
1295+ utils.juju_log('INFO',
1296+ "Service entry for '%s' already exists." % \
1297+ service_name)
1298 return
1299 manager.api.services.create(name=service_name,
1300 service_type=service_type,
1301 description=service_desc)
1302- juju_log("Created new service entry '%s'" % service_name)
1303-
1304-def create_endpoint_template(region, service, public_url, admin_url,
1305- internal_url):
1306+ utils.juju_log('INFO', "Created new service entry '%s'" % service_name)
1307+
1308+
1309+def create_endpoint_template(region, service, publicurl, adminurl,
1310+ internalurl):
1311 """ Create a new endpoint template for service if one does not already
1312 exist matching name *and* region """
1313 import manager
1314- manager = manager.KeystoneManager(endpoint='http://localhost:35357/v2.0/',
1315+ manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
1316 token=get_admin_token())
1317 service_id = manager.resolve_service_id(service)
1318 for ep in [e._info for e in manager.api.endpoints.list()]:
1319 if ep['service_id'] == service_id and ep['region'] == region:
1320- juju_log("Endpoint template already exists for '%s' in '%s'"
1321- % (service, region))
1322- return
1323+ utils.juju_log('INFO',
1324+ "Endpoint template already exists for '%s' in '%s'"
1325+ % (service, region))
1326+
1327+ up_to_date = True
1328+ for k in ['publicurl', 'adminurl', 'internalurl']:
1329+ if ep[k] != locals()[k]:
1330+ up_to_date = False
1331+
1332+ if up_to_date:
1333+ return
1334+ else:
1335+ # delete endpoint and recreate if endpoint urls need updating.
1336+ utils.juju_log('INFO',
1337+ "Updating endpoint template with"
1338+ " new endpoint urls.")
1339+ manager.api.endpoints.delete(ep['id'])
1340
1341 manager.api.endpoints.create(region=region,
1342 service_id=service_id,
1343- publicurl=public_url,
1344- adminurl=admin_url,
1345- internalurl=internal_url)
1346- juju_log("Created new endpoint template for '%s' in '%s'" %
1347- (region, service))
1348+ publicurl=publicurl,
1349+ adminurl=adminurl,
1350+ internalurl=internalurl)
1351+ utils.juju_log('INFO', "Created new endpoint template for '%s' in '%s'" %
1352+ (region, service))
1353+
1354
1355 def create_tenant(name):
1356 """ creates a tenant if it does not already exist """
1357 import manager
1358- manager = manager.KeystoneManager(endpoint='http://localhost:35357/v2.0/',
1359+ manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
1360 token=get_admin_token())
1361 tenants = [t._info for t in manager.api.tenants.list()]
1362 if not tenants or name not in [t['name'] for t in tenants]:
1363 manager.api.tenants.create(tenant_name=name,
1364 description='Created by Juju')
1365- juju_log("Created new tenant: %s" % name)
1366+ utils.juju_log('INFO', "Created new tenant: %s" % name)
1367 return
1368- juju_log("Tenant '%s' already exists." % name)
1369+ utils.juju_log('INFO', "Tenant '%s' already exists." % name)
1370+
1371
1372 def create_user(name, password, tenant):
1373 """ creates a user if it doesn't already exist, as a member of tenant """
1374 import manager
1375- manager = manager.KeystoneManager(endpoint='http://localhost:35357/v2.0/',
1376+ manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
1377 token=get_admin_token())
1378 users = [u._info for u in manager.api.users.list()]
1379 if not users or name not in [u['name'] for u in users]:
1380@@ -313,21 +251,23 @@
1381 password=password,
1382 email='juju@localhost',
1383 tenant_id=tenant_id)
1384- juju_log("Created new user '%s' tenant: %s" % (name, tenant_id))
1385+ utils.juju_log('INFO', "Created new user '%s' tenant: %s" % \
1386+ (name, tenant_id))
1387 return
1388- juju_log("A user named '%s' already exists" % name)
1389+ utils.juju_log('INFO', "A user named '%s' already exists" % name)
1390+
1391
1392 def create_role(name, user=None, tenant=None):
1393 """ creates a role if it doesn't already exist. grants role to user """
1394 import manager
1395- manager = manager.KeystoneManager(endpoint='http://localhost:35357/v2.0/',
1396+ manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
1397 token=get_admin_token())
1398 roles = [r._info for r in manager.api.roles.list()]
1399 if not roles or name not in [r['name'] for r in roles]:
1400 manager.api.roles.create(name=name)
1401- juju_log("Created new role '%s'" % name)
1402+ utils.juju_log('INFO', "Created new role '%s'" % name)
1403 else:
1404- juju_log("A role named '%s' already exists" % name)
1405+ utils.juju_log('INFO', "A role named '%s' already exists" % name)
1406
1407 if not user and not tenant:
1408 return
1409@@ -338,49 +278,55 @@
1410 tenant_id = manager.resolve_tenant_id(tenant)
1411
1412 if None in [user_id, role_id, tenant_id]:
1413- error_out("Could not resolve [user_id, role_id, tenant_id]" %
1414- [user_id, role_id, tenant_id])
1415+ error_out("Could not resolve [%s, %s, %s]" %
1416+ (user_id, role_id, tenant_id))
1417
1418 grant_role(user, name, tenant)
1419
1420+
1421 def grant_role(user, role, tenant):
1422 """grant user+tenant a specific role"""
1423 import manager
1424- manager = manager.KeystoneManager(endpoint='http://localhost:35357/v2.0/',
1425+ manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
1426 token=get_admin_token())
1427- juju_log("Granting user '%s' role '%s' on tenant '%s'" %\
1428- (user, role, tenant))
1429+ utils.juju_log('INFO', "Granting user '%s' role '%s' on tenant '%s'" % \
1430+ (user, role, tenant))
1431 user_id = manager.resolve_user_id(user)
1432 role_id = manager.resolve_role_id(role)
1433 tenant_id = manager.resolve_tenant_id(tenant)
1434
1435- cur_roles = manager.api.roles.roles_for_user(user_id, tenant_id)
1436+ cur_roles = manager.api.roles.roles_for_user(user_id, tenant_id)
1437 if not cur_roles or role_id not in [r.id for r in cur_roles]:
1438 manager.api.roles.add_user_role(user=user_id,
1439 role=role_id,
1440 tenant=tenant_id)
1441- juju_log("Granted user '%s' role '%s' on tenant '%s'" %\
1442- (user, role, tenant))
1443+ utils.juju_log('INFO', "Granted user '%s' role '%s' on tenant '%s'" % \
1444+ (user, role, tenant))
1445 else:
1446- juju_log("User '%s' already has role '%s' on tenant '%s'" %\
1447- (user, role, tenant))
1448+ utils.juju_log('INFO',
1449+ "User '%s' already has role '%s' on tenant '%s'" % \
1450+ (user, role, tenant))
1451+
1452
1453 def generate_admin_token(config):
1454 """ generate and add an admin token """
1455 import manager
1456- manager = manager.KeystoneManager(endpoint='http://localhost:35357/v2.0/',
1457+ manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
1458 token='ADMIN')
1459 if config["admin-token"] == "None":
1460 import random
1461 token = random.randrange(1000000000000, 9999999999999)
1462 else:
1463 return config["admin-token"]
1464- manager.api.add_token(token, config["admin-user"], "admin", config["token-expiry"])
1465- juju_log("Generated and added new random admin token.")
1466+ manager.api.add_token(token, config["admin-user"],
1467+ "admin", config["token-expiry"])
1468+ utils.juju_log('INFO', "Generated and added new random admin token.")
1469 return token
1470
1471+
1472 def ensure_initial_admin(config):
1473- """ Ensures the minimum admin stuff exists in whatever database we're using.
1474+ """ Ensures the minimum admin stuff exists in whatever database we're
1475+ using.
1476 This and the helper functions it calls are meant to be idempotent and
1477 run during install as well as during db-changed. This will maintain
1478 the admin tenant, user, role, service entry and endpoint across every
1479@@ -395,10 +341,11 @@
1480 if config["admin-password"] != "None":
1481 passwd = config["admin-password"]
1482 elif os.path.isfile(stored_passwd):
1483- juju_log("Loading stored passwd from %s" % stored_passwd)
1484+ utils.juju_log('INFO', "Loading stored passwd from %s" % stored_passwd)
1485 passwd = open(stored_passwd, 'r').readline().strip('\n')
1486 if passwd == "":
1487- juju_log("Generating new passwd for user: %s" % config["admin-user"])
1488+ utils.juju_log('INFO', "Generating new passwd for user: %s" % \
1489+ config["admin-user"])
1490 passwd = execute("pwgen -c 16 1", die=True)[0]
1491 open(stored_passwd, 'w+').writelines("%s\n" % passwd)
1492
1493@@ -409,26 +356,82 @@
1494 create_role("KeystoneAdmin", config["admin-user"], 'admin')
1495 create_role("KeystoneServiceAdmin", config["admin-user"], 'admin')
1496 create_service_entry("keystone", "identity", "Keystone Identity Service")
1497- # following documentation here, perhaps we should be using juju
1498- # public/private addresses for public/internal urls.
1499- public_url = "http://%s:%s/v2.0" % (config["hostname"], config["service-port"])
1500- admin_url = "http://%s:%s/v2.0" % (config["hostname"], config["admin-port"])
1501- internal_url = "http://%s:%s/v2.0" % (config["hostname"], config["service-port"])
1502- create_endpoint_template("RegionOne", "keystone", public_url,
1503+
1504+ if cluster.is_clustered():
1505+ utils.juju_log('INFO', "Creating endpoint for clustered configuration")
1506+ service_host = auth_host = config["vip"]
1507+ else:
1508+ utils.juju_log('INFO', "Creating standard endpoint")
1509+ service_host = auth_host = config["hostname"]
1510+
1511+ for region in config['region'].split():
1512+ create_keystone_endpoint(service_host=service_host,
1513+ service_port=config["service-port"],
1514+ auth_host=auth_host,
1515+ auth_port=config["admin-port"],
1516+ region=region)
1517+
1518+
def create_keystone_endpoint(service_host, service_port,
                             auth_host, auth_port, region):
    """Register keystone's own endpoint template for *region*."""
    base = "http://%s:%s/v2.0"
    public_url = base % (service_host, service_port)
    admin_url = base % (auth_host, auth_port)
    internal_url = base % (service_host, service_port)
    create_endpoint_template(region, "keystone", public_url,
                             admin_url, internal_url)
1526
1527+
1528 def update_user_password(username, password):
1529 import manager
1530- manager = manager.KeystoneManager(endpoint='http://localhost:35357/v2.0/',
1531+ manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
1532 token=get_admin_token())
1533- juju_log("Updating password for user '%s'" % username)
1534+ utils.juju_log('INFO', "Updating password for user '%s'" % username)
1535
1536 user_id = manager.resolve_user_id(username)
1537 if user_id is None:
1538 error_out("Could not resolve user id for '%s'" % username)
1539
1540 manager.api.users.update_password(user=user_id, password=password)
1541- juju_log("Successfully updated password for user '%s'" % username)
1542+ utils.juju_log('INFO', "Successfully updated password for user '%s'" % \
1543+ username)
1544+
1545+
def load_stored_passwords(path=SERVICE_PASSWD_PATH):
    """Load 'user:password' pairs from *path* into a dict.

    Returns an empty dict when the file does not exist yet.
    """
    creds = {}
    if not os.path.isfile(path):
        return creds

    with open(path, 'r') as stored_passwd:
        for l in stored_passwd:
            if not l.strip():
                continue
            # maxsplit=1 so passwords containing ':' survive a round-trip
            # through save_stored_passwords().
            user, passwd = l.strip().split(':', 1)
            creds[user] = passwd
    return creds
1556+
1557+
def save_stored_passwords(path=SERVICE_PASSWD_PATH, **creds):
    """Write *creds* (user -> password) to *path*, one 'user:passwd' per
    line, replacing any existing contents.
    """
    # Plain loop instead of a side-effecting list comprehension; text
    # mode since we write str, not bytes.
    with open(path, 'w') as stored_passwd:
        for user, passwd in creds.items():
            stored_passwd.write('%s:%s\n' % (user, passwd))
1561+
1562+
def get_service_password(service_username):
    """Return the stored password for a service user, generating and
    persisting a fresh one on first request."""
    creds = load_stored_passwords()
    try:
        return creds[service_username]
    except KeyError:
        pass

    passwd = subprocess.check_output(['pwgen', '-c', '32', '1']).strip()
    creds[service_username] = passwd
    save_stored_passwords(**creds)
    return passwd
1573+
1574+
def configure_pki_tokens(config):
    '''Configure PKI token signing, if enabled.'''
    # PKI is not implemented yet: both paths end up with UUID tokens, but
    # we log when PKI was actually requested.
    if config['enable-pki'] in ['True', 'true']:
        utils.juju_log('INFO', 'TODO: PKI Support, setting to UUID for now.')
    update_config_block('signing', token_format='UUID')
1582
1583
1584 def do_openstack_upgrade(install_src, packages):
1585@@ -438,10 +441,12 @@
1586 old_vers = get_os_codename_package('keystone')
1587 new_vers = get_os_codename_install_source(install_src)
1588
1589- juju_log("Beginning Keystone upgrade: %s -> %s" % (old_vers, new_vers))
1590+ utils.juju_log('INFO',
1591+ "Beginning Keystone upgrade: %s -> %s" % \
1592+ (old_vers, new_vers))
1593
1594 # Backup previous config.
1595- juju_log("Backing up contents of /etc/keystone.")
1596+ utils.juju_log('INFO', "Backing up contents of /etc/keystone.")
1597 stamp = time.strftime('%Y%m%d%H%M')
1598 cmd = 'tar -pcf /var/lib/juju/keystone-backup-%s.tar /etc/keystone' % stamp
1599 execute(cmd, die=True, echo=True)
1600@@ -458,15 +463,17 @@
1601 set_admin_token(config['admin-token'])
1602
1603 # set the sql connection string if a shared-db relation is found.
1604- ids = relation_ids(relation_name='shared-db')
1605+ ids = utils.relation_ids('shared-db')
1606
1607 if ids:
1608- for id in ids:
1609- for unit in relation_list(id):
1610- juju_log('Configuring new keystone.conf for datbase access '\
1611- 'on existing database relation to %s' % unit)
1612- relation_data = relation_get_dict(relation_id=id,
1613- remote_unit=unit)
1614+ for rid in ids:
1615+ for unit in utils.relation_list(rid):
1616+ utils.juju_log('INFO',
1617+ 'Configuring new keystone.conf for '
1618+ 'database access on existing database'
1619+ ' relation to %s' % unit)
1620+ relation_data = utils.relation_get_dict(relation_id=rid,
1621+ remote_unit=unit)
1622
1623 update_config_block('sql', connection="mysql://%s:%s@%s/%s" %
1624 (config["database-user"],
1625@@ -474,9 +481,64 @@
1626 relation_data["private-address"],
1627 config["database"]))
1628
1629- juju_log('Running database migrations for %s' % new_vers)
1630- execute('service keystone stop', echo=True)
1631- execute('keystone-manage db_sync', echo=True, die=True)
1632- execute('service keystone start', echo=True)
1633+ utils.stop('keystone')
1634+ if (cluster.eligible_leader(CLUSTER_RES)):
1635+ utils.juju_log('INFO',
1636+ 'Running database migrations for %s' % new_vers)
1637+ execute('keystone-manage db_sync', echo=True, die=True)
1638+ else:
1639+ utils.juju_log('INFO',
1640+ 'Not cluster leader; snoozing whilst'
1641+ ' leader upgrades DB')
1642+ time.sleep(10)
1643+ utils.start('keystone')
1644 time.sleep(5)
1645- juju_log('Completed Keystone upgrade: %s -> %s' % (old_vers, new_vers))
1646+ utils.juju_log('INFO',
1647+ 'Completed Keystone upgrade: '
1648+ '%s -> %s' % (old_vers, new_vers))
1649+
1650+
def synchronize_service_credentials():
    '''
    Broadcast service credentials to peers or consume those that have been
    broadcasted by peer, depending on hook context.
    '''
    # Only the elected leader pushes; peers receive via unison.
    if not cluster.eligible_leader(CLUSTER_RES):
        return
    if not os.path.isfile(SERVICE_PASSWD_PATH):
        return
    utils.juju_log('INFO', 'Synchronizing service passwords to all peers.')
    unison.sync_to_peers(peer_interface='cluster',
                         paths=[SERVICE_PASSWD_PATH],
                         user=SSH_USER, verbose=True)
1663+
# Singleton cache for the charm's CA object (a list so the cached
# instance can be stored without a 'global' statement).
CA = []


def get_ca(user='keystone', group='keystone'):
    """
    Initialize a new CA object if one hasn't already been loaded.
    This will create a new CA or load an existing one.
    """
    if CA:
        return CA[0]

    if not os.path.isdir(SSL_DIR):
        os.mkdir(SSL_DIR)
    d_name = '_'.join(SSL_CA_NAME.lower().split(' '))
    int_ca_dir = os.path.join(SSL_DIR, '%s_intermediate_ca' % d_name)
    root_ca_dir = os.path.join(SSL_DIR, '%s_root_ca' % d_name)
    ca = ssl.JujuCA(name=SSL_CA_NAME, user=user, group=group,
                    ca_dir=int_ca_dir, root_ca_dir=root_ca_dir)
    # SSL_DIR is synchronized via all peers over unison+ssh, need
    # to ensure permissions.
    execute('chown -R %s.%s %s' % (user, group, SSL_DIR))
    execute('chmod -R g+rwx %s' % SSL_DIR)
    CA.append(ca)
    return CA[0]
1687+
1688+
def https():
    """Return True when HTTPS endpoints are enabled, either by charm
    config or by the cluster configuration."""
    by_config = utils.config_get('https-service-endpoints') in \
        ["yes", "true", "True"]
    return bool(by_config or cluster.https())
1695
1696=== added file 'hooks/lib/apache_utils.py'
1697--- hooks/lib/apache_utils.py 1970-01-01 00:00:00 +0000
1698+++ hooks/lib/apache_utils.py 2013-05-29 18:13:26 +0000
1699@@ -0,0 +1,196 @@
1700+#
1701+# Copyright 2012 Canonical Ltd.
1702+#
1703+# This file is sourced from lp:openstack-charm-helpers
1704+#
1705+# Authors:
1706+# James Page <james.page@ubuntu.com>
1707+# Adam Gandelman <adamg@ubuntu.com>
1708+#
1709+
1710+from lib.utils import (
1711+ relation_ids,
1712+ relation_list,
1713+ relation_get,
1714+ render_template,
1715+ juju_log,
1716+ config_get,
1717+ install,
1718+ get_host_ip,
1719+ restart
1720+ )
1721+from lib.cluster_utils import https
1722+
1723+import os
1724+import subprocess
1725+from base64 import b64decode
1726+
1727+APACHE_SITE_DIR = "/etc/apache2/sites-available"
1728+SITE_TEMPLATE = "apache2_site.tmpl"
1729+RELOAD_CHECK = "To activate the new configuration"
1730+
1731+
def get_cert():
    """Return (ssl_cert, ssl_key), preferring charm config and falling
    back to values published on identity-service relations."""
    cert = config_get('ssl_cert')
    key = config_get('ssl_key')
    if cert and key:
        return (cert, key)

    juju_log('INFO',
             "Inspecting identity-service relations for SSL certificate.")
    cert = key = None
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            # Keep the first non-empty value seen on any unit.
            cert = cert or relation_get('ssl_cert', rid=r_id, unit=unit)
            key = key or relation_get('ssl_key', rid=r_id, unit=unit)
    return (cert, key)
1748+
1749+
def get_ca_cert():
    """Return the first 'ca_cert' published on any identity-service
    relation, or None when no unit has one."""
    juju_log('INFO',
             "Inspecting identity-service relations for CA SSL certificate.")
    ca_cert = None
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            # Only query further units until a value has been found.
            ca_cert = ca_cert or relation_get('ca_cert', rid=r_id,
                                              unit=unit)
    return ca_cert
1760+
1761+
def install_ca_cert(ca_cert):
    """Install *ca_cert* system-wide and refresh the trusted certificate
    store.  No-op when ca_cert is empty/None."""
    if not ca_cert:
        return
    cert_path = ('/usr/local/share/ca-certificates/'
                 'keystone_juju_ca_cert.crt')
    with open(cert_path, 'w') as crt:
        crt.write(ca_cert)
    subprocess.check_call(['update-ca-certificates', '--fresh'])
1768+
1769+
def enable_https(port_maps, namespace, cert, key, ca_cert=None):
    '''
    For a given number of port mappings, configures apache2
    HTTPs local reverse proxying using certificates and keys provided in
    either configuration data (preferred) or relation data. Assumes ports
    are not in use (calling charm should ensure that).

    port_maps: dict: external to internal port mappings
    namespace: str: name of charm
    cert/key/ca_cert: base64-encoded PEM data (as carried over relations)
    Returns True on success, False when certificate data is missing.
    '''
    def _write_if_changed(path, new_content):
        # Write only when content differs; the return value drives the
        # decision to restart apache.
        content = None
        if os.path.exists(path):
            with open(path, 'r') as f:
                content = f.read().strip()
        if content != new_content:
            with open(path, 'w') as f:
                f.write(new_content)
            return True
        else:
            return False

    juju_log('INFO', "Enabling HTTPS for port mappings: {}".format(port_maps))
    http_restart = False

    if cert:
        cert = b64decode(cert)
    if key:
        key = b64decode(key)
    if ca_cert:
        ca_cert = b64decode(ca_cert)

    # Both a cert AND a key are required; the original only bailed when
    # *both* were missing and would then crash writing None below.
    if not cert or not key:
        juju_log('ERROR',
                 "Expected but could not find SSL certificate data, not "
                 "configuring HTTPS!")
        return False

    install('apache2')
    # a2enmod prints RELOAD_CHECK only when a module was newly enabled.
    if RELOAD_CHECK in subprocess.check_output(['a2enmod', 'ssl',
                                                'proxy', 'proxy_http']):
        http_restart = True

    ssl_dir = os.path.join('/etc/apache2/ssl', namespace)
    if not os.path.exists(ssl_dir):
        os.makedirs(ssl_dir)

    if (_write_if_changed(os.path.join(ssl_dir, 'cert'), cert)):
        http_restart = True
    if (_write_if_changed(os.path.join(ssl_dir, 'key'), key)):
        http_restart = True
    # Private key must not be world-readable.
    os.chmod(os.path.join(ssl_dir, 'key'), 0o600)

    install_ca_cert(ca_cert)

    sites_dir = '/etc/apache2/sites-available'
    for ext_port, int_port in port_maps.items():
        juju_log('INFO',
                 'Creating apache2 reverse proxy vhost'
                 ' for {}:{}'.format(ext_port,
                                     int_port))
        site = "{}_{}".format(namespace, ext_port)
        site_path = os.path.join(sites_dir, site)
        with open(site_path, 'w') as fsite:
            context = {
                "ext": ext_port,
                "int": int_port,
                "namespace": namespace,
                "private_address": get_host_ip()
            }
            fsite.write(render_template(SITE_TEMPLATE,
                                        context))

        if RELOAD_CHECK in subprocess.check_output(['a2ensite', site]):
            http_restart = True

    if http_restart:
        restart('apache2')

    return True
1850+
1851+
def disable_https(port_maps, namespace):
    '''
    Ensure HTTPS reverse proxying is disabled for given port mappings.

    port_maps: dict: of ext -> int port mappings
    namespace: str: name of charm
    '''
    juju_log('INFO', 'Ensuring HTTPS disabled for {}'.format(port_maps))

    # Nothing to do when apache was never configured for this charm.
    if (not os.path.exists('/etc/apache2') or
        not os.path.exists(os.path.join('/etc/apache2/ssl', namespace))):
        return

    http_restart = False
    for ext_port in port_maps.keys():
        if os.path.exists(os.path.join(APACHE_SITE_DIR,
                                       "{}_{}".format(namespace,
                                                      ext_port))):
            juju_log('INFO',
                     "Disabling HTTPS reverse proxy"
                     " for {} {}.".format(namespace,
                                          ext_port))
            if (RELOAD_CHECK in
                subprocess.check_output(['a2dissite',
                                         '{}_{}'.format(namespace,
                                                        ext_port)])):
                http_restart = True

    if http_restart:
        # BUG FIX: restart() takes *services; the original passed a list
        # (restart(['apache2'])) which produced the broken command
        # "service ['apache2'] restart".
        restart('apache2')
1882+
1883+
def setup_https(port_maps, namespace, cert, key, ca_cert=None):
    '''
    Ensures HTTPS is either enabled or disabled for given port
    mapping.

    port_maps: dict: of ext -> int port mappings
    namespace: str: name of charm
    '''
    # BUG FIX: https is a function (lib.cluster_utils.https); the bare
    # 'if not https:' tested the function object, which is always truthy,
    # so the disable path was unreachable.  Call it instead.
    if not https():
        disable_https(port_maps, namespace)
    else:
        enable_https(port_maps, namespace, cert, key, ca_cert)
1896
1897=== added file 'hooks/lib/cluster_utils.py'
1898--- hooks/lib/cluster_utils.py 1970-01-01 00:00:00 +0000
1899+++ hooks/lib/cluster_utils.py 2013-05-29 18:13:26 +0000
1900@@ -0,0 +1,130 @@
1901+#
1902+# Copyright 2012 Canonical Ltd.
1903+#
1904+# This file is sourced from lp:openstack-charm-helpers
1905+#
1906+# Authors:
1907+# James Page <james.page@ubuntu.com>
1908+# Adam Gandelman <adamg@ubuntu.com>
1909+#
1910+
1911+from lib.utils import (
1912+ juju_log,
1913+ relation_ids,
1914+ relation_list,
1915+ relation_get,
1916+ get_unit_hostname,
1917+ config_get
1918+ )
1919+import subprocess
1920+import os
1921+
1922+
def is_clustered():
    """True when any unit on the 'ha' relation reports 'clustered'."""
    for rid in (relation_ids('ha') or []):
        for member in (relation_list(rid) or []):
            if relation_get('clustered', rid=rid, unit=member):
                return True
    return False
1932+
1933+
def is_leader(resource):
    """True when the CRM shows this host as holding 'resource'.

    A failing 'crm' invocation is treated as not-leader."""
    try:
        status = subprocess.check_output(
            ["crm", "resource", "show", resource])
    except subprocess.CalledProcessError:
        return False
    return get_unit_hostname() in status
1948+
1949+
def peer_units():
    """Return all remote units on the 'cluster' peer relation."""
    return [member
            for rid in (relation_ids('cluster') or [])
            for member in (relation_list(rid) or [])]
1956+
1957+
def oldest_peer(peers):
    """True when this unit's number is lower than that of every peer
    (unit numbers are taken from 'service/N' names)."""
    local_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
    return all(int(peer.split('/')[1]) >= local_no for peer in peers)
1965+
1966+
def eligible_leader(resource):
    """Decide whether this unit should perform leader-only actions."""
    if is_clustered():
        # Clustered: defer to whichever node CRM says owns the resource.
        if not is_leader(resource):
            juju_log('INFO', 'Deferring action to CRM leader.')
            return False
        return True
    # Not clustered: the oldest peer (lowest unit number) acts.
    peers = peer_units()
    if peers and not oldest_peer(peers):
        juju_log('INFO', 'Deferring action to oldest service unit.')
        return False
    return True
1978+
1979+
def https():
    '''
    Determines whether enough data has been provided in configuration
    or relation data to configure HTTPS.

    returns: boolean
    '''
    if config_get('use-https') == "yes":
        return True
    if config_get('ssl_cert') and config_get('ssl_key'):
        return True
    # Fall back to certificate material published on the
    # identity-service relation.
    wanted = ('https_keystone', 'ssl_cert', 'ssl_key', 'ca_cert')
    for rid in relation_ids('identity-service'):
        for member in relation_list(rid):
            if all(relation_get(attr, rid=rid, unit=member)
                   for attr in wanted):
                return True
    return False
1999+
2000+
def determine_api_port(public_port):
    '''
    Determine correct API server listening port based on
    existence of HTTPS reverse proxy and/or haproxy.

    public_port: int: standard public port for given service

    returns: int: the correct listening port for the API service
    '''
    offset = 0
    if len(peer_units()) > 0 or is_clustered():
        offset += 10  # haproxy fronts the API when clustered/peered
    if https():
        offset += 10  # apache SSL reverse proxy fronts it as well
    return public_port - offset
2016+
2017+
def determine_haproxy_port(public_port):
    '''
    Description: Determine correct proxy listening port based on public IP +
    existence of HTTPS reverse proxy.

    public_port: int: standard public port for given service

    returns: int: the correct listening port for the HAProxy service
    '''
    # Shift down by 10 when an SSL terminator sits in front of haproxy.
    return public_port - (10 if https() else 0)
2031
2032=== added file 'hooks/lib/haproxy_utils.py'
2033--- hooks/lib/haproxy_utils.py 1970-01-01 00:00:00 +0000
2034+++ hooks/lib/haproxy_utils.py 2013-05-29 18:13:26 +0000
2035@@ -0,0 +1,55 @@
2036+#
2037+# Copyright 2012 Canonical Ltd.
2038+#
2039+# This file is sourced from lp:openstack-charm-helpers
2040+#
2041+# Authors:
2042+# James Page <james.page@ubuntu.com>
2043+# Adam Gandelman <adamg@ubuntu.com>
2044+#
2045+
2046+from lib.utils import (
2047+ relation_ids,
2048+ relation_list,
2049+ relation_get,
2050+ unit_get,
2051+ reload,
2052+ render_template
2053+ )
2054+import os
2055+
2056+HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
2057+HAPROXY_DEFAULT = '/etc/default/haproxy'
2058+
2059+
def configure_haproxy(service_ports):
    '''
    Configure HAProxy based on the current peers in the service
    cluster using the provided port map:

        "swift": [ 8080, 8070 ]

    HAproxy will also be reloaded/started if required

    service_ports: dict: dict of lists of [ frontend, backend ]
    '''
    # Backend pool: this unit plus every peer on the cluster relation.
    hosts = {os.getenv('JUJU_UNIT_NAME').replace('/', '-'):
             unit_get('private-address')}
    for rid in relation_ids('cluster'):
        for member in relation_list(rid):
            hosts[member.replace('/', '-')] = \
                relation_get(attribute='private-address',
                             rid=rid,
                             unit=member)
    rendered = render_template(os.path.basename(HAPROXY_CONF),
                               {'units': hosts,
                                'service_ports': service_ports})
    with open(HAPROXY_CONF, 'w') as conf:
        conf.write(rendered)
    with open(HAPROXY_DEFAULT, 'w') as default:
        default.write('ENABLED=1')

    reload('haproxy')
2091
2092=== modified file 'hooks/lib/openstack_common.py'
2093--- hooks/lib/openstack_common.py 2012-12-05 20:35:05 +0000
2094+++ hooks/lib/openstack_common.py 2013-05-29 18:13:26 +0000
2095@@ -2,7 +2,9 @@
2096
2097 # Common python helper functions used for OpenStack charms.
2098
2099+import apt_pkg as apt
2100 import subprocess
2101+import os
2102
2103 CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
2104 CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
2105@@ -11,7 +13,7 @@
2106 'oneiric': 'diablo',
2107 'precise': 'essex',
2108 'quantal': 'folsom',
2109- 'raring' : 'grizzly'
2110+ 'raring': 'grizzly',
2111 }
2112
2113
2114@@ -19,7 +21,18 @@
2115 '2011.2': 'diablo',
2116 '2012.1': 'essex',
2117 '2012.2': 'folsom',
2118- '2013.1': 'grizzly'
2119+ '2013.1': 'grizzly',
2120+ '2013.2': 'havana',
2121+}
2122+
2123+# The ugly duckling
2124+swift_codenames = {
2125+ '1.4.3': 'diablo',
2126+ '1.4.8': 'essex',
2127+ '1.7.4': 'folsom',
2128+ '1.7.6': 'grizzly',
2129+ '1.7.7': 'grizzly',
2130+ '1.8.0': 'grizzly',
2131 }
2132
2133
2134@@ -62,11 +75,12 @@
2135 return ca_rel
2136
2137 # Best guess match based on deb string provided
2138- if src.startswith('deb'):
2139+ if src.startswith('deb') or src.startswith('ppa'):
2140 for k, v in openstack_codenames.iteritems():
2141 if v in src:
2142 return v
2143
2144+
2145 def get_os_codename_version(vers):
2146 '''Determine OpenStack codename from version number.'''
2147 try:
2148@@ -88,52 +102,54 @@
2149
def get_os_codename_package(pkg):
    '''Derive OpenStack release codename from an installed package.'''
    apt.init()
    cache = apt.Cache()
    try:
        pkg = cache[pkg]
    except KeyError:
        # Narrowed from a bare 'except:', which also swallowed
        # KeyboardInterrupt/SystemExit; a missing package name raises
        # KeyError from the apt cache mapping.
        e = 'Could not determine version of installed package: %s' % pkg
        error_out(e)

    vers = apt.UpstreamVersion(pkg.current_ver.ver_str)

    try:
        # swift is versioned independently of the integrated releases,
        # so it uses its own (shorter) version -> codename map.
        if 'swift' in pkg.name:
            vers = vers[:5]
            return swift_codenames[vers]
        else:
            vers = vers[:6]
            return openstack_codenames[vers]
    except KeyError:
        e = 'Could not determine OpenStack codename for version %s' % vers
        error_out(e)
2198
2199
def get_os_version_package(pkg):
    '''Derive OpenStack version number from an installed package.'''
    codename = get_os_codename_package(pkg)

    # swift carries its own version map; everything else shares one.
    vers_map = swift_codenames if 'swift' in pkg else openstack_codenames

    for version, cname in vers_map.iteritems():
        if cname == codename:
            return version
    e = "Could not determine OpenStack version for package: %s" % pkg
    error_out(e)
2214+
2215+
2216 def configure_installation_source(rel):
2217 '''Configure apt installation source.'''
2218
2219- def _import_key(id):
2220+ def _import_key(keyid):
2221 cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \
2222- "--recv-keys %s" % id
2223+ "--recv-keys %s" % keyid
2224 try:
2225 subprocess.check_call(cmd.split(' '))
2226- except:
2227- error_out("Error importing repo key %s" % id)
2228+ except subprocess.CalledProcessError:
2229+ error_out("Error importing repo key %s" % keyid)
2230
2231 if rel == 'distro':
2232 return
2233@@ -142,7 +158,7 @@
2234 subprocess.check_call(["add-apt-repository", "-y", src])
2235 elif rel[:3] == "deb":
2236 l = len(rel.split('|'))
2237- if l == 2:
2238+ if l == 2:
2239 src, key = rel.split('|')
2240 juju_log("Importing PPA key from keyserver for %s" % src)
2241 _import_key(key)
2242@@ -164,9 +180,11 @@
2243 'version (%s)' % (ca_rel, ubuntu_rel)
2244 error_out(e)
2245
2246- if ca_rel == 'folsom/staging':
2247+ if 'staging' in ca_rel:
2248 # staging is just a regular PPA.
2249- cmd = 'add-apt-repository -y ppa:ubuntu-cloud-archive/folsom-staging'
2250+ os_rel = ca_rel.split('/')[0]
2251+ ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
2252+ cmd = 'add-apt-repository -y %s' % ppa
2253 subprocess.check_call(cmd.split(' '))
2254 return
2255
2256@@ -174,7 +192,10 @@
2257 pockets = {
2258 'folsom': 'precise-updates/folsom',
2259 'folsom/updates': 'precise-updates/folsom',
2260- 'folsom/proposed': 'precise-proposed/folsom'
2261+ 'folsom/proposed': 'precise-proposed/folsom',
2262+ 'grizzly': 'precise-updates/grizzly',
2263+ 'grizzly/updates': 'precise-updates/grizzly',
2264+ 'grizzly/proposed': 'precise-proposed/grizzly'
2265 }
2266
2267 try:
2268@@ -191,3 +212,19 @@
2269 else:
2270 error_out("Invalid openstack-release specified: %s" % rel)
2271
2272+
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
    """
    Write an rc file in the charm-delivered directory containing
    exported environment variables provided by env_vars. Any charm scripts
    run outside the juju hook environment can source this scriptrc to
    obtain updated config information necessary to perform health checks
    or service changes.
    """
    charm_dir = os.getenv('CHARM_DIR')
    juju_rc_path = "%s/%s" % (charm_dir, script_path)
    # 'w' (text) instead of 'wb': we write str, not bytes.
    with open(juju_rc_path, 'w') as rc_script:
        rc_script.write("#!/bin/bash\n")
        # Plain loop replaces the original side-effecting list
        # comprehension; 'script_path' is filtered out defensively in
        # case a caller smuggles it into env_vars.
        for name, value in env_vars.items():
            if name != "script_path":
                rc_script.write('export %s=%s\n' % (name, value))
2288
2289=== added file 'hooks/lib/unison.py'
2290--- hooks/lib/unison.py 1970-01-01 00:00:00 +0000
2291+++ hooks/lib/unison.py 2013-05-29 18:13:26 +0000
2292@@ -0,0 +1,220 @@
2293+#!/usr/bin/python
2294+#
2295+# Easy file synchronization among peer units using ssh + unison.
2296+#
2297+# From *both* peer relation -joined and -changed, add a call to
2298+# ssh_authorized_peers() describing the peer relation and the desired
2299+# user + group. After all peer relations have settled, all hosts should
2300+# be able to connect to on another via key auth'd ssh as the specified user.
2301+#
2302+# Other hooks are then free to synchronize files and directories using
2303+# sync_to_peers().
2304+#
2305+# For a peer relation named 'cluster', for example:
2306+#
2307+# cluster-relation-joined:
2308+# ...
2309+# ssh_authorized_peers(peer_interface='cluster',
2310+# user='juju_ssh', group='juju_ssh',
2311+# ensure_user=True)
2312+# ...
2313+#
2314+# cluster-relation-changed:
2315+# ...
2316+# ssh_authorized_peers(peer_interface='cluster',
2317+# user='juju_ssh', group='juju_ssh',
2318+# ensure_user=True)
2319+# ...
2320+#
2321+# Hooks are now free to sync files as easily as:
2322+#
2323+# files = ['/etc/fstab', '/etc/apt.conf.d/']
2324+# sync_to_peers(peer_interface='cluster',
2325+# user='juju_ssh, paths=[files])
2326+#
2327+# It is assumed the charm itself has setup permissions on each unit
2328+# such that 'juju_ssh' has read + write permissions. Also assumed
2329+# that the calling charm takes care of leader delegation.
2330+#
2331+# TODO: Currently depends on the utils.py shipped with the keystone charm.
2332+# Either copy required functionality to this library or depend on
2333+# something more generic.
2334+
2335+import os
2336+import sys
2337+import lib.utils as utils
2338+import subprocess
2339+import grp
2340+import pwd
2341+
2342+
def get_homedir(user):
    """Return the home directory of 'user'; exit the hook when the
    account does not exist."""
    try:
        return pwd.getpwnam(user).pw_dir
    except KeyError:
        utils.juju_log('INFO',
                       'Could not get homedir for user %s: user exists?')
        sys.exit(1)
2351+
2352+
def get_keypair(user):
    """Ensure 'user' owns an RSA keypair, generating any missing half,
    and return (private_key_text, public_key_text)."""
    ssh_dir = os.path.join(get_homedir(user), '.ssh')
    if not os.path.isdir(ssh_dir):
        os.mkdir(ssh_dir)

    priv_key = os.path.join(ssh_dir, 'id_rsa')
    if not os.path.isfile(priv_key):
        utils.juju_log('INFO', 'Generating new ssh key for user %s.' % user)
        subprocess.check_call(['ssh-keygen', '-q', '-N', '', '-t', 'rsa',
                               '-b', '2048', '-f', priv_key])

    pub_key = '%s.pub' % priv_key
    if not os.path.isfile(pub_key):
        # Derive the public half from the existing private key.
        utils.juju_log('INFO', 'Generatring missing ssh public key @ %s.' % \
                       pub_key)
        derived = subprocess.check_output(
            ['ssh-keygen', '-y', '-f', priv_key]).strip()
        with open(pub_key, 'wb') as out:
            out.write(derived)
    subprocess.check_call(['chown', '-R', user, ssh_dir])
    with open(priv_key, 'r') as priv:
        private = priv.read().strip()
    with open(pub_key, 'r') as pub:
        public = pub.read().strip()
    return private, public
2377+
2378+
def write_authorized_keys(user, keys):
    """Overwrite the user's authorized_keys with the given public keys."""
    ssh_dir = os.path.join(get_homedir(user), '.ssh')
    auth_keys = os.path.join(ssh_dir, 'authorized_keys')
    utils.juju_log('INFO', 'Syncing authorized_keys @ %s.' % auth_keys)
    with open(auth_keys, 'wb') as out:
        out.writelines('%s\n' % key for key in keys)
2387+
2388+
def write_known_hosts(user, hosts):
    """Rebuild the user's known_hosts from an ssh-keyscan of each host."""
    ssh_dir = os.path.join(get_homedir(user), '.ssh')
    known_hosts = os.path.join(ssh_dir, 'known_hosts')
    scanned = [subprocess.check_output(
                   ['ssh-keyscan', '-H', '-t', 'rsa', host]).strip()
               for host in hosts]
    utils.juju_log('INFO', 'Syncing known_hosts @ %s.' % known_hosts)
    with open(known_hosts, 'wb') as out:
        for entry in scanned:
            out.write('%s\n' % entry)
2402+
2403+
def ensure_user(user, group=None):
    """Create a bash-shelled system user (and optional group) if absent."""
    try:
        pwd.getpwnam(user)
        return  # already present, nothing to do
    except KeyError:
        pass
    utils.juju_log('INFO', 'Creating new user %s.%s.' % (user, group))
    cmd = ['adduser', '--system', '--shell', '/bin/bash', user]
    if group:
        try:
            grp.getgrnam(group)
        except KeyError:
            subprocess.check_call(['addgroup', group])
        cmd += ['--ingroup', group]
    subprocess.check_call(cmd)
2418+
2419+
def ssh_authorized_peers(peer_interface, user, group=None, ensure_local_user=False):
    """
    Main setup function, should be called from both peer -changed and -joined
    hooks with the same parameters.

    On -joined: publish this unit's public key on the relation.
    On -changed: collect every peer's key/address, rewrite
    authorized_keys + known_hosts, and advertise which hosts we trust.
    Any other hook name is silently a no-op.
    """
    if ensure_local_user:
        ensure_user(user, group)
    # priv_key is unused here; get_keypair() is called for its side
    # effect of generating the keypair on first run.
    priv_key, pub_key = get_keypair(user)
    # Dispatch on the hook name juju invoked us as (argv[0]).
    hook = os.path.basename(sys.argv[0])
    if hook == '%s-relation-joined' % peer_interface:
        utils.relation_set(ssh_pub_key=pub_key)
        print 'joined'
    elif hook == '%s-relation-changed' % peer_interface:
        hosts = []
        keys = []
        for r_id in utils.relation_ids(peer_interface):
            for unit in utils.relation_list(r_id):
                settings = utils.relation_get_dict(relation_id=r_id,
                                                   remote_unit=unit)
                # A peer that has not yet run -joined has no key; skip it
                # rather than fail, it will be picked up on a later run.
                if 'ssh_pub_key' in settings:
                    keys.append(settings['ssh_pub_key'])
                    hosts.append(settings['private-address'])
                else:
                    utils.juju_log('INFO',
                                   'ssh_authorized_peers(): ssh_pub_key '\
                                   'missing for unit %s, skipping.' % unit)
        write_authorized_keys(user, keys)
        write_known_hosts(user, hosts)
        # Tell peers which hosts may ssh in (':'-separated list).
        authed_hosts = ':'.join(hosts)
        utils.relation_set(ssh_authorized_hosts=authed_hosts)
2450+
2451+
2452+def _run_as_user(user):
2453+ try:
2454+ user = pwd.getpwnam(user)
2455+ except KeyError:
2456+ utils.juju_log('INFO', 'Invalid user: %s' % user)
2457+ sys.exit(1)
2458+ uid, gid = user.pw_uid, user.pw_gid
2459+ os.environ['HOME'] = user.pw_dir
2460+
2461+ def _inner():
2462+ os.setgid(gid)
2463+ os.setuid(uid)
2464+ return _inner
2465+
2466+
def run_as_user(user, cmd):
    """Run 'cmd' with privileges dropped to 'user'; return its stdout."""
    output = subprocess.check_output(cmd, preexec_fn=_run_as_user(user))
    return output
2469+
2470+
def sync_to_peers(peer_interface, user, paths=[], verbose=False):
    """Push each local path to every peer that has authorized this host.

    Peers are discovered on 'peer_interface'; a peer is only synced to
    once it lists our private-address in its ssh_authorized_hosts.
    NOTE(review): mutable default argument 'paths=[]' — safe here only
    because the list is never mutated.
    """
    base_cmd = ['unison', '-auto', '-batch=true', '-confirmbigdel=false',
                '-fastcheck=true', '-group=false', '-owner=false',
                '-prefer=newer', '-times=true']
    if not verbose:
        base_cmd.append('-silent')

    hosts = []
    for r_id in (utils.relation_ids(peer_interface) or []):
        for unit in utils.relation_list(r_id):
            settings = utils.relation_get_dict(relation_id=r_id,
                                               remote_unit=unit)
            try:
                authed_hosts = settings['ssh_authorized_hosts'].split(':')
            except KeyError:
                # Abort the entire sync: nothing is authorized anywhere
                # yet, so no peer can accept a connection.
                print 'unison sync_to_peers: peer has not authorized *any* '\
                      'hosts yet.'
                return

            # Only sync to peers that explicitly list us.
            unit_hostname = utils.unit_get('private-address')
            add_host = None
            for authed_host in authed_hosts:
                if unit_hostname == authed_host:
                    add_host = settings['private-address']
            if add_host:
                hosts.append(settings['private-address'])
            else:
                print 'unison sync_to_peers: peer (%s) has not authorized '\
                      '*this* host yet, skipping.' %\
                      settings['private-address']

    for path in paths:
        # removing trailing slash from directory paths, unison
        # doesn't like these.
        if path.endswith('/'):
            path = path[:(len(path) - 1)]
        for host in hosts:
            cmd = base_cmd + [path, 'ssh://%s@%s/%s' % (user, host, path)]
            utils.juju_log('INFO', 'Syncing local path %s to %s@%s:%s' %\
                           (path, user, host, path))
            print ' '.join(cmd)
            run_as_user(user, cmd)
2513
2514=== added file 'hooks/lib/utils.py'
2515--- hooks/lib/utils.py 1970-01-01 00:00:00 +0000
2516+++ hooks/lib/utils.py 2013-05-29 18:13:26 +0000
2517@@ -0,0 +1,332 @@
2518+#
2519+# Copyright 2012 Canonical Ltd.
2520+#
2521+# This file is sourced from lp:openstack-charm-helpers
2522+#
2523+# Authors:
2524+# James Page <james.page@ubuntu.com>
2525+# Paul Collins <paul.collins@canonical.com>
2526+# Adam Gandelman <adamg@ubuntu.com>
2527+#
2528+
2529+import json
2530+import os
2531+import subprocess
2532+import socket
2533+import sys
2534+
2535+
def do_hooks(hooks):
    """Dispatch to the function registered for the current hook name
    (derived from sys.argv[0]); unknown hooks are logged and ignored."""
    hook = os.path.basename(sys.argv[0])
    if hook in hooks:
        hooks[hook]()
    else:
        juju_log('INFO',
                 "This charm doesn't know how to handle '{}'.".format(hook))
2546+
2547+
def install(*pkgs):
    """Install the given packages non-interactively via apt-get."""
    subprocess.check_call(['apt-get', '-y', 'install'] + list(pkgs))
2557+
# Directory (relative to the charm root) holding jinja2 templates.
TEMPLATES_DIR = 'templates'

# Bootstrap third-party dependencies at import time: hooks may execute
# before these packages are present, so install them on demand.
try:
    import jinja2
except ImportError:
    install('python-jinja2')
    import jinja2

try:
    import dns.resolver
except ImportError:
    install('python-dnspython')
    import dns.resolver
2571+
2572+
def render_template(template_name, context, template_dir=TEMPLATES_DIR):
    """Render the named jinja2 template from template_dir with context."""
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir)
    )
    return env.get_template(template_name).render(context)
2579+
# Apt source stanza for the Ubuntu Cloud Archive; '{}' is filled with a
# pocket from CLOUD_ARCHIVE_POCKETS below.
CLOUD_ARCHIVE = \
""" # Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""

# Map of user-facing openstack-origin pocket names to archive pockets.
CLOUD_ARCHIVE_POCKETS = {
    'folsom': 'precise-updates/folsom',
    'folsom/updates': 'precise-updates/folsom',
    'folsom/proposed': 'precise-proposed/folsom',
    'grizzly': 'precise-updates/grizzly',
    'grizzly/updates': 'precise-updates/grizzly',
    'grizzly/proposed': 'precise-proposed/grizzly'
    }
2593+
2594+
def configure_source():
    """Configure an apt source from the 'openstack-origin' charm config.

    Supports 'ppa:', 'cloud:' (Ubuntu Cloud Archive) and raw 'deb ...'
    lines, the latter optionally suffixed with '|<keyid>' to import a
    signing key.  Finishes with an 'apt-get update'.
    """
    source = str(config_get('openstack-origin'))
    if not source:
        return
    if source.startswith('ppa:'):
        cmd = [
            'add-apt-repository',
            source
        ]
        subprocess.check_call(cmd)
    if source.startswith('cloud:'):
        # CA values should be formatted as cloud:ubuntu-openstack/pocket, eg:
        # cloud:precise-folsom/updates or cloud:precise-folsom/proposed
        install('ubuntu-cloud-keyring')
        pocket = source.split(':')[1]
        pocket = pocket.split('-')[1]
        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
            apt.write(CLOUD_ARCHIVE.format(CLOUD_ARCHIVE_POCKETS[pocket]))
    if source.startswith('deb'):
        l = len(source.split('|'))
        if l == 2:
            (apt_line, key) = source.split('|')
            # BUG FIX: '--keyserver' and its value must be separate argv
            # elements; the original passed them as one string
            # ('--keyserver keyserver.ubuntu.com'), which apt-key
            # rejects as an unknown option.
            cmd = [
                'apt-key',
                'adv', '--keyserver', 'keyserver.ubuntu.com',
                '--recv-keys', key
            ]
            subprocess.check_call(cmd)
        elif l == 1:
            apt_line = source

        with open('/etc/apt/sources.list.d/quantum.list', 'w') as apt:
            apt.write(apt_line + "\n")
    cmd = [
        'apt-get',
        'update'
    ]
    subprocess.check_call(cmd)
2633+
# Protocols
TCP = 'TCP'
UDP = 'UDP'


def expose(port, protocol='TCP'):
    """Open a port on this unit via the juju open-port tool."""
    subprocess.check_call(['open-port', '{}/{}'.format(port, protocol)])
2645+
2646+
def juju_log(severity, message):
    """Write a message to the juju log at the given severity level."""
    subprocess.check_call(['juju-log', '--log-level', severity, message])
2654+
2655+
# Process-wide memoization store shared by all @cached functions; lives
# for the duration of a single hook invocation.
cache = {}


def cached(func):
    """Memoize 'func' in the module-level 'cache' dict.

    Results are keyed on str((func, args, kwargs)), so arguments must
    have stable string representations.
    """
    import functools

    # functools.wraps preserves __name__/__doc__ for introspection,
    # which the original decorator clobbered.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        key = str((func, args, kwargs))
        try:
            return cache[key]
        except KeyError:
            res = func(*args, **kwargs)
            cache[key] = res
            return res
    return wrapper
2670+
2671+
@cached
def relation_ids(relation):
    """Return the list of relation ids for 'relation'.

    Always returns a list (empty when there are no relations).  The
    original 'result == ""' branch was dead code — split() always
    yields a list, which can never equal a string — and callers already
    guard with 'relation_ids(...) or []'.
    """
    cmd = [
        'relation-ids',
        relation
    ]
    return subprocess.check_output(cmd).split()
2683+
2684+
@cached
def relation_list(rid):
    """Return the list of remote units on relation id 'rid'.

    Always returns a list (empty when the relation has no units); the
    original dead 'result == ""' comparison has been dropped since a
    list never equals a string.
    """
    cmd = [
        'relation-list',
        '-r', rid,
    ]
    return subprocess.check_output(cmd).split()
2696+
2697+
@cached
def relation_get(attribute, unit=None, rid=None):
    """Fetch a single relation attribute; None when unset or empty."""
    cmd = ['relation-get']
    if rid:
        cmd += ['-r', rid]
    cmd.append(attribute)
    if unit:
        cmd.append(unit)
    value = subprocess.check_output(cmd).strip()  # IGNORE:E1103
    return value or None
2714+
2715+
@cached
def relation_get_dict(relation_id=None, remote_unit=None):
    """Obtain all relation data as dict by way of JSON.

    When 'remote_unit' is given, JUJU_REMOTE_UNIT is temporarily
    overridden for the relation-get call and restored afterwards.
    """
    cmd = [
        'relation-get', '--format=json'
    ]
    if relation_id:
        cmd.append('-r')
        cmd.append(relation_id)
    if remote_unit:
        remote_unit_orig = os.getenv('JUJU_REMOTE_UNIT', None)
        os.environ['JUJU_REMOTE_UNIT'] = remote_unit
    j = subprocess.check_output(cmd)
    if remote_unit:
        # BUG FIX: restore the environment exactly.  The original only
        # restored when a previous value existed, leaving the override
        # in place (an env leak) when JUJU_REMOTE_UNIT was unset.
        if remote_unit_orig is not None:
            os.environ['JUJU_REMOTE_UNIT'] = remote_unit_orig
        else:
            del os.environ['JUJU_REMOTE_UNIT']
    d = json.loads(j)
    settings = {}
    # convert unicode to strings
    for k, v in d.iteritems():
        settings[str(k)] = str(v)
    return settings
2737+
2738+
def relation_set(**kwargs):
    """Publish relation attributes; the 'rid' kwarg (if truthy) selects
    the relation id, everything else becomes key=value settings."""
    cmd = ['relation-set']
    settings = []
    for key, value in kwargs.items():
        if key == 'rid':
            if value:
                cmd += ['-r', value]
        else:
            settings.append('{}={}'.format(key, value))
    subprocess.check_call(cmd + settings)
2753+
2754+
@cached
def unit_get(attribute):
    """Query a unit attribute via unit-get; None when empty/unset."""
    value = subprocess.check_output(['unit-get', attribute]).strip()  # IGNORE:E1103
    return value or None
2766+
2767+
@cached
def config_get(attribute):
    """Return a charm config value (JSON-decoded); None when missing."""
    out = subprocess.check_output(
        ['config-get', '--format', 'json']).strip()  # IGNORE:E1103
    return json.loads(out).get(attribute)
2782+
2783+
@cached
def get_unit_hostname():
    """Hostname of this machine (memoized for the hook's lifetime)."""
    return socket.gethostname()
2787+
2788+
@cached
def get_host_ip(hostname=None):
    """Resolve 'hostname' (default: this unit's private-address) to an
    IPv4 address, returning it unchanged if already dotted-quad.

    BUG FIX: the default was previously evaluated at import time
    (hostname=unit_get('private-address')), which ran 'unit-get' merely
    on importing this module; it is now resolved lazily on first call.
    """
    if hostname is None:
        hostname = unit_get('private-address')
    try:
        # Test to see if already an IPv4 address
        socket.inet_aton(hostname)
        return hostname
    except socket.error:
        answers = dns.resolver.query(hostname, 'A')
        if answers:
            return answers[0].address
    return None
2800+
2801+
def _svc_control(service, action):
    """Run 'service <name> <action>'; raises CalledProcessError on failure."""
    subprocess.check_call(['service', service, action])
2804+
2805+
def restart(*services):
    """Restart each named system service in turn."""
    for svc in services:
        _svc_control(svc, 'restart')
2809+
2810+
def stop(*services):
    """Stop each named system service in turn."""
    for svc in services:
        _svc_control(svc, 'stop')
2814+
2815+
def start(*services):
    """Start each named system service in turn."""
    for svc in services:
        _svc_control(svc, 'start')
2819+
2820+
def reload(*services):
    """Reload each service, falling back to a restart when reload fails."""
    for svc in services:
        try:
            _svc_control(svc, 'reload')
        except subprocess.CalledProcessError:
            # Reload failed - either service does not support reload
            # or it was not running - restart will fixup most things
            _svc_control(svc, 'restart')
2829+
2830+
def running(service):
    """True when 'service <name> status' reports the service running."""
    try:
        output = subprocess.check_output(['service', service, 'status'])
    except subprocess.CalledProcessError:
        return False
    return "start/running" in output or "is running" in output
2842+
2843+
def is_relation_made(relation, key='private-address'):
    """True when any unit on 'relation' has published 'key'."""
    for rid in (relation_ids(relation) or []):
        for member in (relation_list(rid) or []):
            if relation_get(key, rid=rid, unit=member):
                return True
    return False
2850
2851=== modified file 'hooks/manager.py'
2852--- hooks/manager.py 2012-02-29 01:18:17 +0000
2853+++ hooks/manager.py 2013-05-29 18:13:26 +0000
2854@@ -1,6 +1,7 @@
2855 #!/usr/bin/python
2856 from keystoneclient.v2_0 import client
2857
2858+
2859 class KeystoneManager(object):
2860 def __init__(self, endpoint, token):
2861 self.api = client.Client(endpoint=endpoint, token=token)
2862
2863=== modified symlink 'hooks/shared-db-relation-changed'
2864=== target changed u'keystone-hooks' => u'keystone_hooks.py'
2865=== modified symlink 'hooks/shared-db-relation-joined'
2866=== target changed u'keystone-hooks' => u'keystone_hooks.py'
2867=== added symlink 'hooks/upgrade-charm'
2868=== target is u'keystone_hooks.py'
2869=== modified file 'metadata.yaml'
2870--- metadata.yaml 2013-04-22 19:39:48 +0000
2871+++ metadata.yaml 2013-05-29 18:13:26 +0000
2872@@ -12,3 +12,9 @@
2873 requires:
2874 shared-db:
2875 interface: mysql-shared
2876+ ha:
2877+ interface: hacluster
2878+ scope: container
2879+peers:
2880+ cluster:
2881+ interface: keystone-ha
2882
2883=== modified file 'revision'
2884--- revision 2012-12-12 03:52:01 +0000
2885+++ revision 2013-05-29 18:13:26 +0000
2886@@ -1,1 +1,1 @@
2887-165
2888+221
2889
2890=== added directory 'scripts'
2891=== added file 'scripts/add_to_cluster'
2892--- scripts/add_to_cluster 1970-01-01 00:00:00 +0000
2893+++ scripts/add_to_cluster 2013-05-29 18:13:26 +0000
2894@@ -0,0 +1,13 @@
#!/bin/bash
# Re-join this node to the corosync/pacemaker cluster after maintenance.
# Start corosync first (tolerate it already being up)...
service corosync start || /bin/true
sleep 2
# ...then keep retrying pacemaker until corosync is ready to accept it.
while ! service pacemaker start; do
  echo "Attempting to start pacemaker"
  sleep 1;
done;
# Bring the node back online and block until no resource shows 'Stopped'.
crm node online
sleep 2
while crm status | egrep -q 'Stopped$'; do
  echo "Waiting for nodes to come online"
  sleep 1
done
2908
2909=== added file 'scripts/remove_from_cluster'
2910--- scripts/remove_from_cluster 1970-01-01 00:00:00 +0000
2911+++ scripts/remove_from_cluster 2013-05-29 18:13:26 +0000
2912@@ -0,0 +1,4 @@
2913+#!/bin/bash
2914+crm node standby
2915+service pacemaker stop
2916+service corosync stop
2917
2918=== added directory 'templates'
2919=== added file 'templates/haproxy.cfg'
2920--- templates/haproxy.cfg 1970-01-01 00:00:00 +0000
2921+++ templates/haproxy.cfg 2013-05-29 18:13:26 +0000
2922@@ -0,0 +1,35 @@
2923+global
2924+ log 127.0.0.1 local0
2925+ log 127.0.0.1 local1 notice
2926+ maxconn 20000
2927+ user haproxy
2928+ group haproxy
2929+ spread-checks 0
2930+
2931+defaults
2932+ log global
2933+ mode http
2934+ option httplog
2935+ option dontlognull
2936+ retries 3
2937+ timeout queue 1000
2938+ timeout connect 1000
2939+ timeout client 30000
2940+ timeout server 30000
2941+
2942+listen stats :8888
2943+ mode http
2944+ stats enable
2945+ stats hide-version
2946+ stats realm Haproxy\ Statistics
2947+ stats uri /
2948+ stats auth admin:password
2949+
2950+{% for service, ports in service_ports.iteritems() -%}
2951+listen {{ service }} 0.0.0.0:{{ ports[0] }}
2952+ balance roundrobin
2953+ option tcplog
2954+ {% for unit, address in units.iteritems() -%}
2955+ server {{ unit }} {{ address }}:{{ ports[1] }} check
2956+ {% endfor %}
2957+{% endfor %}

Subscribers

People subscribed via source and target branches