Merge lp:~stub/charms/precise/postgresql/bug-1205286 into lp:charms/postgresql
Proposed by Stuart Bishop
| Status: | Merged |
|---|---|
| Approved by: | Mark Mims |
| Approved revision: | 66 |
| Merged at revision: | 61 |
| Proposed branch: | lp:~stub/charms/precise/postgresql/bug-1205286 |
| Merge into: | lp:charms/postgresql |
| Prerequisite: | lp:~stub/charms/precise/postgresql/charm-helpers |
| Diff against target: | 295 lines (+81/-79), 2 files modified: hooks/hooks.py (+69/-75), test.py (+12/-4) |
| To merge this branch: | bzr merge lp:~stub/charms/precise/postgresql/bug-1205286 |
| Related bugs: | |
| Reviewer | Review Type | Date Requested | Status |
|---|---|---|---|
| Mark Mims (community) | | | Approve |
Commit message
Description of the change
Instead of storing pgpass files in the charm directory, where sudo subprocesses can no longer access them, generate them on demand in a temporary file.
A few minor drive-by fixes are included, such as using pwgen from charmhelpers.
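For illustration, here is a minimal sketch of the approach, assuming a `temporary_pgpass` helper and a `passwords` dict chosen for this example; the branch's actual context manager is called `pgpass()` and builds the password map from local state, as shown in the diff below. The idea is to write the credentials to a `NamedTemporaryFile`, point libpq at it via the `PGPASSFILE` environment variable for the duration of the block, and restore the environment afterwards.

```python
from contextlib import contextmanager
from tempfile import NamedTemporaryFile
import os


@contextmanager
def temporary_pgpass(passwords):
    """Expose `passwords` (username -> password) to libpq via a
    throwaway pgpass file for the duration of the with-block."""
    contents = '\n'.join(
        "*:*:*:{}:{}".format(user, pw) for user, pw in passwords.items())
    pgpass_file = NamedTemporaryFile(mode='w')   # removed automatically on close
    pgpass_file.write(contents)
    pgpass_file.flush()
    # libpq ignores a password file that is group- or world-readable.
    # The real charm additionally chowns the file to the postgres user so
    # that `sudo -E -u postgres` subprocesses can read it.
    os.chmod(pgpass_file.name, 0o400)
    original = os.environ.get('PGPASSFILE')
    os.environ['PGPASSFILE'] = pgpass_file.name  # inherited by child processes
    try:
        yield pgpass_file.name
    finally:
        # Restore the previous environment so nothing leaks between hooks.
        if original is None:
            del os.environ['PGPASSFILE']
        else:
            os.environ['PGPASSFILE'] = original
```

Anything run inside the block, whether a psycopg2.connect() call or a `sudo -E -u postgres pg_basebackup` subprocess, picks up the credentials from the temporary file, so the charm no longer needs to keep a pgpass file on disk in its own directory.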
Revision history for this message
Mark Mims (mark-mims):
review: Approve
Preview Diff
| 1 | === modified file 'hooks/hooks.py' |
| 2 | --- hooks/hooks.py 2013-08-09 19:33:47 +0000 |
| 3 | +++ hooks/hooks.py 2013-08-20 14:59:39 +0000 |
| 4 | @@ -1,19 +1,19 @@ |
| 5 | #!/usr/bin/env python |
| 6 | # vim: et ai ts=4 sw=4: |
| 7 | |
| 8 | +from contextlib import contextmanager |
| 9 | import commands |
| 10 | import cPickle as pickle |
| 11 | import glob |
| 12 | from grp import getgrnam |
| 13 | import os.path |
| 14 | from pwd import getpwnam |
| 15 | -import random |
| 16 | import re |
| 17 | import shutil |
| 18 | import socket |
| 19 | -import string |
| 20 | import subprocess |
| 21 | import sys |
| 22 | +from tempfile import NamedTemporaryFile |
| 23 | import time |
| 24 | import yaml |
| 25 | from yaml.constructor import ConstructorError |
| 26 | @@ -545,7 +545,7 @@ |
| 27 | write_file('/etc/cron.d/postgres', crontab_template, perms=0600) |
| 28 | |
| 29 | |
| 30 | -def create_recovery_conf(master_host, password, restart_on_change=False): |
| 31 | +def create_recovery_conf(master_host, restart_on_change=False): |
| 32 | recovery_conf_path = os.path.join(postgresql_cluster_dir, 'recovery.conf') |
| 33 | if os.path.exists(recovery_conf_path): |
| 34 | old_recovery_conf = open(recovery_conf_path, 'r').read() |
| 35 | @@ -592,17 +592,6 @@ |
| 36 | hookenv.open_port(new_service_port) |
| 37 | |
| 38 | |
| 39 | -def pwgen(pwd_length=None): |
| 40 | - '''Generate a random password.''' |
| 41 | - if pwd_length is None: |
| 42 | - pwd_length = random.choice(range(30, 40)) |
| 43 | - alphanumeric_chars = [l for l in (string.letters + string.digits) |
| 44 | - if l not in 'Iil0oO1'] |
| 45 | - random_chars = [random.choice(alphanumeric_chars) |
| 46 | - for i in range(pwd_length)] |
| 47 | - return(''.join(random_chars)) |
| 48 | - |
| 49 | - |
| 50 | def set_password(user, password): |
| 51 | if not os.path.isdir("passwords"): |
| 52 | os.makedirs("passwords") |
| 53 | @@ -638,7 +627,8 @@ |
| 54 | start = time.time() |
| 55 | while True: |
| 56 | try: |
| 57 | - conn = psycopg2.connect(conn_str) |
| 58 | + with pgpass(): |
| 59 | + conn = psycopg2.connect(conn_str) |
| 60 | break |
| 61 | except psycopg2.Error, x: |
| 62 | if time.time() > start + timeout: |
| 63 | @@ -825,7 +815,6 @@ |
| 64 | updated_service_port = config_data["listen_port"] |
| 65 | update_service_port(current_service_port, updated_service_port) |
| 66 | update_nrpe_checks() |
| 67 | - generate_pgpass() |
| 68 | if force_restart: |
| 69 | return postgresql_restart() |
| 70 | return postgresql_reload_or_restart() |
| 71 | @@ -840,7 +829,7 @@ |
| 72 | |
| 73 | add_extra_repos() |
| 74 | |
| 75 | - packages = ["postgresql", "pwgen", "python-jinja2", "syslinux", |
| 76 | + packages = ["postgresql", "python-jinja2", "syslinux", |
| 77 | "python-psycopg2", "postgresql-contrib", "postgresql-plpython", |
| 78 | "postgresql-%s-debversion" % config_data["version"]] |
| 79 | packages.extend((hookenv.config('extra-packages') or '').split()) |
| 80 | @@ -984,7 +973,7 @@ |
| 81 | |
| 82 | password = get_password(user) |
| 83 | if password is None: |
| 84 | - password = pwgen() |
| 85 | + password = host.pwgen() |
| 86 | set_password(user, password) |
| 87 | if user_exists(user): |
| 88 | log("Updating {} user".format(user)) |
| 89 | @@ -1317,7 +1306,8 @@ |
| 90 | local_state.publish() |
| 91 | |
| 92 | |
| 93 | -def generate_pgpass(): |
| 94 | +@contextmanager |
| 95 | +def pgpass(): |
| 96 | passwords = {} |
| 97 | |
| 98 | # Replication. |
| 99 | @@ -1326,13 +1316,23 @@ |
| 100 | if 'replication_password' in local_state: |
| 101 | passwords['juju_replication'] = local_state['replication_password'] |
| 102 | |
| 103 | - if passwords: |
| 104 | - pgpass = '\n'.join( |
| 105 | - "*:*:*:{}:{}".format(username, password) |
| 106 | - for username, password in passwords.items()) |
| 107 | - write_file( |
| 108 | - charm_pgpass, pgpass, |
| 109 | - owner="postgres", group="postgres", perms=0o400) |
| 110 | + pgpass_contents = '\n'.join( |
| 111 | + "*:*:*:{}:{}".format(username, password) |
| 112 | + for username, password in passwords.items()) |
| 113 | + pgpass_file = NamedTemporaryFile() |
| 114 | + pgpass_file.write(pgpass_contents) |
| 115 | + pgpass_file.flush() |
| 116 | + os.chown(pgpass_file.name, getpwnam('postgres').pw_uid, -1) |
| 117 | + os.chmod(pgpass_file.name, 0o400) |
| 118 | + org_pgpassfile = os.environ.get('PGPASSFILE', None) |
| 119 | + os.environ['PGPASSFILE'] = pgpass_file.name |
| 120 | + try: |
| 121 | + yield pgpass_file.name |
| 122 | + finally: |
| 123 | + if org_pgpassfile is None: |
| 124 | + del os.environ['PGPASSFILE'] |
| 125 | + else: |
| 126 | + os.environ['PGPASSFILE'] = org_pgpassfile |
| 127 | |
| 128 | |
| 129 | def drop_database(dbname, warn=True): |
| 130 | @@ -1377,8 +1377,7 @@ |
| 131 | '''Connect the database as a streaming replica of the master.''' |
| 132 | master_relation = hookenv.relation_get(unit=master) |
| 133 | create_recovery_conf( |
| 134 | - master_relation['private-address'], |
| 135 | - local_state['replication_password'], restart_on_change=True) |
| 136 | + master_relation['private-address'], restart_on_change=True) |
| 137 | |
| 138 | |
| 139 | def elected_master(): |
| 140 | @@ -1498,7 +1497,6 @@ |
| 141 | log("Syncing replication_password from {}".format(master), DEBUG) |
| 142 | local_state['replication_password'] = hookenv.relation_get( |
| 143 | 'replication_password', master) |
| 144 | - generate_pgpass() |
| 145 | |
| 146 | if 'following' not in local_state: |
| 147 | log("Fresh unit. I will clone {} and become a hot standby".format( |
| 148 | @@ -1526,7 +1524,7 @@ |
| 149 | else: |
| 150 | log("I am a hot standby following new master {}".format(master)) |
| 151 | follow_database(master) |
| 152 | - if not local_state["paused_at_failover"]: |
| 153 | + if not local_state.get("paused_at_failover", None): |
| 154 | run_sql_as_postgres("SELECT pg_xlog_replay_resume()") |
| 155 | local_state['state'] = 'hot standby' |
| 156 | local_state['following'] = master |
| 157 | @@ -1664,51 +1662,52 @@ |
| 158 | def replication_relation_broken(): |
| 159 | # This unit has been removed from the service. |
| 160 | promote_database() |
| 161 | - if os.path.exists(charm_pgpass): |
| 162 | - os.unlink(charm_pgpass) |
| 163 | config_changed() |
| 164 | |
| 165 | |
| 166 | def clone_database(master_unit, master_host): |
| 167 | - postgresql_stop() |
| 168 | - log("Cloning master {}".format(master_unit)) |
| 169 | - |
| 170 | - cmd = ['sudo', '-E', '-u', 'postgres', # -E needed to locate pgpass file. |
| 171 | - 'pg_basebackup', '-D', postgresql_cluster_dir, |
| 172 | - '--xlog', '--checkpoint=fast', '--no-password', |
| 173 | - '-h', master_host, '-p', '5432', '--username=juju_replication'] |
| 174 | - log(' '.join(cmd), DEBUG) |
| 175 | - if os.path.isdir(postgresql_cluster_dir): |
| 176 | - shutil.rmtree(postgresql_cluster_dir) |
| 177 | - try: |
| 178 | - output = subprocess.check_output(cmd) |
| 179 | - log(output, DEBUG) |
| 180 | - # Debian by default expects SSL certificates in the datadir. |
| 181 | - os.symlink( |
| 182 | - '/etc/ssl/certs/ssl-cert-snakeoil.pem', |
| 183 | - os.path.join(postgresql_cluster_dir, 'server.crt')) |
| 184 | - os.symlink( |
| 185 | - '/etc/ssl/private/ssl-cert-snakeoil.key', |
| 186 | - os.path.join(postgresql_cluster_dir, 'server.key')) |
| 187 | - create_recovery_conf(master_host, local_state['replication_password']) |
| 188 | - except subprocess.CalledProcessError, x: |
| 189 | - # We failed, and this cluster is broken. Rebuild a |
| 190 | - # working cluster so start/stop etc. works and we |
| 191 | - # can retry hooks again. Even assuming the charm is |
| 192 | - # functioning correctly, the clone may still fail |
| 193 | - # due to eg. lack of disk space. |
| 194 | - log("Clone failed, db cluster destroyed", ERROR) |
| 195 | - log(x.output, ERROR) |
| 196 | - if os.path.exists(postgresql_cluster_dir): |
| 197 | + with pgpass(): |
| 198 | + postgresql_stop() |
| 199 | + log("Cloning master {}".format(master_unit)) |
| 200 | + |
| 201 | + cmd = ['sudo', '-E', # -E needed to locate pgpass file. |
| 202 | + '-u', 'postgres', 'pg_basebackup', '-D', postgresql_cluster_dir, |
| 203 | + '--xlog', '--checkpoint=fast', '--no-password', |
| 204 | + '-h', master_host, '-p', '5432', '--username=juju_replication'] |
| 205 | + log(' '.join(cmd), DEBUG) |
| 206 | + |
| 207 | + if os.path.isdir(postgresql_cluster_dir): |
| 208 | shutil.rmtree(postgresql_cluster_dir) |
| 209 | - if os.path.exists(postgresql_config_dir): |
| 210 | - shutil.rmtree(postgresql_config_dir) |
| 211 | - run('pg_createcluster {} main'.format(version)) |
| 212 | - config_changed() |
| 213 | - raise |
| 214 | - finally: |
| 215 | - postgresql_start() |
| 216 | - wait_for_db() |
| 217 | + |
| 218 | + try: |
| 219 | + output = subprocess.check_output(cmd) |
| 220 | + log(output, DEBUG) |
| 221 | + # Debian by default expects SSL certificates in the datadir. |
| 222 | + os.symlink( |
| 223 | + '/etc/ssl/certs/ssl-cert-snakeoil.pem', |
| 224 | + os.path.join(postgresql_cluster_dir, 'server.crt')) |
| 225 | + os.symlink( |
| 226 | + '/etc/ssl/private/ssl-cert-snakeoil.key', |
| 227 | + os.path.join(postgresql_cluster_dir, 'server.key')) |
| 228 | + create_recovery_conf(master_host) |
| 229 | + except subprocess.CalledProcessError, x: |
| 230 | + # We failed, and this cluster is broken. Rebuild a |
| 231 | + # working cluster so start/stop etc. works and we |
| 232 | + # can retry hooks again. Even assuming the charm is |
| 233 | + # functioning correctly, the clone may still fail |
| 234 | + # due to eg. lack of disk space. |
| 235 | + log("Clone failed, db cluster destroyed", ERROR) |
| 236 | + log(x.output, ERROR) |
| 237 | + if os.path.exists(postgresql_cluster_dir): |
| 238 | + shutil.rmtree(postgresql_cluster_dir) |
| 239 | + if os.path.exists(postgresql_config_dir): |
| 240 | + shutil.rmtree(postgresql_config_dir) |
| 241 | + run('pg_createcluster {} main'.format(version)) |
| 242 | + config_changed() |
| 243 | + raise |
| 244 | + finally: |
| 245 | + postgresql_start() |
| 246 | + wait_for_db() |
| 247 | |
| 248 | |
| 249 | def slave_count(): |
| 250 | @@ -1851,11 +1850,6 @@ |
| 251 | hook_name = os.path.basename(sys.argv[0]) |
| 252 | replication_relation_types = ['master', 'slave', 'replication'] |
| 253 | local_state = State('local_state.pickle') |
| 254 | -charm_pgpass = os.path.abspath( |
| 255 | - os.path.join(os.path.dirname(__file__), '..', 'pgpass')) |
| 256 | - |
| 257 | -# Hooks, running as root, need to be pointed at the correct .pgpass. |
| 258 | -os.environ['PGPASSFILE'] = charm_pgpass |
| 259 | |
| 260 | |
| 261 | if __name__ == '__main__': |
| 262 | |
| 263 | === modified file 'test.py' |
| 264 | --- test.py 2013-07-08 11:04:27 +0000 |
| 265 | +++ test.py 2013-08-20 14:59:39 +0000 |
| 266 | @@ -21,7 +21,7 @@ |
| 267 | |
| 268 | SERIES = 'precise' |
| 269 | TEST_CHARM = 'local:postgresql' |
| 270 | -PSQL_CHARM = 'cs:postgresql-psql' |
| 271 | +PSQL_CHARM = 'local:postgresql-psql' |
| 272 | |
| 273 | |
| 274 | def DEBUG(msg): |
| 275 | @@ -118,9 +118,17 @@ |
| 276 | unit, units[unit].get('agent-state-info',''))) |
| 277 | if agent_state != 'started': |
| 278 | ready = False |
| 279 | - # Wait a little longer, as we have no way of telling |
| 280 | - # if relationship hooks have finished running. |
| 281 | - time.sleep(10) |
| 282 | + # Unfortunately, there is no way to tell when a system is |
| 283 | + # actually ready for us to test. Juju only tells us that a |
| 284 | + # relation has started being setup, and that no errors have been |
| 285 | + # encountered yet. It utterly fails to inform us when the |
| 286 | + # cascade of hooks this triggers has finished and the |
| 287 | + # environment is in a stable and actually testable state. |
| 288 | + # So as a work around for Bug #1200267, we need to sleep long |
| 289 | + # enough that our system is probably stable. This means we have |
| 290 | + # extremely slow and flaky tests, but that is possibly better |
| 291 | + # than no tests. |
| 292 | + time.sleep(30) |
| 293 | |
| 294 | def setUp(self): |
| 295 | DEBUG("JujuFixture.setUp()") |