summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRyan Beisner <ryan.beisner@canonical.com>2018-10-03 13:09:52 -0500
committerRyan Beisner <ryan.beisner@canonical.com>2018-10-03 13:09:59 -0500
commite7b36212fdb9fac0a531507c2fa7967a6c759895 (patch)
treee32143daaa602ce904344a759b6bb01ffe43159d
parente88e36dc2c860c973a1542b0452c3e7acd906bb2 (diff)
downloadcharm-nova-lxd-e7b36212fdb9fac0a531507c2fa7967a6c759895.zip
charm-nova-lxd-e7b36212fdb9fac0a531507c2fa7967a6c759895.tar.gz
charm-nova-lxd-e7b36212fdb9fac0a531507c2fa7967a6c759895.tar.bz2
Update requirements
Also remove tests/charm-helpers if present, in favor of the pip-installed charm-helpers. Change-Id: I6d4bf06f4e10bf1263a3b4de74303bd8586a485b
-rw-r--r--requirements.txt1
-rw-r--r--test-requirements.txt14
-rw-r--r--tests/charmhelpers/__init__.py97
-rw-r--r--tests/charmhelpers/contrib/__init__.py13
-rw-r--r--tests/charmhelpers/contrib/amulet/__init__.py13
-rw-r--r--tests/charmhelpers/contrib/amulet/deployment.py99
-rw-r--r--tests/charmhelpers/contrib/amulet/utils.py821
-rw-r--r--tests/charmhelpers/contrib/openstack/__init__.py13
-rw-r--r--tests/charmhelpers/contrib/openstack/amulet/__init__.py13
-rw-r--r--tests/charmhelpers/contrib/openstack/amulet/deployment.py357
-rw-r--r--tests/charmhelpers/contrib/openstack/amulet/utils.py1581
-rw-r--r--tests/charmhelpers/core/__init__.py13
-rw-r--r--tests/charmhelpers/core/decorators.py55
-rw-r--r--tests/charmhelpers/core/files.py43
-rw-r--r--tests/charmhelpers/core/fstab.py132
-rw-r--r--tests/charmhelpers/core/hookenv.py1353
-rw-r--r--tests/charmhelpers/core/host.py1042
-rw-r--r--tests/charmhelpers/core/host_factory/__init__.py0
-rw-r--r--tests/charmhelpers/core/host_factory/centos.py72
-rw-r--r--tests/charmhelpers/core/host_factory/ubuntu.py91
-rw-r--r--tests/charmhelpers/core/hugepage.py69
-rw-r--r--tests/charmhelpers/core/kernel.py72
-rw-r--r--tests/charmhelpers/core/kernel_factory/__init__.py0
-rw-r--r--tests/charmhelpers/core/kernel_factory/centos.py17
-rw-r--r--tests/charmhelpers/core/kernel_factory/ubuntu.py13
-rw-r--r--tests/charmhelpers/core/services/__init__.py16
-rw-r--r--tests/charmhelpers/core/services/base.py362
-rw-r--r--tests/charmhelpers/core/services/helpers.py290
-rw-r--r--tests/charmhelpers/core/strutils.py129
-rw-r--r--tests/charmhelpers/core/sysctl.py58
-rw-r--r--tests/charmhelpers/core/templating.py93
-rw-r--r--tests/charmhelpers/core/unitdata.py525
-rw-r--r--tests/charmhelpers/osplatform.py25
33 files changed, 10 insertions, 7482 deletions
diff --git a/requirements.txt b/requirements.txt
index 6a3271b..b8fec1e 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,7 +2,6 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
pbr>=1.8.0,<1.9.0
-PyYAML>=3.1.0
simplejson>=2.2.0
netifaces>=0.10.4
netaddr>=0.7.12,!=0.7.16
diff --git a/test-requirements.txt b/test-requirements.txt
index 04ca623..2b2c0e1 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,23 +1,29 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
+charm-tools>=2.4.4
coverage>=3.6
mock>=1.2
flake8>=2.2.4,<=2.4.1
os-testr>=0.4.1
-charm-tools>=2.0.0
-requests==2.6.0
+requests>=2.18.4
# BEGIN: Amulet OpenStack Charm Helper Requirements
# Liberty client lower constraints
-amulet>=1.14.3,<2.0
-bundletester>=0.6.1,<1.0
+amulet>=1.14.3,<2.0;python_version=='2.7'
+bundletester>=0.6.1,<1.0;python_version=='2.7'
python-ceilometerclient>=1.5.0
python-cinderclient>=1.4.0
python-glanceclient>=1.1.0
python-heatclient>=0.8.0
+python-keystoneclient>=1.7.1
+python-neutronclient>=3.1.0
python-novaclient>=2.30.1
python-openstackclient>=1.7.0
python-swiftclient>=2.6.0
pika>=0.10.0,<1.0
distro-info
+git+https://github.com/juju/charm-helpers.git#egg=charmhelpers
# END: Amulet OpenStack Charm Helper Requirements
+# NOTE: workaround for 14.04 pip/tox
+pytz
+pyudev # for ceph-* charm unit tests (not mocked?)
diff --git a/tests/charmhelpers/__init__.py b/tests/charmhelpers/__init__.py
deleted file mode 100644
index e7aa471..0000000
--- a/tests/charmhelpers/__init__.py
+++ /dev/null
@@ -1,97 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Bootstrap charm-helpers, installing its dependencies if necessary using
-# only standard libraries.
-from __future__ import print_function
-from __future__ import absolute_import
-
-import functools
-import inspect
-import subprocess
-import sys
-
-try:
- import six # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
- import six # flake8: noqa
-
-try:
- import yaml # flake8: noqa
-except ImportError:
- if sys.version_info.major == 2:
- subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
- else:
- subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
- import yaml # flake8: noqa
-
-
-# Holds a list of mapping of mangled function names that have been deprecated
-# using the @deprecate decorator below. This is so that the warning is only
-# printed once for each usage of the function.
-__deprecated_functions = {}
-
-
-def deprecate(warning, date=None, log=None):
- """Add a deprecation warning the first time the function is used.
- The date, which is a string in semi-ISO8660 format indicate the year-month
- that the function is officially going to be removed.
-
- usage:
-
- @deprecate('use core/fetch/add_source() instead', '2017-04')
- def contributed_add_source_thing(...):
- ...
-
- And it then prints to the log ONCE that the function is deprecated.
- The reason for passing the logging function (log) is so that hookenv.log
- can be used for a charm if needed.
-
- :param warning: String to indicat where it has moved ot.
- :param date: optional sting, in YYYY-MM format to indicate when the
- function will definitely (probably) be removed.
- :param log: The log function to call to log. If not, logs to stdout
- """
- def wrap(f):
-
- @functools.wraps(f)
- def wrapped_f(*args, **kwargs):
- try:
- module = inspect.getmodule(f)
- file = inspect.getsourcefile(f)
- lines = inspect.getsourcelines(f)
- f_name = "{}-{}-{}..{}-{}".format(
- module.__name__, file, lines[0], lines[-1], f.__name__)
- except (IOError, TypeError):
- # assume it was local, so just use the name of the function
- f_name = f.__name__
- if f_name not in __deprecated_functions:
- __deprecated_functions[f_name] = True
- s = "DEPRECATION WARNING: Function {} is being removed".format(
- f.__name__)
- if date:
- s = "{} on/around {}".format(s, date)
- if warning:
- s = "{} : {}".format(s, warning)
- if log:
- log(s)
- else:
- print(s)
- return f(*args, **kwargs)
- return wrapped_f
- return wrap
diff --git a/tests/charmhelpers/contrib/__init__.py b/tests/charmhelpers/contrib/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/tests/charmhelpers/contrib/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/tests/charmhelpers/contrib/amulet/__init__.py b/tests/charmhelpers/contrib/amulet/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/tests/charmhelpers/contrib/amulet/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/tests/charmhelpers/contrib/amulet/deployment.py b/tests/charmhelpers/contrib/amulet/deployment.py
deleted file mode 100644
index d21d01d..0000000
--- a/tests/charmhelpers/contrib/amulet/deployment.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import amulet
-import os
-import six
-
-
-class AmuletDeployment(object):
- """Amulet deployment.
-
- This class provides generic Amulet deployment and test runner
- methods.
- """
-
- def __init__(self, series=None):
- """Initialize the deployment environment."""
- self.series = None
-
- if series:
- self.series = series
- self.d = amulet.Deployment(series=self.series)
- else:
- self.d = amulet.Deployment()
-
- def _add_services(self, this_service, other_services):
- """Add services.
-
- Add services to the deployment where this_service is the local charm
- that we're testing and other_services are the other services that
- are being used in the local amulet tests.
- """
- if this_service['name'] != os.path.basename(os.getcwd()):
- s = this_service['name']
- msg = "The charm's root directory name needs to be {}".format(s)
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- if 'units' not in this_service:
- this_service['units'] = 1
-
- self.d.add(this_service['name'], units=this_service['units'],
- constraints=this_service.get('constraints'),
- storage=this_service.get('storage'))
-
- for svc in other_services:
- if 'location' in svc:
- branch_location = svc['location']
- elif self.series:
- branch_location = 'cs:{}/{}'.format(self.series, svc['name']),
- else:
- branch_location = None
-
- if 'units' not in svc:
- svc['units'] = 1
-
- self.d.add(svc['name'], charm=branch_location, units=svc['units'],
- constraints=svc.get('constraints'),
- storage=svc.get('storage'))
-
- def _add_relations(self, relations):
- """Add all of the relations for the services."""
- for k, v in six.iteritems(relations):
- self.d.relate(k, v)
-
- def _configure_services(self, configs):
- """Configure all of the services."""
- for service, config in six.iteritems(configs):
- self.d.configure(service, config)
-
- def _deploy(self):
- """Deploy environment and wait for all hooks to finish executing."""
- timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 900))
- try:
- self.d.setup(timeout=timeout)
- self.d.sentry.wait(timeout=timeout)
- except amulet.helpers.TimeoutError:
- amulet.raise_status(
- amulet.FAIL,
- msg="Deployment timed out ({}s)".format(timeout)
- )
- except Exception:
- raise
-
- def run_tests(self):
- """Run all of the methods that are prefixed with 'test_'."""
- for test in dir(self):
- if test.startswith('test_'):
- getattr(self, test)()
diff --git a/tests/charmhelpers/contrib/amulet/utils.py b/tests/charmhelpers/contrib/amulet/utils.py
deleted file mode 100644
index 8a6b764..0000000
--- a/tests/charmhelpers/contrib/amulet/utils.py
+++ /dev/null
@@ -1,821 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import io
-import json
-import logging
-import os
-import re
-import socket
-import subprocess
-import sys
-import time
-import uuid
-
-import amulet
-import distro_info
-import six
-from six.moves import configparser
-if six.PY3:
- from urllib import parse as urlparse
-else:
- import urlparse
-
-
-class AmuletUtils(object):
- """Amulet utilities.
-
- This class provides common utility functions that are used by Amulet
- tests.
- """
-
- def __init__(self, log_level=logging.ERROR):
- self.log = self.get_logger(level=log_level)
- self.ubuntu_releases = self.get_ubuntu_releases()
-
- def get_logger(self, name="amulet-logger", level=logging.DEBUG):
- """Get a logger object that will log to stdout."""
- log = logging
- logger = log.getLogger(name)
- fmt = log.Formatter("%(asctime)s %(funcName)s "
- "%(levelname)s: %(message)s")
-
- handler = log.StreamHandler(stream=sys.stdout)
- handler.setLevel(level)
- handler.setFormatter(fmt)
-
- logger.addHandler(handler)
- logger.setLevel(level)
-
- return logger
-
- def valid_ip(self, ip):
- if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
- return True
- else:
- return False
-
- def valid_url(self, url):
- p = re.compile(
- r'^(?:http|ftp)s?://'
- r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
- r'localhost|'
- r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
- r'(?::\d+)?'
- r'(?:/?|[/?]\S+)$',
- re.IGNORECASE)
- if p.match(url):
- return True
- else:
- return False
-
- def get_ubuntu_release_from_sentry(self, sentry_unit):
- """Get Ubuntu release codename from sentry unit.
-
- :param sentry_unit: amulet sentry/service unit pointer
- :returns: list of strings - release codename, failure message
- """
- msg = None
- cmd = 'lsb_release -cs'
- release, code = sentry_unit.run(cmd)
- if code == 0:
- self.log.debug('{} lsb_release: {}'.format(
- sentry_unit.info['unit_name'], release))
- else:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, release, code))
- if release not in self.ubuntu_releases:
- msg = ("Release ({}) not found in Ubuntu releases "
- "({})".format(release, self.ubuntu_releases))
- return release, msg
-
- def validate_services(self, commands):
- """Validate that lists of commands succeed on service units. Can be
- used to verify system services are running on the corresponding
- service units.
-
- :param commands: dict with sentry keys and arbitrary command list vals
- :returns: None if successful, Failure string message otherwise
- """
- self.log.debug('Checking status of system services...')
-
- # /!\ DEPRECATION WARNING (beisner):
- # New and existing tests should be rewritten to use
- # validate_services_by_name() as it is aware of init systems.
- self.log.warn('DEPRECATION WARNING: use '
- 'validate_services_by_name instead of validate_services '
- 'due to init system differences.')
-
- for k, v in six.iteritems(commands):
- for cmd in v:
- output, code = k.run(cmd)
- self.log.debug('{} `{}` returned '
- '{}'.format(k.info['unit_name'],
- cmd, code))
- if code != 0:
- return "command `{}` returned {}".format(cmd, str(code))
- return None
-
- def validate_services_by_name(self, sentry_services):
- """Validate system service status by service name, automatically
- detecting init system based on Ubuntu release codename.
-
- :param sentry_services: dict with sentry keys and svc list values
- :returns: None if successful, Failure string message otherwise
- """
- self.log.debug('Checking status of system services...')
-
- # Point at which systemd became a thing
- systemd_switch = self.ubuntu_releases.index('vivid')
-
- for sentry_unit, services_list in six.iteritems(sentry_services):
- # Get lsb_release codename from unit
- release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
- if ret:
- return ret
-
- for service_name in services_list:
- if (self.ubuntu_releases.index(release) >= systemd_switch or
- service_name in ['rabbitmq-server', 'apache2',
- 'memcached']):
- # init is systemd (or regular sysv)
- cmd = 'sudo service {} status'.format(service_name)
- output, code = sentry_unit.run(cmd)
- service_running = code == 0
- elif self.ubuntu_releases.index(release) < systemd_switch:
- # init is upstart
- cmd = 'sudo status {}'.format(service_name)
- output, code = sentry_unit.run(cmd)
- service_running = code == 0 and "start/running" in output
-
- self.log.debug('{} `{}` returned '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code))
- if not service_running:
- return u"command `{}` returned {} {}".format(
- cmd, output, str(code))
- return None
-
- def _get_config(self, unit, filename):
- """Get a ConfigParser object for parsing a unit's config file."""
- file_contents = unit.file_contents(filename)
-
- # NOTE(beisner): by default, ConfigParser does not handle options
- # with no value, such as the flags used in the mysql my.cnf file.
- # https://bugs.python.org/issue7005
- config = configparser.ConfigParser(allow_no_value=True)
- config.readfp(io.StringIO(file_contents))
- return config
-
- def validate_config_data(self, sentry_unit, config_file, section,
- expected):
- """Validate config file data.
-
- Verify that the specified section of the config file contains
- the expected option key:value pairs.
-
- Compare expected dictionary data vs actual dictionary data.
- The values in the 'expected' dictionary can be strings, bools, ints,
- longs, or can be a function that evaluates a variable and returns a
- bool.
- """
- self.log.debug('Validating config file data ({} in {} on {})'
- '...'.format(section, config_file,
- sentry_unit.info['unit_name']))
- config = self._get_config(sentry_unit, config_file)
-
- if section != 'DEFAULT' and not config.has_section(section):
- return "section [{}] does not exist".format(section)
-
- for k in expected.keys():
- if not config.has_option(section, k):
- return "section [{}] is missing option {}".format(section, k)
-
- actual = config.get(section, k)
- v = expected[k]
- if (isinstance(v, six.string_types) or
- isinstance(v, bool) or
- isinstance(v, six.integer_types)):
- # handle explicit values
- if actual != v:
- return "section [{}] {}:{} != expected {}:{}".format(
- section, k, actual, k, expected[k])
- # handle function pointers, such as not_null or valid_ip
- elif not v(actual):
- return "section [{}] {}:{} != expected {}:{}".format(
- section, k, actual, k, expected[k])
- return None
-
- def _validate_dict_data(self, expected, actual):
- """Validate dictionary data.
-
- Compare expected dictionary data vs actual dictionary data.
- The values in the 'expected' dictionary can be strings, bools, ints,
- longs, or can be a function that evaluates a variable and returns a
- bool.
- """
- self.log.debug('actual: {}'.format(repr(actual)))
- self.log.debug('expected: {}'.format(repr(expected)))
-
- for k, v in six.iteritems(expected):
- if k in actual:
- if (isinstance(v, six.string_types) or
- isinstance(v, bool) or
- isinstance(v, six.integer_types)):
- # handle explicit values
- if v != actual[k]:
- return "{}:{}".format(k, actual[k])
- # handle function pointers, such as not_null or valid_ip
- elif not v(actual[k]):
- return "{}:{}".format(k, actual[k])
- else:
- return "key '{}' does not exist".format(k)
- return None
-
- def validate_relation_data(self, sentry_unit, relation, expected):
- """Validate actual relation data based on expected relation data."""
- actual = sentry_unit.relation(relation[0], relation[1])
- return self._validate_dict_data(expected, actual)
-
- def _validate_list_data(self, expected, actual):
- """Compare expected list vs actual list data."""
- for e in expected:
- if e not in actual:
- return "expected item {} not found in actual list".format(e)
- return None
-
- def not_null(self, string):
- if string is not None:
- return True
- else:
- return False
-
- def _get_file_mtime(self, sentry_unit, filename):
- """Get last modification time of file."""
- return sentry_unit.file_stat(filename)['mtime']
-
- def _get_dir_mtime(self, sentry_unit, directory):
- """Get last modification time of directory."""
- return sentry_unit.directory_stat(directory)['mtime']
-
- def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None):
- """Get start time of a process based on the last modification time
- of the /proc/pid directory.
-
- :sentry_unit: The sentry unit to check for the service on
- :service: service name to look for in process table
- :pgrep_full: [Deprecated] Use full command line search mode with pgrep
- :returns: epoch time of service process start
- :param commands: list of bash commands
- :param sentry_units: list of sentry unit pointers
- :returns: None if successful; Failure message otherwise
- """
- if pgrep_full is not None:
- # /!\ DEPRECATION WARNING (beisner):
- # No longer implemented, as pidof is now used instead of pgrep.
- # https://bugs.launchpad.net/charm-helpers/+bug/1474030
- self.log.warn('DEPRECATION WARNING: pgrep_full bool is no '
- 'longer implemented re: lp 1474030.')
-
- pid_list = self.get_process_id_list(sentry_unit, service)
- pid = pid_list[0]
- proc_dir = '/proc/{}'.format(pid)
- self.log.debug('Pid for {} on {}: {}'.format(
- service, sentry_unit.info['unit_name'], pid))
-
- return self._get_dir_mtime(sentry_unit, proc_dir)
-
- def service_restarted(self, sentry_unit, service, filename,
- pgrep_full=None, sleep_time=20):
- """Check if service was restarted.
-
- Compare a service's start time vs a file's last modification time
- (such as a config file for that service) to determine if the service
- has been restarted.
- """
- # /!\ DEPRECATION WARNING (beisner):
- # This method is prone to races in that no before-time is known.
- # Use validate_service_config_changed instead.
-
- # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
- # used instead of pgrep. pgrep_full is still passed through to ensure
- # deprecation WARNS. lp1474030
- self.log.warn('DEPRECATION WARNING: use '
- 'validate_service_config_changed instead of '
- 'service_restarted due to known races.')
-
- time.sleep(sleep_time)
- if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
- self._get_file_mtime(sentry_unit, filename)):
- return True
- else:
- return False
-
- def service_restarted_since(self, sentry_unit, mtime, service,
- pgrep_full=None, sleep_time=20,
- retry_count=30, retry_sleep_time=10):
- """Check if service was been started after a given time.
-
- Args:
- sentry_unit (sentry): The sentry unit to check for the service on
- mtime (float): The epoch time to check against
- service (string): service name to look for in process table
- pgrep_full: [Deprecated] Use full command line search mode with pgrep
- sleep_time (int): Initial sleep time (s) before looking for file
- retry_sleep_time (int): Time (s) to sleep between retries
- retry_count (int): If file is not found, how many times to retry
-
- Returns:
- bool: True if service found and its start time it newer than mtime,
- False if service is older than mtime or if service was
- not found.
- """
- # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
- # used instead of pgrep. pgrep_full is still passed through to ensure
- # deprecation WARNS. lp1474030
-
- unit_name = sentry_unit.info['unit_name']
- self.log.debug('Checking that %s service restarted since %s on '
- '%s' % (service, mtime, unit_name))
- time.sleep(sleep_time)
- proc_start_time = None
- tries = 0
- while tries <= retry_count and not proc_start_time:
- try:
- proc_start_time = self._get_proc_start_time(sentry_unit,
- service,
- pgrep_full)
- self.log.debug('Attempt {} to get {} proc start time on {} '
- 'OK'.format(tries, service, unit_name))
- except IOError as e:
- # NOTE(beisner) - race avoidance, proc may not exist yet.
- # https://bugs.launchpad.net/charm-helpers/+bug/1474030
- self.log.debug('Attempt {} to get {} proc start time on {} '
- 'failed\n{}'.format(tries, service,
- unit_name, e))
- time.sleep(retry_sleep_time)
- tries += 1
-
- if not proc_start_time:
- self.log.warn('No proc start time found, assuming service did '
- 'not start')
- return False
- if proc_start_time >= mtime:
- self.log.debug('Proc start time is newer than provided mtime'
- '(%s >= %s) on %s (OK)' % (proc_start_time,
- mtime, unit_name))
- return True
- else:
- self.log.warn('Proc start time (%s) is older than provided mtime '
- '(%s) on %s, service did not '
- 'restart' % (proc_start_time, mtime, unit_name))
- return False
-
- def config_updated_since(self, sentry_unit, filename, mtime,
- sleep_time=20, retry_count=30,
- retry_sleep_time=10):
- """Check if file was modified after a given time.
-
- Args:
- sentry_unit (sentry): The sentry unit to check the file mtime on
- filename (string): The file to check mtime of
- mtime (float): The epoch time to check against
- sleep_time (int): Initial sleep time (s) before looking for file
- retry_sleep_time (int): Time (s) to sleep between retries
- retry_count (int): If file is not found, how many times to retry
-
- Returns:
- bool: True if file was modified more recently than mtime, False if
- file was modified before mtime, or if file not found.
- """
- unit_name = sentry_unit.info['unit_name']
- self.log.debug('Checking that %s updated since %s on '
- '%s' % (filename, mtime, unit_name))
- time.sleep(sleep_time)
- file_mtime = None
- tries = 0
- while tries <= retry_count and not file_mtime:
- try:
- file_mtime = self._get_file_mtime(sentry_unit, filename)
- self.log.debug('Attempt {} to get {} file mtime on {} '
- 'OK'.format(tries, filename, unit_name))
- except IOError as e:
- # NOTE(beisner) - race avoidance, file may not exist yet.
- # https://bugs.launchpad.net/charm-helpers/+bug/1474030
- self.log.debug('Attempt {} to get {} file mtime on {} '
- 'failed\n{}'.format(tries, filename,
- unit_name, e))
- time.sleep(retry_sleep_time)
- tries += 1
-
- if not file_mtime:
- self.log.warn('Could not determine file mtime, assuming '
- 'file does not exist')
- return False
-
- if file_mtime >= mtime:
- self.log.debug('File mtime is newer than provided mtime '
- '(%s >= %s) on %s (OK)' % (file_mtime,
- mtime, unit_name))
- return True
- else:
- self.log.warn('File mtime is older than provided mtime'
- '(%s < on %s) on %s' % (file_mtime,
- mtime, unit_name))
- return False
-
- def validate_service_config_changed(self, sentry_unit, mtime, service,
- filename, pgrep_full=None,
- sleep_time=20, retry_count=30,
- retry_sleep_time=10):
- """Check service and file were updated after mtime
-
- Args:
- sentry_unit (sentry): The sentry unit to check for the service on
- mtime (float): The epoch time to check against
- service (string): service name to look for in process table
- filename (string): The file to check mtime of
- pgrep_full: [Deprecated] Use full command line search mode with pgrep
- sleep_time (int): Initial sleep in seconds to pass to test helpers
- retry_count (int): If service is not found, how many times to retry
- retry_sleep_time (int): Time in seconds to wait between retries
-
- Typical Usage:
- u = OpenStackAmuletUtils(ERROR)
- ...
- mtime = u.get_sentry_time(self.cinder_sentry)
- self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
- if not u.validate_service_config_changed(self.cinder_sentry,
- mtime,
- 'cinder-api',
- '/etc/cinder/cinder.conf')
- amulet.raise_status(amulet.FAIL, msg='update failed')
- Returns:
- bool: True if both service and file where updated/restarted after
- mtime, False if service is older than mtime or if service was
- not found or if filename was modified before mtime.
- """
-
- # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
- # used instead of pgrep. pgrep_full is still passed through to ensure
- # deprecation WARNS. lp1474030
-
- service_restart = self.service_restarted_since(
- sentry_unit, mtime,
- service,
- pgrep_full=pgrep_full,
- sleep_time=sleep_time,
- retry_count=retry_count,
- retry_sleep_time=retry_sleep_time)
-
- config_update = self.config_updated_since(
- sentry_unit,
- filename,
- mtime,
- sleep_time=sleep_time,
- retry_count=retry_count,
- retry_sleep_time=retry_sleep_time)
-
- return service_restart and config_update
-
- def get_sentry_time(self, sentry_unit):
- """Return current epoch time on a sentry"""
- cmd = "date +'%s'"
- return float(sentry_unit.run(cmd)[0])
-
- def relation_error(self, name, data):
- return 'unexpected relation data in {} - {}'.format(name, data)
-
- def endpoint_error(self, name, data):
- return 'unexpected endpoint data in {} - {}'.format(name, data)
-
- def get_ubuntu_releases(self):
- """Return a list of all Ubuntu releases in order of release."""
- _d = distro_info.UbuntuDistroInfo()
- _release_list = _d.all
- return _release_list
-
- def file_to_url(self, file_rel_path):
- """Convert a relative file path to a file URL."""
- _abs_path = os.path.abspath(file_rel_path)
- return urlparse.urlparse(_abs_path, scheme='file').geturl()
-
- def check_commands_on_units(self, commands, sentry_units):
- """Check that all commands in a list exit zero on all
- sentry units in a list.
-
- :param commands: list of bash commands
- :param sentry_units: list of sentry unit pointers
- :returns: None if successful; Failure message otherwise
- """
- self.log.debug('Checking exit codes for {} commands on {} '
- 'sentry units...'.format(len(commands),
- len(sentry_units)))
- for sentry_unit in sentry_units:
- for cmd in commands:
- output, code = sentry_unit.run(cmd)
- if code == 0:
- self.log.debug('{} `{}` returned {} '
- '(OK)'.format(sentry_unit.info['unit_name'],
- cmd, code))
- else:
- return ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- return None
-
- def get_process_id_list(self, sentry_unit, process_name,
- expect_success=True):
- """Get a list of process ID(s) from a single sentry juju unit
- for a single process name.
-
- :param sentry_unit: Amulet sentry instance (juju unit)
- :param process_name: Process name
- :param expect_success: If False, expect the PID to be missing,
- raise if it is present.
- :returns: List of process IDs
- """
- cmd = 'pidof -x "{}"'.format(process_name)
- if not expect_success:
- cmd += " || exit 0 && exit 1"
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
- return str(output).split()
-
- def get_unit_process_ids(self, unit_processes, expect_success=True):
- """Construct a dict containing unit sentries, process names, and
- process IDs.
-
- :param unit_processes: A dictionary of Amulet sentry instance
- to list of process names.
- :param expect_success: if False expect the processes to not be
- running, raise if they are.
- :returns: Dictionary of Amulet sentry instance to dictionary
- of process names to PIDs.
- """
- pid_dict = {}
- for sentry_unit, process_list in six.iteritems(unit_processes):
- pid_dict[sentry_unit] = {}
- for process in process_list:
- pids = self.get_process_id_list(
- sentry_unit, process, expect_success=expect_success)
- pid_dict[sentry_unit].update({process: pids})
- return pid_dict
-
- def validate_unit_process_ids(self, expected, actual):
- """Validate process id quantities for services on units."""
- self.log.debug('Checking units for running processes...')
- self.log.debug('Expected PIDs: {}'.format(expected))
- self.log.debug('Actual PIDs: {}'.format(actual))
-
- if len(actual) != len(expected):
- return ('Unit count mismatch. expected, actual: {}, '
- '{} '.format(len(expected), len(actual)))
-
- for (e_sentry, e_proc_names) in six.iteritems(expected):
- e_sentry_name = e_sentry.info['unit_name']
- if e_sentry in actual.keys():
- a_proc_names = actual[e_sentry]
- else:
- return ('Expected sentry ({}) not found in actual dict data.'
- '{}'.format(e_sentry_name, e_sentry))
-
- if len(e_proc_names.keys()) != len(a_proc_names.keys()):
- return ('Process name count mismatch. expected, actual: {}, '
- '{}'.format(len(expected), len(actual)))
-
- for (e_proc_name, e_pids), (a_proc_name, a_pids) in \
- zip(e_proc_names.items(), a_proc_names.items()):
- if e_proc_name != a_proc_name:
- return ('Process name mismatch. expected, actual: {}, '
- '{}'.format(e_proc_name, a_proc_name))
-
- a_pids_length = len(a_pids)
- fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
- '{}, {} ({})'.format(e_sentry_name, e_proc_name,
- e_pids, a_pids_length,
- a_pids))
-
- # If expected is a list, ensure at least one PID quantity match
- if isinstance(e_pids, list) and \
- a_pids_length not in e_pids:
- return fail_msg
- # If expected is not bool and not list,
- # ensure PID quantities match
- elif not isinstance(e_pids, bool) and \
- not isinstance(e_pids, list) and \
- a_pids_length != e_pids:
- return fail_msg
- # If expected is bool True, ensure 1 or more PIDs exist
- elif isinstance(e_pids, bool) and \
- e_pids is True and a_pids_length < 1:
- return fail_msg
- # If expected is bool False, ensure 0 PIDs exist
- elif isinstance(e_pids, bool) and \
- e_pids is False and a_pids_length != 0:
- return fail_msg
- else:
- self.log.debug('PID check OK: {} {} {}: '
- '{}'.format(e_sentry_name, e_proc_name,
- e_pids, a_pids))
- return None
-
- def validate_list_of_identical_dicts(self, list_of_dicts):
- """Check that all dicts within a list are identical."""
- hashes = []
- for _dict in list_of_dicts:
- hashes.append(hash(frozenset(_dict.items())))
-
- self.log.debug('Hashes: {}'.format(hashes))
- if len(set(hashes)) == 1:
- self.log.debug('Dicts within list are identical')
- else:
- return 'Dicts within list are not identical'
-
- return None
-
- def validate_sectionless_conf(self, file_contents, expected):
- """A crude conf parser. Useful to inspect configuration files which
- do not have section headers (as would be necessary in order to use
- the configparser). Such as openstack-dashboard or rabbitmq confs."""
- for line in file_contents.split('\n'):
- if '=' in line:
- args = line.split('=')
- if len(args) <= 1:
- continue
- key = args[0].strip()
- value = args[1].strip()
- if key in expected.keys():
- if expected[key] != value:
- msg = ('Config mismatch. Expected, actual: {}, '
- '{}'.format(expected[key], value))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- def get_unit_hostnames(self, units):
- """Return a dict of juju unit names to hostnames."""
- host_names = {}
- for unit in units:
- host_names[unit.info['unit_name']] = \
- str(unit.file_contents('/etc/hostname').strip())
- self.log.debug('Unit host names: {}'.format(host_names))
- return host_names
-
- def run_cmd_unit(self, sentry_unit, cmd):
- """Run a command on a unit, return the output and exit code."""
- output, code = sentry_unit.run(cmd)
- if code == 0:
- self.log.debug('{} `{}` command returned {} '
- '(OK)'.format(sentry_unit.info['unit_name'],
- cmd, code))
- else:
- msg = ('{} `{}` command returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
- return str(output), code
-
- def file_exists_on_unit(self, sentry_unit, file_name):
- """Check if a file exists on a unit."""
- try:
- sentry_unit.file_stat(file_name)
- return True
- except IOError:
- return False
- except Exception as e:
- msg = 'Error checking file {}: {}'.format(file_name, e)
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- def file_contents_safe(self, sentry_unit, file_name,
- max_wait=60, fatal=False):
- """Get file contents from a sentry unit. Wrap amulet file_contents
- with retry logic to address races where a file checks as existing,
- but no longer exists by the time file_contents is called.
- Return None if file not found. Optionally raise if fatal is True."""
- unit_name = sentry_unit.info['unit_name']
- file_contents = False
- tries = 0
- while not file_contents and tries < (max_wait / 4):
- try:
- file_contents = sentry_unit.file_contents(file_name)
- except IOError:
- self.log.debug('Attempt {} to open file {} from {} '
- 'failed'.format(tries, file_name,
- unit_name))
- time.sleep(4)
- tries += 1
-
- if file_contents:
- return file_contents
- elif not fatal:
- return None
- elif fatal:
- msg = 'Failed to get file contents from unit.'
- amulet.raise_status(amulet.FAIL, msg)
-
- def port_knock_tcp(self, host="localhost", port=22, timeout=15):
- """Open a TCP socket to check for a listening sevice on a host.
-
- :param host: host name or IP address, default to localhost
- :param port: TCP port number, default to 22
- :param timeout: Connect timeout, default to 15 seconds
- :returns: True if successful, False if connect failed
- """
-
- # Resolve host name if possible
- try:
- connect_host = socket.gethostbyname(host)
- host_human = "{} ({})".format(connect_host, host)
- except socket.error as e:
- self.log.warn('Unable to resolve address: '
- '{} ({}) Trying anyway!'.format(host, e))
- connect_host = host
- host_human = connect_host
-
- # Attempt socket connection
- try:
- knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- knock.settimeout(timeout)
- knock.connect((connect_host, port))
- knock.close()
- self.log.debug('Socket connect OK for host '
- '{} on port {}.'.format(host_human, port))
- return True
- except socket.error as e:
- self.log.debug('Socket connect FAIL for'
- ' {} port {} ({})'.format(host_human, port, e))
- return False
-
- def port_knock_units(self, sentry_units, port=22,
- timeout=15, expect_success=True):
- """Open a TCP socket to check for a listening sevice on each
- listed juju unit.
-
- :param sentry_units: list of sentry unit pointers
- :param port: TCP port number, default to 22
- :param timeout: Connect timeout, default to 15 seconds
- :expect_success: True by default, set False to invert logic
- :returns: None if successful, Failure message otherwise
- """
- for unit in sentry_units:
- host = unit.info['public-address']
- connected = self.port_knock_tcp(host, port, timeout)
- if not connected and expect_success:
- return 'Socket connect failed.'
- elif connected and not expect_success:
- return 'Socket connected unexpectedly.'
-
- def get_uuid_epoch_stamp(self):
- """Returns a stamp string based on uuid4 and epoch time. Useful in
- generating test messages which need to be unique-ish."""
- return '[{}-{}]'.format(uuid.uuid4(), time.time())
-
- # amulet juju action helpers:
- def run_action(self, unit_sentry, action,
- _check_output=subprocess.check_output,
- params=None):
- """Translate to amulet's built in run_action(). Deprecated.
-
- Run the named action on a given unit sentry.
-
- params a dict of parameters to use
- _check_output parameter is no longer used
-
- @return action_id.
- """
- self.log.warn('charmhelpers.contrib.amulet.utils.run_action has been '
- 'deprecated for amulet.run_action')
- return unit_sentry.run_action(action, action_args=params)
-
- def wait_on_action(self, action_id, _check_output=subprocess.check_output):
- """Wait for a given action, returning if it completed or not.
-
- action_id a string action uuid
- _check_output parameter is no longer used
- """
- data = amulet.actions.get_action_output(action_id, full_output=True)
- return data.get(u"status") == "completed"
-
- def status_get(self, unit):
- """Return the current service status of this unit."""
- raw_status, return_code = unit.run(
- "status-get --format=json --include-data")
- if return_code != 0:
- return ("unknown", "")
- status = json.loads(raw_status)
- return (status["status"], status["message"])
diff --git a/tests/charmhelpers/contrib/openstack/__init__.py b/tests/charmhelpers/contrib/openstack/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/tests/charmhelpers/contrib/openstack/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/tests/charmhelpers/contrib/openstack/amulet/__init__.py b/tests/charmhelpers/contrib/openstack/amulet/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/tests/charmhelpers/contrib/openstack/amulet/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
deleted file mode 100644
index 1c96752..0000000
--- a/tests/charmhelpers/contrib/openstack/amulet/deployment.py
+++ /dev/null
@@ -1,357 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import logging
-import os
-import re
-import sys
-import six
-from collections import OrderedDict
-from charmhelpers.contrib.amulet.deployment import (
- AmuletDeployment
-)
-from charmhelpers.contrib.openstack.amulet.utils import (
- OPENSTACK_RELEASES_PAIRS
-)
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-
-class OpenStackAmuletDeployment(AmuletDeployment):
- """OpenStack amulet deployment.
-
- This class inherits from AmuletDeployment and has additional support
- that is specifically for use by OpenStack charms.
- """
-
- def __init__(self, series=None, openstack=None, source=None,
- stable=True, log_level=DEBUG):
- """Initialize the deployment environment."""
- super(OpenStackAmuletDeployment, self).__init__(series)
- self.log = self.get_logger(level=log_level)
- self.log.info('OpenStackAmuletDeployment: init')
- self.openstack = openstack
- self.source = source
- self.stable = stable
-
- def get_logger(self, name="deployment-logger", level=logging.DEBUG):
- """Get a logger object that will log to stdout."""
- log = logging
- logger = log.getLogger(name)
- fmt = log.Formatter("%(asctime)s %(funcName)s "
- "%(levelname)s: %(message)s")
-
- handler = log.StreamHandler(stream=sys.stdout)
- handler.setLevel(level)
- handler.setFormatter(fmt)
-
- logger.addHandler(handler)
- logger.setLevel(level)
-
- return logger
-
- def _determine_branch_locations(self, other_services):
- """Determine the branch locations for the other services.
-
- Determine if the local branch being tested is derived from its
- stable or next (dev) branch, and based on this, use the corresonding
- stable or next branches for the other_services."""
-
- self.log.info('OpenStackAmuletDeployment: determine branch locations')
-
- # Charms outside the ~openstack-charmers
- base_charms = {
- 'mysql': ['trusty'],
- 'mongodb': ['trusty'],
- 'nrpe': ['trusty', 'xenial'],
- }
-
- for svc in other_services:
- # If a location has been explicitly set, use it
- if svc.get('location'):
- continue
- if svc['name'] in base_charms:
- # NOTE: not all charms have support for all series we
- # want/need to test against, so fix to most recent
- # that each base charm supports
- target_series = self.series
- if self.series not in base_charms[svc['name']]:
- target_series = base_charms[svc['name']][-1]
- svc['location'] = 'cs:{}/{}'.format(target_series,
- svc['name'])
- elif self.stable:
- svc['location'] = 'cs:{}/{}'.format(self.series,
- svc['name'])
- else:
- svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format(
- self.series,
- svc['name']
- )
-
- return other_services
-
- def _add_services(self, this_service, other_services, use_source=None,
- no_origin=None):
- """Add services to the deployment and optionally set
- openstack-origin/source.
-
- :param this_service dict: Service dictionary describing the service
- whose amulet tests are being run
- :param other_services dict: List of service dictionaries describing
- the services needed to support the target
- service
- :param use_source list: List of services which use the 'source' config
- option rather than 'openstack-origin'
- :param no_origin list: List of services which do not support setting
- the Cloud Archive.
- Service Dict:
- {
- 'name': str charm-name,
- 'units': int number of units,
- 'constraints': dict of juju constraints,
- 'location': str location of charm,
- }
- eg
- this_service = {
- 'name': 'openvswitch-odl',
- 'constraints': {'mem': '8G'},
- }
- other_services = [
- {
- 'name': 'nova-compute',
- 'units': 2,
- 'constraints': {'mem': '4G'},
- 'location': cs:~bob/xenial/nova-compute
- },
- {
- 'name': 'mysql',
- 'constraints': {'mem': '2G'},
- },
- {'neutron-api-odl'}]
- use_source = ['mysql']
- no_origin = ['neutron-api-odl']
- """
- self.log.info('OpenStackAmuletDeployment: adding services')
-
- other_services = self._determine_branch_locations(other_services)
-
- super(OpenStackAmuletDeployment, self)._add_services(this_service,
- other_services)
-
- services = other_services
- services.append(this_service)
-
- use_source = use_source or []
- no_origin = no_origin or []
-
- # Charms which should use the source config option
- use_source = list(set(
- use_source + ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
- 'ceph-osd', 'ceph-radosgw', 'ceph-mon',
- 'ceph-proxy', 'percona-cluster', 'lxd']))
-
- # Charms which can not use openstack-origin, ie. many subordinates
- no_origin = list(set(
- no_origin + ['cinder-ceph', 'hacluster', 'neutron-openvswitch',
- 'nrpe', 'openvswitch-odl', 'neutron-api-odl',
- 'odl-controller', 'cinder-backup', 'nexentaedge-data',
- 'nexentaedge-iscsi-gw', 'nexentaedge-swift-gw',
- 'cinder-nexentaedge', 'nexentaedge-mgmt']))
-
- if self.openstack:
- for svc in services:
- if svc['name'] not in use_source + no_origin:
- config = {'openstack-origin': self.openstack}
- self.d.configure(svc['name'], config)
-
- if self.source:
- for svc in services:
- if svc['name'] in use_source and svc['name'] not in no_origin:
- config = {'source': self.source}
- self.d.configure(svc['name'], config)
-
- def _configure_services(self, configs):
- """Configure all of the services."""
- self.log.info('OpenStackAmuletDeployment: configure services')
- for service, config in six.iteritems(configs):
- self.d.configure(service, config)
-
- def _auto_wait_for_status(self, message=None, exclude_services=None,
- include_only=None, timeout=None):
- """Wait for all units to have a specific extended status, except
- for any defined as excluded. Unless specified via message, any
- status containing any case of 'ready' will be considered a match.
-
- Examples of message usage:
-
- Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
- message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
-
- Wait for all units to reach this status (exact match):
- message = re.compile('^Unit is ready and clustered$')
-
- Wait for all units to reach any one of these (exact match):
- message = re.compile('Unit is ready|OK|Ready')
-
- Wait for at least one unit to reach this status (exact match):
- message = {'ready'}
-
- See Amulet's sentry.wait_for_messages() for message usage detail.
- https://github.com/juju/amulet/blob/master/amulet/sentry.py
-
- :param message: Expected status match
- :param exclude_services: List of juju service names to ignore,
- not to be used in conjuction with include_only.
- :param include_only: List of juju service names to exclusively check,
- not to be used in conjuction with exclude_services.
- :param timeout: Maximum time in seconds to wait for status match
- :returns: None. Raises if timeout is hit.
- """
- if not timeout:
- timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800))
- self.log.info('Waiting for extended status on units for {}s...'
- ''.format(timeout))
-
- all_services = self.d.services.keys()
-
- if exclude_services and include_only:
- raise ValueError('exclude_services can not be used '
- 'with include_only')
-
- if message:
- if isinstance(message, re._pattern_type):
- match = message.pattern
- else:
- match = message
-
- self.log.debug('Custom extended status wait match: '
- '{}'.format(match))
- else:
- self.log.debug('Default extended status wait match: contains '
- 'READY (case-insensitive)')
- message = re.compile('.*ready.*', re.IGNORECASE)
-
- if exclude_services:
- self.log.debug('Excluding services from extended status match: '
- '{}'.format(exclude_services))
- else:
- exclude_services = []
-
- if include_only:
- services = include_only
- else:
- services = list(set(all_services) - set(exclude_services))
-
- self.log.debug('Waiting up to {}s for extended status on services: '
- '{}'.format(timeout, services))
- service_messages = {service: message for service in services}
-
- # Check for idleness
- self.d.sentry.wait(timeout=timeout)
- # Check for error states and bail early
- self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout)
- # Check for ready messages
- self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
-
- self.log.info('OK')
-
- def _get_openstack_release(self):
- """Get openstack release.
-
- Return an integer representing the enum value of the openstack
- release.
- """
- # Must be ordered by OpenStack release (not by Ubuntu release):
- for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
- setattr(self, os_pair, i)
-
- releases = {
- ('trusty', None): self.trusty_icehouse,
- ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
- ('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
- ('trusty', 'cloud:trusty-mitaka'): self.trusty_mitaka,
- ('xenial', None): self.xenial_mitaka,
- ('xenial', 'cloud:xenial-newton'): self.xenial_newton,
- ('xenial', 'cloud:xenial-ocata'): self.xenial_ocata,
- ('xenial', 'cloud:xenial-pike'): self.xenial_pike,
- ('xenial', 'cloud:xenial-queens'): self.xenial_queens,
- ('yakkety', None): self.yakkety_newton,
- ('zesty', None): self.zesty_ocata,
- ('artful', None): self.artful_pike,
- ('bionic', None): self.bionic_queens,
- ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky,
- ('cosmic', None): self.cosmic_rocky,
- }
- return releases[(self.series, self.openstack)]
-
- def _get_openstack_release_string(self):
- """Get openstack release string.
-
- Return a string representing the openstack release.
- """
- releases = OrderedDict([
- ('trusty', 'icehouse'),
- ('xenial', 'mitaka'),
- ('yakkety', 'newton'),
- ('zesty', 'ocata'),
- ('artful', 'pike'),
- ('bionic', 'queens'),
- ('cosmic', 'rocky'),
- ])
- if self.openstack:
- os_origin = self.openstack.split(':')[1]
- return os_origin.split('%s-' % self.series)[1].split('/')[0]
- else:
- return releases[self.series]
-
- def get_ceph_expected_pools(self, radosgw=False):
- """Return a list of expected ceph pools in a ceph + cinder + glance
- test scenario, based on OpenStack release and whether ceph radosgw
- is flagged as present or not."""
-
- if self._get_openstack_release() == self.trusty_icehouse:
- # Icehouse
- pools = [
- 'data',
- 'metadata',
- 'rbd',
- 'cinder-ceph',
- 'glance'
- ]
- elif (self.trusty_kilo <= self._get_openstack_release() <=
- self.zesty_ocata):
- # Kilo through Ocata
- pools = [
- 'rbd',
- 'cinder-ceph',
- 'glance'
- ]
- else:
- # Pike and later
- pools = [
- 'cinder-ceph',
- 'glance'
- ]
-
- if radosgw:
- pools.extend([
- '.rgw.root',
- '.rgw.control',
- '.rgw',
- '.rgw.gc',
- '.users.uid'
- ])
-
- return pools
diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py
deleted file mode 100644
index 10dbe59..0000000
--- a/tests/charmhelpers/contrib/openstack/amulet/utils.py
+++ /dev/null
@@ -1,1581 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import amulet
-import json
-import logging
-import os
-import re
-import six
-import time
-import urllib
-import urlparse
-
-import cinderclient.v1.client as cinder_client
-import cinderclient.v2.client as cinder_clientv2
-import glanceclient.v1 as glance_client
-import glanceclient.v2 as glance_clientv2
-import heatclient.v1.client as heat_client
-from keystoneclient.v2_0 import client as keystone_client
-from keystoneauth1.identity import (
- v3,
- v2,
-)
-from keystoneauth1 import session as keystone_session
-from keystoneclient.v3 import client as keystone_client_v3
-from novaclient import exceptions
-
-import novaclient.client as nova_client
-import novaclient
-import pika
-import swiftclient
-
-from charmhelpers.core.decorators import retry_on_exception
-from charmhelpers.contrib.amulet.utils import (
- AmuletUtils
-)
-from charmhelpers.core.host import CompareHostReleases
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-NOVA_CLIENT_VERSION = "2"
-
-OPENSTACK_RELEASES_PAIRS = [
- 'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
- 'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
- 'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
- 'xenial_pike', 'artful_pike', 'xenial_queens',
- 'bionic_queens', 'bionic_rocky', 'cosmic_rocky']
-
-
-class OpenStackAmuletUtils(AmuletUtils):
- """OpenStack amulet utilities.
-
- This class inherits from AmuletUtils and has additional support
- that is specifically for use by OpenStack charm tests.
- """
-
- def __init__(self, log_level=ERROR):
- """Initialize the deployment environment."""
- super(OpenStackAmuletUtils, self).__init__(log_level)
-
- def validate_endpoint_data(self, endpoints, admin_port, internal_port,
- public_port, expected, openstack_release=None):
- """Validate endpoint data. Pick the correct validator based on
- OpenStack release. Expected data should be in the v2 format:
- {
- 'id': id,
- 'region': region,
- 'adminurl': adminurl,
- 'internalurl': internalurl,
- 'publicurl': publicurl,
- 'service_id': service_id}
-
- """
- validation_function = self.validate_v2_endpoint_data
- xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
- if openstack_release and openstack_release >= xenial_queens:
- validation_function = self.validate_v3_endpoint_data
- expected = {
- 'id': expected['id'],
- 'region': expected['region'],
- 'region_id': 'RegionOne',
- 'url': self.valid_url,
- 'interface': self.not_null,
- 'service_id': expected['service_id']}
- return validation_function(endpoints, admin_port, internal_port,
- public_port, expected)
-
- def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port,
- public_port, expected):
- """Validate endpoint data.
-
- Validate actual endpoint data vs expected endpoint data. The ports
- are used to find the matching endpoint.
- """
- self.log.debug('Validating endpoint data...')
- self.log.debug('actual: {}'.format(repr(endpoints)))
- found = False
- for ep in endpoints:
- self.log.debug('endpoint: {}'.format(repr(ep)))
- if (admin_port in ep.adminurl and
- internal_port in ep.internalurl and
- public_port in ep.publicurl):
- found = True
- actual = {'id': ep.id,
- 'region': ep.region,
- 'adminurl': ep.adminurl,
- 'internalurl': ep.internalurl,
- 'publicurl': ep.publicurl,
- 'service_id': ep.service_id}
- ret = self._validate_dict_data(expected, actual)
- if ret:
- return 'unexpected endpoint data - {}'.format(ret)
-
- if not found:
- return 'endpoint not found'
-
- def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port,
- public_port, expected, expected_num_eps=3):
- """Validate keystone v3 endpoint data.
-
- Validate the v3 endpoint data which has changed from v2. The
- ports are used to find the matching endpoint.
-
- The new v3 endpoint data looks like:
-
- [<Endpoint enabled=True,
- id=0432655fc2f74d1e9fa17bdaa6f6e60b,
- interface=admin,
- links={u'self': u'<RESTful URL of this endpoint>'},
- region=RegionOne,
- region_id=RegionOne,
- service_id=17f842a0dc084b928e476fafe67e4095,
- url=http://10.5.6.5:9312>,
- <Endpoint enabled=True,
- id=6536cb6cb92f4f41bf22b079935c7707,
- interface=admin,
- links={u'self': u'<RESTful url of this endpoint>'},
- region=RegionOne,
- region_id=RegionOne,
- service_id=72fc8736fb41435e8b3584205bb2cfa3,
- url=http://10.5.6.6:35357/v3>,
- ... ]
- """
- self.log.debug('Validating v3 endpoint data...')
- self.log.debug('actual: {}'.format(repr(endpoints)))
- found = []
- for ep in endpoints:
- self.log.debug('endpoint: {}'.format(repr(ep)))
- if ((admin_port in ep.url and ep.interface == 'admin') or
- (internal_port in ep.url and ep.interface == 'internal') or
- (public_port in ep.url and ep.interface == 'public')):
- found.append(ep.interface)
- # note we ignore the links member.
- actual = {'id': ep.id,
- 'region': ep.region,
- 'region_id': ep.region_id,
- 'interface': self.not_null,
- 'url': ep.url,
- 'service_id': ep.service_id, }
- ret = self._validate_dict_data(expected, actual)
- if ret:
- return 'unexpected endpoint data - {}'.format(ret)
-
- if len(found) != expected_num_eps:
- return 'Unexpected number of endpoints found'
-
- def convert_svc_catalog_endpoint_data_to_v3(self, ep_data):
- """Convert v2 endpoint data into v3.
-
- {
- 'service_name1': [
- {
- 'adminURL': adminURL,
- 'id': id,
- 'region': region.
- 'publicURL': publicURL,
- 'internalURL': internalURL
- }],
- 'service_name2': [
- {
- 'adminURL': adminURL,
- 'id': id,
- 'region': region.
- 'publicURL': publicURL,
- 'internalURL': internalURL
- }],
- }
- """
- self.log.warn("Endpoint ID and Region ID validation is limited to not "
- "null checks after v2 to v3 conversion")
- for svc in ep_data.keys():
- assert len(ep_data[svc]) == 1, "Unknown data format"
- svc_ep_data = ep_data[svc][0]
- ep_data[svc] = [
- {
- 'url': svc_ep_data['adminURL'],
- 'interface': 'admin',
- 'region': svc_ep_data['region'],
- 'region_id': self.not_null,
- 'id': self.not_null},
- {
- 'url': svc_ep_data['publicURL'],
- 'interface': 'public',
- 'region': svc_ep_data['region'],
- 'region_id': self.not_null,
- 'id': self.not_null},
- {
- 'url': svc_ep_data['internalURL'],
- 'interface': 'internal',
- 'region': svc_ep_data['region'],
- 'region_id': self.not_null,
- 'id': self.not_null}]
- return ep_data
-
- def validate_svc_catalog_endpoint_data(self, expected, actual,
- openstack_release=None):
- """Validate service catalog endpoint data. Pick the correct validator
- for the OpenStack version. Expected data should be in the v2 format:
- {
- 'service_name1': [
- {
- 'adminURL': adminURL,
- 'id': id,
- 'region': region.
- 'publicURL': publicURL,
- 'internalURL': internalURL
- }],
- 'service_name2': [
- {
- 'adminURL': adminURL,
- 'id': id,
- 'region': region.
- 'publicURL': publicURL,
- 'internalURL': internalURL
- }],
- }
-
- """
- validation_function = self.validate_v2_svc_catalog_endpoint_data
- xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
- if openstack_release and openstack_release >= xenial_queens:
- validation_function = self.validate_v3_svc_catalog_endpoint_data
- expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
- return validation_function(expected, actual)
-
- def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
- """Validate service catalog endpoint data.
-
- Validate a list of actual service catalog endpoints vs a list of
- expected service catalog endpoints.
- """
- self.log.debug('Validating service catalog endpoint data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for k, v in six.iteritems(expected):
- if k in actual:
- ret = self._validate_dict_data(expected[k][0], actual[k][0])
- if ret:
- return self.endpoint_error(k, ret)
- else:
- return "endpoint {} does not exist".format(k)
- return ret
-
- def validate_v3_svc_catalog_endpoint_data(self, expected, actual):
- """Validate the keystone v3 catalog endpoint data.
-
- Validate a list of dictinaries that make up the keystone v3 service
- catalogue.
-
- It is in the form of:
-
-
- {u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e',
- u'interface': u'admin',
- u'region': u'RegionOne',
- u'region_id': u'RegionOne',
- u'url': u'http://10.5.5.224:35357/v3'},
- {u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf',
- u'interface': u'public',
- u'region': u'RegionOne',
- u'region_id': u'RegionOne',
- u'url': u'http://10.5.5.224:5000/v3'},
- {u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b',
- u'interface': u'internal',
- u'region': u'RegionOne',
- u'region_id': u'RegionOne',
- u'url': u'http://10.5.5.224:5000/v3'}],
- u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62',
- u'interface': u'public',
- u'region': u'RegionOne',
- u'region_id': u'RegionOne',
- u'url': u'http://10.5.5.223:9311'},
- {u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d',
- u'interface': u'internal',
- u'region': u'RegionOne',
- u'region_id': u'RegionOne',
- u'url': u'http://10.5.5.223:9311'},
- {u'id': u'f629388955bc407f8b11d8b7ca168086',
- u'interface': u'admin',
- u'region': u'RegionOne',
- u'region_id': u'RegionOne',
- u'url': u'http://10.5.5.223:9312'}]}
-
- Note, that an added complication is that the order of admin, public,
- internal against 'interface' in each region.
-
- Thus, the function sorts the expected and actual lists using the
- interface key as a sort key, prior to the comparison.
- """
- self.log.debug('Validating v3 service catalog endpoint data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for k, v in six.iteritems(expected):
- if k in actual:
- l_expected = sorted(v, key=lambda x: x['interface'])
- l_actual = sorted(actual[k], key=lambda x: x['interface'])
- if len(l_actual) != len(l_expected):
- return ("endpoint {} has differing number of interfaces "
- " - expected({}), actual({})"
- .format(k, len(l_expected), len(l_actual)))
- for i_expected, i_actual in zip(l_expected, l_actual):
- self.log.debug("checking interface {}"
- .format(i_expected['interface']))
- ret = self._validate_dict_data(i_expected, i_actual)
- if ret:
- return self.endpoint_error(k, ret)
- else:
- return "endpoint {} does not exist".format(k)
- return ret
-
- def validate_tenant_data(self, expected, actual):
- """Validate tenant data.
-
- Validate a list of actual tenant data vs list of expected tenant
- data.
- """
- self.log.debug('Validating tenant data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'enabled': act.enabled, 'description': act.description,
- 'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected tenant data - {}".format(ret)
- if not found:
- return "tenant {} does not exist".format(e['name'])
- return ret
-
- def validate_role_data(self, expected, actual):
- """Validate role data.
-
- Validate a list of actual role data vs a list of expected role
- data.
- """
- self.log.debug('Validating role data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- a = {'name': act.name, 'id': act.id}
- if e['name'] == a['name']:
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected role data - {}".format(ret)
- if not found:
- return "role {} does not exist".format(e['name'])
- return ret
-
- def validate_user_data(self, expected, actual, api_version=None):
- """Validate user data.
-
- Validate a list of actual user data vs a list of expected user
- data.
- """
- self.log.debug('Validating user data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- for e in expected:
- found = False
- for act in actual:
- if e['name'] == act.name:
- a = {'enabled': act.enabled, 'name': act.name,
- 'email': act.email, 'id': act.id}
- if api_version == 3:
- a['default_project_id'] = getattr(act,
- 'default_project_id',
- 'none')
- else:
- a['tenantId'] = act.tenantId
- found = True
- ret = self._validate_dict_data(e, a)
- if ret:
- return "unexpected user data - {}".format(ret)
- if not found:
- return "user {} does not exist".format(e['name'])
- return ret
-
- def validate_flavor_data(self, expected, actual):
- """Validate flavor data.
-
- Validate a list of actual flavors vs a list of expected flavors.
- """
- self.log.debug('Validating flavor data...')
- self.log.debug('actual: {}'.format(repr(actual)))
- act = [a.name for a in actual]
- return self._validate_list_data(expected, act)
-
- def tenant_exists(self, keystone, tenant):
- """Return True if tenant exists."""
- self.log.debug('Checking if tenant exists ({})...'.format(tenant))
- return tenant in [t.name for t in keystone.tenants.list()]
-
- @retry_on_exception(num_retries=5, base_delay=1)
- def keystone_wait_for_propagation(self, sentry_relation_pairs,
- api_version):
- """Iterate over list of sentry and relation tuples and verify that
- api_version has the expected value.
-
- :param sentry_relation_pairs: list of sentry, relation name tuples used
- for monitoring propagation of relation
- data
- :param api_version: api_version to expect in relation data
- :returns: None if successful. Raise on error.
- """
- for (sentry, relation_name) in sentry_relation_pairs:
- rel = sentry.relation('identity-service',
- relation_name)
- self.log.debug('keystone relation data: {}'.format(rel))
- if rel.get('api_version') != str(api_version):
- raise Exception("api_version not propagated through relation"
- " data yet ('{}' != '{}')."
- "".format(rel.get('api_version'), api_version))
-
- def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
- api_version):
- """Configure preferred-api-version of keystone in deployment and
- monitor provided list of relation objects for propagation
- before returning to caller.
-
- :param sentry_relation_pairs: list of sentry, relation tuples used for
- monitoring propagation of relation data
- :param deployment: deployment to configure
- :param api_version: value preferred-api-version will be set to
- :returns: None if successful. Raise on error.
- """
- self.log.debug("Setting keystone preferred-api-version: '{}'"
- "".format(api_version))
-
- config = {'preferred-api-version': api_version}
- deployment.d.configure('keystone', config)
- deployment._auto_wait_for_status()
- self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
-
- def authenticate_cinder_admin(self, keystone, api_version=2):
- """Authenticates admin user with cinder."""
- self.log.debug('Authenticating cinder admin...')
- _clients = {
- 1: cinder_client.Client,
- 2: cinder_clientv2.Client}
- return _clients[api_version](session=keystone.session)
-
- def authenticate_keystone(self, keystone_ip, username, password,
- api_version=False, admin_port=False,
- user_domain_name=None, domain_name=None,
- project_domain_name=None, project_name=None):
- """Authenticate with Keystone"""
- self.log.debug('Authenticating with keystone...')
- if not api_version:
- api_version = 2
- sess, auth = self.get_keystone_session(
- keystone_ip=keystone_ip,
- username=username,
- password=password,
- api_version=api_version,
- admin_port=admin_port,
- user_domain_name=user_domain_name,
- domain_name=domain_name,
- project_domain_name=project_domain_name,
- project_name=project_name
- )
- if api_version == 2:
- client = keystone_client.Client(session=sess)
- else:
- client = keystone_client_v3.Client(session=sess)
- # This populates the client.service_catalog
- client.auth_ref = auth.get_access(sess)
- return client
-
- def get_keystone_session(self, keystone_ip, username, password,
- api_version=False, admin_port=False,
- user_domain_name=None, domain_name=None,
- project_domain_name=None, project_name=None):
- """Return a keystone session object"""
- ep = self.get_keystone_endpoint(keystone_ip,
- api_version=api_version,
- admin_port=admin_port)
- if api_version == 2:
- auth = v2.Password(
- username=username,
- password=password,
- tenant_name=project_name,
- auth_url=ep
- )
- sess = keystone_session.Session(auth=auth)
- else:
- auth = v3.Password(
- user_domain_name=user_domain_name,
- username=username,
- password=password,
- domain_name=domain_name,
- project_domain_name=project_domain_name,
- project_name=project_name,
- auth_url=ep
- )
- sess = keystone_session.Session(auth=auth)
- return (sess, auth)
-
- def get_keystone_endpoint(self, keystone_ip, api_version=None,
- admin_port=False):
- """Return keystone endpoint"""
- port = 5000
- if admin_port:
- port = 35357
- base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
- port)
- if api_version == 2:
- ep = base_ep + "/v2.0"
- else:
- ep = base_ep + "/v3"
- return ep
-
- def get_default_keystone_session(self, keystone_sentry,
- openstack_release=None, api_version=2):
- """Return a keystone session object and client object assuming standard
- default settings
-
- Example call in amulet tests:
- self.keystone_session, self.keystone = u.get_default_keystone_session(
- self.keystone_sentry,
- openstack_release=self._get_openstack_release())
-
- The session can then be used to auth other clients:
- neutronclient.Client(session=session)
- aodh_client.Client(session=session)
- eyc
- """
- self.log.debug('Authenticating keystone admin...')
- # 11 => xenial_queens
- if api_version == 3 or (openstack_release and openstack_release >= 11):
- client_class = keystone_client_v3.Client
- api_version = 3
- else:
- client_class = keystone_client.Client
- keystone_ip = keystone_sentry.info['public-address']
- session, auth = self.get_keystone_session(
- keystone_ip,
- api_version=api_version,
- username='admin',
- password='openstack',
- project_name='admin',
- user_domain_name='admin_domain',
- project_domain_name='admin_domain')
- client = client_class(session=session)
- # This populates the client.service_catalog
- client.auth_ref = auth.get_access(session)
- return session, client
-
- def authenticate_keystone_admin(self, keystone_sentry, user, password,
- tenant=None, api_version=None,
- keystone_ip=None, user_domain_name=None,
- project_domain_name=None,
- project_name=None):
- """Authenticates admin user with the keystone admin endpoint."""
- self.log.debug('Authenticating keystone admin...')
- if not keystone_ip:
- keystone_ip = keystone_sentry.info['public-address']
-
- # To support backward compatibility usage of this function
- if not project_name:
- project_name = tenant
- if api_version == 3 and not user_domain_name:
- user_domain_name = 'admin_domain'
- if api_version == 3 and not project_domain_name:
- project_domain_name = 'admin_domain'
- if api_version == 3 and not project_name:
- project_name = 'admin'
-
- return self.authenticate_keystone(
- keystone_ip, user, password,
- api_version=api_version,
- user_domain_name=user_domain_name,
- project_domain_name=project_domain_name,
- project_name=project_name,
- admin_port=True)
-
- def authenticate_keystone_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with the keystone public endpoint."""
- self.log.debug('Authenticating keystone user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- interface='publicURL')
- keystone_ip = urlparse.urlparse(ep).hostname
-
- return self.authenticate_keystone(keystone_ip, user, password,
- project_name=tenant)
-
- def authenticate_glance_admin(self, keystone):
- """Authenticates admin user with glance."""
- self.log.debug('Authenticating glance admin...')
- ep = keystone.service_catalog.url_for(service_type='image',
- interface='adminURL')
- if keystone.session:
- return glance_clientv2.Client("2", session=keystone.session)
- else:
- return glance_client.Client(ep, token=keystone.auth_token)
-
- def authenticate_heat_admin(self, keystone):
- """Authenticates the admin user with heat."""
- self.log.debug('Authenticating heat admin...')
- ep = keystone.service_catalog.url_for(service_type='orchestration',
- interface='publicURL')
- if keystone.session:
- return heat_client.Client(endpoint=ep, session=keystone.session)
- else:
- return heat_client.Client(endpoint=ep, token=keystone.auth_token)
-
- def authenticate_nova_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with nova-api."""
- self.log.debug('Authenticating nova user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- interface='publicURL')
- if keystone.session:
- return nova_client.Client(NOVA_CLIENT_VERSION,
- session=keystone.session,
- auth_url=ep)
- elif novaclient.__version__[0] >= "7":
- return nova_client.Client(NOVA_CLIENT_VERSION,
- username=user, password=password,
- project_name=tenant, auth_url=ep)
- else:
- return nova_client.Client(NOVA_CLIENT_VERSION,
- username=user, api_key=password,
- project_id=tenant, auth_url=ep)
-
- def authenticate_swift_user(self, keystone, user, password, tenant):
- """Authenticates a regular user with swift api."""
- self.log.debug('Authenticating swift user ({})...'.format(user))
- ep = keystone.service_catalog.url_for(service_type='identity',
- interface='publicURL')
- if keystone.session:
- return swiftclient.Connection(session=keystone.session)
- else:
- return swiftclient.Connection(authurl=ep,
- user=user,
- key=password,
- tenant_name=tenant,
- auth_version='2.0')
-
- def create_flavor(self, nova, name, ram, vcpus, disk, flavorid="auto",
- ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True):
- """Create the specified flavor."""
- try:
- nova.flavors.find(name=name)
- except (exceptions.NotFound, exceptions.NoUniqueMatch):
- self.log.debug('Creating flavor ({})'.format(name))
- nova.flavors.create(name, ram, vcpus, disk, flavorid,
- ephemeral, swap, rxtx_factor, is_public)
-
- def glance_create_image(self, glance, image_name, image_url,
- download_dir='tests',
- hypervisor_type='qemu',
- disk_format='qcow2',
- architecture='x86_64',
- container_format='bare'):
- """Download an image and upload it to glance, validate its status
- and return an image object pointer. KVM defaults, can override for
- LXD.
-
- :param glance: pointer to authenticated glance api connection
- :param image_name: display name for new image
- :param image_url: url to retrieve
- :param download_dir: directory to store downloaded image file
- :param hypervisor_type: glance image hypervisor property
- :param disk_format: glance image disk format
- :param architecture: glance image architecture property
- :param container_format: glance image container format
- :returns: glance image pointer
- """
- self.log.debug('Creating glance image ({}) from '
- '{}...'.format(image_name, image_url))
-
- # Download image
- http_proxy = os.getenv('AMULET_HTTP_PROXY')
- self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
- if http_proxy:
- proxies = {'http': http_proxy}
- opener = urllib.FancyURLopener(proxies)
- else:
- opener = urllib.FancyURLopener()
-
- abs_file_name = os.path.join(download_dir, image_name)
- if not os.path.exists(abs_file_name):
- opener.retrieve(image_url, abs_file_name)
-
- # Create glance image
- glance_properties = {
- 'architecture': architecture,
- 'hypervisor_type': hypervisor_type
- }
- # Create glance image
- if float(glance.version) < 2.0:
- with open(abs_file_name) as f:
- image = glance.images.create(
- name=image_name,
- is_public=True,
- disk_format=disk_format,
- container_format=container_format,
- properties=glance_properties,
- data=f)
- else:
- image = glance.images.create(
- name=image_name,
- visibility="public",
- disk_format=disk_format,
- container_format=container_format)
- glance.images.upload(image.id, open(abs_file_name, 'rb'))
- glance.images.update(image.id, **glance_properties)
-
- # Wait for image to reach active status
- img_id = image.id
- ret = self.resource_reaches_status(glance.images, img_id,
- expected_stat='active',
- msg='Image status wait')
- if not ret:
- msg = 'Glance image failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new image
- self.log.debug('Validating image attributes...')
- val_img_name = glance.images.get(img_id).name
- val_img_stat = glance.images.get(img_id).status
- val_img_cfmt = glance.images.get(img_id).container_format
- val_img_dfmt = glance.images.get(img_id).disk_format
-
- if float(glance.version) < 2.0:
- val_img_pub = glance.images.get(img_id).is_public
- else:
- val_img_pub = glance.images.get(img_id).visibility == "public"
-
- msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
- 'container fmt:{} disk fmt:{}'.format(
- val_img_name, val_img_pub, img_id,
- val_img_stat, val_img_cfmt, val_img_dfmt))
-
- if val_img_name == image_name and val_img_stat == 'active' \
- and val_img_pub is True and val_img_cfmt == container_format \
- and val_img_dfmt == disk_format:
- self.log.debug(msg_attr)
- else:
- msg = ('Image validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return image
-
- def create_cirros_image(self, glance, image_name):
- """Download the latest cirros image and upload it to glance,
- validate and return a resource pointer.
-
- :param glance: pointer to authenticated glance connection
- :param image_name: display name for new image
- :returns: glance image pointer
- """
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'glance_create_image instead of '
- 'create_cirros_image.')
-
- self.log.debug('Creating glance cirros image '
- '({})...'.format(image_name))
-
- # Get cirros image URL
- http_proxy = os.getenv('AMULET_HTTP_PROXY')
- self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
- if http_proxy:
- proxies = {'http': http_proxy}
- opener = urllib.FancyURLopener(proxies)
- else:
- opener = urllib.FancyURLopener()
-
- f = opener.open('http://download.cirros-cloud.net/version/released')
- version = f.read().strip()
- cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
- cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
- version, cirros_img)
- f.close()
-
- return self.glance_create_image(glance, image_name, cirros_url)
-
- def delete_image(self, glance, image):
- """Delete the specified image."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_image.')
- self.log.debug('Deleting glance image ({})...'.format(image))
- return self.delete_resource(glance.images, image, msg='glance image')
-
- def create_instance(self, nova, image_name, instance_name, flavor):
- """Create the specified instance."""
- self.log.debug('Creating instance '
- '({}|{}|{})'.format(instance_name, image_name, flavor))
- image = nova.glance.find_image(image_name)
- flavor = nova.flavors.find(name=flavor)
- instance = nova.servers.create(name=instance_name, image=image,
- flavor=flavor)
-
- count = 1
- status = instance.status
- while status != 'ACTIVE' and count < 60:
- time.sleep(3)
- instance = nova.servers.get(instance.id)
- status = instance.status
- self.log.debug('instance status: {}'.format(status))
- count += 1
-
- if status != 'ACTIVE':
- self.log.error('instance creation timed out')
- return None
-
- return instance
-
- def delete_instance(self, nova, instance):
- """Delete the specified instance."""
-
- # /!\ DEPRECATION WARNING
- self.log.warn('/!\\ DEPRECATION WARNING: use '
- 'delete_resource instead of delete_instance.')
- self.log.debug('Deleting instance ({})...'.format(instance))
- return self.delete_resource(nova.servers, instance,
- msg='nova instance')
-
- def create_or_get_keypair(self, nova, keypair_name="testkey"):
- """Create a new keypair, or return pointer if it already exists."""
- try:
- _keypair = nova.keypairs.get(keypair_name)
- self.log.debug('Keypair ({}) already exists, '
- 'using it.'.format(keypair_name))
- return _keypair
- except Exception:
- self.log.debug('Keypair ({}) does not exist, '
- 'creating it.'.format(keypair_name))
-
- _keypair = nova.keypairs.create(name=keypair_name)
- return _keypair
-
- def _get_cinder_obj_name(self, cinder_object):
- """Retrieve name of cinder object.
-
- :param cinder_object: cinder snapshot or volume object
- :returns: str cinder object name
- """
- # v1 objects store name in 'display_name' attr but v2+ use 'name'
- try:
- return cinder_object.display_name
- except AttributeError:
- return cinder_object.name
-
- def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
- img_id=None, src_vol_id=None, snap_id=None):
- """Create cinder volume, optionally from a glance image, OR
- optionally as a clone of an existing volume, OR optionally
- from a snapshot. Wait for the new volume status to reach
- the expected status, validate and return a resource pointer.
-
- :param vol_name: cinder volume display name
- :param vol_size: size in gigabytes
- :param img_id: optional glance image id
- :param src_vol_id: optional source volume id to clone
- :param snap_id: optional snapshot id to use
- :returns: cinder volume pointer
- """
- # Handle parameter input and avoid impossible combinations
- if img_id and not src_vol_id and not snap_id:
- # Create volume from image
- self.log.debug('Creating cinder volume from glance image...')
- bootable = 'true'
- elif src_vol_id and not img_id and not snap_id:
- # Clone an existing volume
- self.log.debug('Cloning cinder volume...')
- bootable = cinder.volumes.get(src_vol_id).bootable
- elif snap_id and not src_vol_id and not img_id:
- # Create volume from snapshot
- self.log.debug('Creating cinder volume from snapshot...')
- snap = cinder.volume_snapshots.find(id=snap_id)
- vol_size = snap.size
- snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
- bootable = cinder.volumes.get(snap_vol_id).bootable
- elif not img_id and not src_vol_id and not snap_id:
- # Create volume
- self.log.debug('Creating cinder volume...')
- bootable = 'false'
- else:
- # Impossible combination of parameters
- msg = ('Invalid method use - name:{} size:{} img_id:{} '
- 'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
- img_id, src_vol_id,
- snap_id))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Create new volume
- try:
- vol_new = cinder.volumes.create(display_name=vol_name,
- imageRef=img_id,
- size=vol_size,
- source_volid=src_vol_id,
- snapshot_id=snap_id)
- vol_id = vol_new.id
- except TypeError:
- vol_new = cinder.volumes.create(name=vol_name,
- imageRef=img_id,
- size=vol_size,
- source_volid=src_vol_id,
- snapshot_id=snap_id)
- vol_id = vol_new.id
- except Exception as e:
- msg = 'Failed to create volume: {}'.format(e)
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Wait for volume to reach available status
- ret = self.resource_reaches_status(cinder.volumes, vol_id,
- expected_stat="available",
- msg="Volume status wait")
- if not ret:
- msg = 'Cinder volume failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new volume
- self.log.debug('Validating volume attributes...')
- val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id))
- val_vol_boot = cinder.volumes.get(vol_id).bootable
- val_vol_stat = cinder.volumes.get(vol_id).status
- val_vol_size = cinder.volumes.get(vol_id).size
- msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
- '{} size:{}'.format(val_vol_name, vol_id,
- val_vol_stat, val_vol_boot,
- val_vol_size))
-
- if val_vol_boot == bootable and val_vol_stat == 'available' \
- and val_vol_name == vol_name and val_vol_size == vol_size:
- self.log.debug(msg_attr)
- else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return vol_new
-
- def delete_resource(self, resource, resource_id,
- msg="resource", max_wait=120):
- """Delete one openstack resource, such as one instance, keypair,
- image, volume, stack, etc., and confirm deletion within max wait time.
-
- :param resource: pointer to os resource type, ex:glance_client.images
- :param resource_id: unique name or id for the openstack resource
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, otherwise False
- """
- self.log.debug('Deleting OpenStack resource '
- '{} ({})'.format(resource_id, msg))
- num_before = len(list(resource.list()))
- resource.delete(resource_id)
-
- tries = 0
- num_after = len(list(resource.list()))
- while num_after != (num_before - 1) and tries < (max_wait / 4):
- self.log.debug('{} delete check: '
- '{} [{}:{}] {}'.format(msg, tries,
- num_before,
- num_after,
- resource_id))
- time.sleep(4)
- num_after = len(list(resource.list()))
- tries += 1
-
- self.log.debug('{}: expected, actual count = {}, '
- '{}'.format(msg, num_before - 1, num_after))
-
- if num_after == (num_before - 1):
- return True
- else:
- self.log.error('{} delete timed out'.format(msg))
- return False
-
- def resource_reaches_status(self, resource, resource_id,
- expected_stat='available',
- msg='resource', max_wait=120):
- """Wait for an openstack resources status to reach an
- expected status within a specified time. Useful to confirm that
- nova instances, cinder vols, snapshots, glance images, heat stacks
- and other resources eventually reach the expected status.
-
- :param resource: pointer to os resource type, ex: heat_client.stacks
- :param resource_id: unique id for the openstack resource
- :param expected_stat: status to expect resource to reach
- :param msg: text to identify purpose in logging
- :param max_wait: maximum wait time in seconds
- :returns: True if successful, False if status is not reached
- """
-
- tries = 0
- resource_stat = resource.get(resource_id).status
- while resource_stat != expected_stat and tries < (max_wait / 4):
- self.log.debug('{} status check: '
- '{} [{}:{}] {}'.format(msg, tries,
- resource_stat,
- expected_stat,
- resource_id))
- time.sleep(4)
- resource_stat = resource.get(resource_id).status
- tries += 1
-
- self.log.debug('{}: expected, actual status = {}, '
- '{}'.format(msg, resource_stat, expected_stat))
-
- if resource_stat == expected_stat:
- return True
- else:
- self.log.debug('{} never reached expected status: '
- '{}'.format(resource_id, expected_stat))
- return False
-
- def get_ceph_osd_id_cmd(self, index):
- """Produce a shell command that will return a ceph-osd id."""
- return ("`initctl list | grep 'ceph-osd ' | "
- "awk 'NR=={} {{ print $2 }}' | "
- "grep -o '[0-9]*'`".format(index + 1))
-
- def get_ceph_pools(self, sentry_unit):
- """Return a dict of ceph pools from a single ceph unit, with
- pool name as keys, pool id as vals."""
- pools = {}
- cmd = 'sudo ceph osd lspools'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # For mimic ceph osd lspools output
- output = output.replace("\n", ",")
-
- # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
- for pool in str(output).split(','):
- pool_id_name = pool.split(' ')
- if len(pool_id_name) == 2:
- pool_id = pool_id_name[0]
- pool_name = pool_id_name[1]
- pools[pool_name] = int(pool_id)
-
- self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
- pools))
- return pools
-
- def get_ceph_df(self, sentry_unit):
- """Return dict of ceph df json output, including ceph pool state.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :returns: Dict of ceph df output
- """
- cmd = 'sudo ceph df --format=json'
- output, code = sentry_unit.run(cmd)
- if code != 0:
- msg = ('{} `{}` returned {} '
- '{}'.format(sentry_unit.info['unit_name'],
- cmd, code, output))
- amulet.raise_status(amulet.FAIL, msg=msg)
- return json.loads(output)
-
- def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
- """Take a sample of attributes of a ceph pool, returning ceph
- pool name, object count and disk space used for the specified
- pool ID number.
-
- :param sentry_unit: Pointer to amulet sentry instance (juju unit)
- :param pool_id: Ceph pool ID
- :returns: List of pool name, object count, kb disk space used
- """
- df = self.get_ceph_df(sentry_unit)
- for pool in df['pools']:
- if pool['id'] == pool_id:
- pool_name = pool['name']
- obj_count = pool['stats']['objects']
- kb_used = pool['stats']['kb_used']
-
- self.log.debug('Ceph {} pool (ID {}): {} objects, '
- '{} kb used'.format(pool_name, pool_id,
- obj_count, kb_used))
- return pool_name, obj_count, kb_used
-
- def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
- """Validate ceph pool samples taken over time, such as pool
- object counts or pool kb used, before adding, after adding, and
- after deleting items which affect those pool attributes. The
- 2nd element is expected to be greater than the 1st; 3rd is expected
- to be less than the 2nd.
-
- :param samples: List containing 3 data samples
- :param sample_type: String for logging and usage context
- :returns: None if successful, Failure message otherwise
- """
- original, created, deleted = range(3)
- if samples[created] <= samples[original] or \
- samples[deleted] >= samples[created]:
- return ('Ceph {} samples ({}) '
- 'unexpected.'.format(sample_type, samples))
- else:
- self.log.debug('Ceph {} samples (OK): '
- '{}'.format(sample_type, samples))
- return None
-
- # rabbitmq/amqp specific helpers:
-
- def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
- """Wait for rmq units extended status to show cluster readiness,
- after an optional initial sleep period. Initial sleep is likely
- necessary to be effective following a config change, as status
- message may not instantly update to non-ready."""
-
- if init_sleep:
- time.sleep(init_sleep)
-
- message = re.compile('^Unit is ready and clustered$')
- deployment._auto_wait_for_status(message=message,
- timeout=timeout,
- include_only=['rabbitmq-server'])
-
- def add_rmq_test_user(self, sentry_units,
- username="testuser1", password="changeme"):
- """Add a test user via the first rmq juju unit, check connection as
- the new user against all sentry units.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Adding rmq user ({})...'.format(username))
-
- # Check that user does not already exist
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
- if username in output:
- self.log.warning('User ({}) already exists, returning '
- 'gracefully.'.format(username))
- return
-
- perms = '".*" ".*" ".*"'
- cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
- 'rabbitmqctl set_permissions {} {}'.format(username, perms)]
-
- # Add user via first unit
- for cmd in cmds:
- output, _ = self.run_cmd_unit(sentry_units[0], cmd)
-
- # Check connection against the other sentry_units
- self.log.debug('Checking user connect against units...')
- for sentry_unit in sentry_units:
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
- username=username,
- password=password)
- connection.close()
-
- def delete_rmq_test_user(self, sentry_units, username="testuser1"):
- """Delete a rabbitmq user via the first rmq juju unit.
-
- :param sentry_units: list of sentry unit pointers
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: None if successful or no such user.
- """
- self.log.debug('Deleting rmq user ({})...'.format(username))
-
- # Check that the user exists
- cmd_user_list = 'rabbitmqctl list_users'
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
-
- if username not in output:
- self.log.warning('User ({}) does not exist, returning '
- 'gracefully.'.format(username))
- return
-
- # Delete the user
- cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
- output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
-
- def get_rmq_cluster_status(self, sentry_unit):
- """Execute rabbitmq cluster status command on a unit and return
- the full output.
-
- :param unit: sentry unit
- :returns: String containing console output of cluster status command
- """
- cmd = 'rabbitmqctl cluster_status'
- output, _ = self.run_cmd_unit(sentry_unit, cmd)
- self.log.debug('{} cluster_status:\n{}'.format(
- sentry_unit.info['unit_name'], output))
- return str(output)
-
- def get_rmq_cluster_running_nodes(self, sentry_unit):
- """Parse rabbitmqctl cluster_status output string, return list of
- running rabbitmq cluster nodes.
-
- :param unit: sentry unit
- :returns: List containing node names of running nodes
- """
- # NOTE(beisner): rabbitmqctl cluster_status output is not
- # json-parsable, do string chop foo, then json.loads that.
- str_stat = self.get_rmq_cluster_status(sentry_unit)
- if 'running_nodes' in str_stat:
- pos_start = str_stat.find("{running_nodes,") + 15
- pos_end = str_stat.find("]},", pos_start) + 1
- str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"')
- run_nodes = json.loads(str_run_nodes)
- return run_nodes
- else:
- return []
-
- def validate_rmq_cluster_running_nodes(self, sentry_units):
- """Check that all rmq unit hostnames are represented in the
- cluster_status output of all units.
-
- :param host_names: dict of juju unit names to host names
- :param units: list of sentry unit pointers (all rmq units)
- :returns: None if successful, otherwise return error message
- """
- host_names = self.get_unit_hostnames(sentry_units)
- errors = []
-
- # Query every unit for cluster_status running nodes
- for query_unit in sentry_units:
- query_unit_name = query_unit.info['unit_name']
- running_nodes = self.get_rmq_cluster_running_nodes(query_unit)
-
- # Confirm that every unit is represented in the queried unit's
- # cluster_status running nodes output.
- for validate_unit in sentry_units:
- val_host_name = host_names[validate_unit.info['unit_name']]
- val_node_name = 'rabbit@{}'.format(val_host_name)
-
- if val_node_name not in running_nodes:
- errors.append('Cluster member check failed on {}: {} not '
- 'in {}\n'.format(query_unit_name,
- val_node_name,
- running_nodes))
- if errors:
- return ''.join(errors)
-
- def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
- """Check a single juju rmq unit for ssl and port in the config file."""
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- conf_file = '/etc/rabbitmq/rabbitmq.config'
- conf_contents = str(self.file_contents_safe(sentry_unit,
- conf_file, max_wait=16))
- # Checks
- conf_ssl = 'ssl' in conf_contents
- conf_port = str(port) in conf_contents
-
- # Port explicitly checked in config
- if port and conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif port and not conf_port and conf_ssl:
- self.log.debug('SSL is enabled @{} but not on port {} '
- '({})'.format(host, port, unit_name))
- return False
- # Port not checked (useful when checking that ssl is disabled)
- elif not port and conf_ssl:
- self.log.debug('SSL is enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return True
- elif not conf_ssl:
- self.log.debug('SSL not enabled @{}:{} '
- '({})'.format(host, port, unit_name))
- return False
- else:
- msg = ('Unknown condition when checking SSL status @{}:{} '
- '({})'.format(host, port, unit_name))
- amulet.raise_status(amulet.FAIL, msg)
-
- def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
- """Check that ssl is enabled on rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :param port: optional ssl port override to validate
- :returns: None if successful, otherwise return error message
- """
- for sentry_unit in sentry_units:
- if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port):
- return ('Unexpected condition: ssl is disabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def validate_rmq_ssl_disabled_units(self, sentry_units):
- """Check that ssl is enabled on listed rmq juju sentry units.
-
- :param sentry_units: list of all rmq sentry units
- :returns: True if successful. Raise on error.
- """
- for sentry_unit in sentry_units:
- if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
- return ('Unexpected condition: ssl is enabled on unit '
- '({})'.format(sentry_unit.info['unit_name']))
- return None
-
- def configure_rmq_ssl_on(self, sentry_units, deployment,
- port=None, max_wait=60):
- """Turn ssl charm config option on, with optional non-default
- ssl port specification. Confirm that it is enabled on every
- unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param port: amqp port, use defaults if None
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: on')
-
- # Enable RMQ SSL
- config = {'ssl': 'on'}
- if port:
- config['ssl_port'] = port
-
- deployment.d.configure('rabbitmq-server', config)
-
- # Wait for unit status
- self.rmq_wait_for_cluster(deployment)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
- """Turn ssl charm config option off, confirm that it is disabled
- on every unit.
-
- :param sentry_units: list of sentry units
- :param deployment: amulet deployment object pointer
- :param max_wait: maximum time to wait in seconds to confirm
- :returns: None if successful. Raise on error.
- """
- self.log.debug('Setting ssl charm config option: off')
-
- # Disable RMQ SSL
- config = {'ssl': 'off'}
- deployment.d.configure('rabbitmq-server', config)
-
- # Wait for unit status
- self.rmq_wait_for_cluster(deployment)
-
- # Confirm
- tries = 0
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- while ret and tries < (max_wait / 4):
- time.sleep(4)
- self.log.debug('Attempt {}: {}'.format(tries, ret))
- ret = self.validate_rmq_ssl_disabled_units(sentry_units)
- tries += 1
-
- if ret:
- amulet.raise_status(amulet.FAIL, ret)
-
- def connect_amqp_by_unit(self, sentry_unit, ssl=False,
- port=None, fatal=True,
- username="testuser1", password="changeme"):
- """Establish and return a pika amqp connection to the rabbitmq service
- running on a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :param fatal: boolean, default to True (raises on connect error)
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :returns: pika amqp connection pointer or None if failed and non-fatal
- """
- host = sentry_unit.info['public-address']
- unit_name = sentry_unit.info['unit_name']
-
- # Default port logic if port is not specified
- if ssl and not port:
- port = 5671
- elif not ssl and not port:
- port = 5672
-
- self.log.debug('Connecting to amqp on {}:{} ({}) as '
- '{}...'.format(host, port, unit_name, username))
-
- try:
- credentials = pika.PlainCredentials(username, password)
- parameters = pika.ConnectionParameters(host=host, port=port,
- credentials=credentials,
- ssl=ssl,
- connection_attempts=3,
- retry_delay=5,
- socket_timeout=1)
- connection = pika.BlockingConnection(parameters)
- assert connection.is_open is True
- assert connection.is_closing is False
- self.log.debug('Connect OK')
- return connection
- except Exception as e:
- msg = ('amqp connection failed to {}:{} as '
- '{} ({})'.format(host, port, username, str(e)))
- if fatal:
- amulet.raise_status(amulet.FAIL, msg)
- else:
- self.log.warn(msg)
- return None
-
- def publish_amqp_message_by_unit(self, sentry_unit, message,
- queue="test", ssl=False,
- username="testuser1",
- password="changeme",
- port=None):
- """Publish an amqp message to a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param message: amqp message string
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: None. Raises exception if publish failed.
- """
- self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
- message))
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
-
- # NOTE(beisner): extra debug here re: pika hang potential:
- # https://github.com/pika/pika/issues/297
- # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
- self.log.debug('Defining channel...')
- channel = connection.channel()
- self.log.debug('Declaring queue...')
- channel.queue_declare(queue=queue, auto_delete=False, durable=True)
- self.log.debug('Publishing message...')
- channel.basic_publish(exchange='', routing_key=queue, body=message)
- self.log.debug('Closing channel...')
- channel.close()
- self.log.debug('Closing connection...')
- connection.close()
-
- def get_amqp_message_by_unit(self, sentry_unit, queue="test",
- username="testuser1",
- password="changeme",
- ssl=False, port=None):
- """Get an amqp message from a rmq juju unit.
-
- :param sentry_unit: sentry unit pointer
- :param queue: message queue, default to test
- :param username: amqp user name, default to testuser1
- :param password: amqp user password
- :param ssl: boolean, default to False
- :param port: amqp port, use defaults if None
- :returns: amqp message body as string. Raise if get fails.
- """
- connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
- port=port,
- username=username,
- password=password)
- channel = connection.channel()
- method_frame, _, body = channel.basic_get(queue)
-
- if method_frame:
- self.log.debug('Retreived message from {} queue:\n{}'.format(queue,
- body))
- channel.basic_ack(method_frame.delivery_tag)
- channel.close()
- connection.close()
- return body
- else:
- msg = 'No message retrieved.'
- amulet.raise_status(amulet.FAIL, msg)
-
- def validate_memcache(self, sentry_unit, conf, os_release,
- earliest_release=5, section='keystone_authtoken',
- check_kvs=None):
- """Check Memcache is running and is configured to be used
-
- Example call from Amulet test:
-
- def test_110_memcache(self):
- u.validate_memcache(self.neutron_api_sentry,
- '/etc/neutron/neutron.conf',
- self._get_openstack_release())
-
- :param sentry_unit: sentry unit
- :param conf: OpenStack config file to check memcache settings
- :param os_release: Current OpenStack release int code
- :param earliest_release: Earliest Openstack release to check int code
- :param section: OpenStack config file section to check
- :param check_kvs: Dict of settings to check in config file
- :returns: None
- """
- if os_release < earliest_release:
- self.log.debug('Skipping memcache checks for deployment. {} <'
- 'mitaka'.format(os_release))
- return
- _kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'}
- self.log.debug('Checking memcached is running')
- ret = self.validate_services_by_name({sentry_unit: ['memcached']})
- if ret:
- amulet.raise_status(amulet.FAIL, msg='Memcache running check'
- 'failed {}'.format(ret))
- else:
- self.log.debug('OK')
- self.log.debug('Checking memcache url is configured in {}'.format(
- conf))
- if self.validate_config_data(sentry_unit, conf, section, _kvs):
- message = "Memcache config error in: {}".format(conf)
- amulet.raise_status(amulet.FAIL, msg=message)
- else:
- self.log.debug('OK')
- self.log.debug('Checking memcache configuration in '
- '/etc/memcached.conf')
- contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf',
- fatal=True)
- ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs')
- if CompareHostReleases(ubuntu_release) <= 'trusty':
- memcache_listen_addr = 'ip6-localhost'
- else:
- memcache_listen_addr = '::1'
- expected = {
- '-p': '11211',
- '-l': memcache_listen_addr}
- found = []
- for key, value in expected.items():
- for line in contents.split('\n'):
- if line.startswith(key):
- self.log.debug('Checking {} is set to {}'.format(
- key,
- value))
- assert value == line.split()[-1]
- self.log.debug(line.split()[-1])
- found.append(key)
- if sorted(found) == sorted(expected.keys()):
- self.log.debug('OK')
- else:
- message = "Memcache config error in: /etc/memcached.conf"
- amulet.raise_status(amulet.FAIL, msg=message)
diff --git a/tests/charmhelpers/core/__init__.py b/tests/charmhelpers/core/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/tests/charmhelpers/core/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/tests/charmhelpers/core/decorators.py b/tests/charmhelpers/core/decorators.py
deleted file mode 100644
index 6ad41ee..0000000
--- a/tests/charmhelpers/core/decorators.py
+++ /dev/null
@@ -1,55 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# Copyright 2014 Canonical Ltd.
-#
-# Authors:
-# Edward Hope-Morley <opentastic@gmail.com>
-#
-
-import time
-
-from charmhelpers.core.hookenv import (
- log,
- INFO,
-)
-
-
-def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
- """If the decorated function raises exception exc_type, allow num_retries
- retry attempts before raise the exception.
- """
- def _retry_on_exception_inner_1(f):
- def _retry_on_exception_inner_2(*args, **kwargs):
- retries = num_retries
- multiplier = 1
- while True:
- try:
- return f(*args, **kwargs)
- except exc_type:
- if not retries:
- raise
-
- delay = base_delay * multiplier
- multiplier += 1
- log("Retrying '%s' %d more times (delay=%s)" %
- (f.__name__, retries, delay), level=INFO)
- retries -= 1
- if delay:
- time.sleep(delay)
-
- return _retry_on_exception_inner_2
-
- return _retry_on_exception_inner_1
diff --git a/tests/charmhelpers/core/files.py b/tests/charmhelpers/core/files.py
deleted file mode 100644
index fdd82b7..0000000
--- a/tests/charmhelpers/core/files.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
-
-import os
-import subprocess
-
-
-def sed(filename, before, after, flags='g'):
- """
- Search and replaces the given pattern on filename.
-
- :param filename: relative or absolute file path.
- :param before: expression to be replaced (see 'man sed')
- :param after: expression to replace with (see 'man sed')
- :param flags: sed-compatible regex flags in example, to make
- the search and replace case insensitive, specify ``flags="i"``.
- The ``g`` flag is always specified regardless, so you do not
- need to remember to include it when overriding this parameter.
- :returns: If the sed command exit code was zero then return,
- otherwise raise CalledProcessError.
- """
- expression = r's/{0}/{1}/{2}'.format(before,
- after, flags)
-
- return subprocess.check_call(["sed", "-i", "-r", "-e",
- expression,
- os.path.expanduser(filename)])
diff --git a/tests/charmhelpers/core/fstab.py b/tests/charmhelpers/core/fstab.py
deleted file mode 100644
index d9fa915..0000000
--- a/tests/charmhelpers/core/fstab.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import io
-import os
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-class Fstab(io.FileIO):
- """This class extends file in order to implement a file reader/writer
- for file `/etc/fstab`
- """
-
- class Entry(object):
- """Entry class represents a non-comment line on the `/etc/fstab` file
- """
- def __init__(self, device, mountpoint, filesystem,
- options, d=0, p=0):
- self.device = device
- self.mountpoint = mountpoint
- self.filesystem = filesystem
-
- if not options:
- options = "defaults"
-
- self.options = options
- self.d = int(d)
- self.p = int(p)
-
- def __eq__(self, o):
- return str(self) == str(o)
-
- def __str__(self):
- return "{} {} {} {} {} {}".format(self.device,
- self.mountpoint,
- self.filesystem,
- self.options,
- self.d,
- self.p)
-
- DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
-
- def __init__(self, path=None):
- if path:
- self._path = path
- else:
- self._path = self.DEFAULT_PATH
- super(Fstab, self).__init__(self._path, 'rb+')
-
- def _hydrate_entry(self, line):
- # NOTE: use split with no arguments to split on any
- # whitespace including tabs
- return Fstab.Entry(*filter(
- lambda x: x not in ('', None),
- line.strip("\n").split()))
-
- @property
- def entries(self):
- self.seek(0)
- for line in self.readlines():
- line = line.decode('us-ascii')
- try:
- if line.strip() and not line.strip().startswith("#"):
- yield self._hydrate_entry(line)
- except ValueError:
- pass
-
- def get_entry_by_attr(self, attr, value):
- for entry in self.entries:
- e_attr = getattr(entry, attr)
- if e_attr == value:
- return entry
- return None
-
- def add_entry(self, entry):
- if self.get_entry_by_attr('device', entry.device):
- return False
-
- self.write((str(entry) + '\n').encode('us-ascii'))
- self.truncate()
- return entry
-
- def remove_entry(self, entry):
- self.seek(0)
-
- lines = [l.decode('us-ascii') for l in self.readlines()]
-
- found = False
- for index, line in enumerate(lines):
- if line.strip() and not line.strip().startswith("#"):
- if self._hydrate_entry(line) == entry:
- found = True
- break
-
- if not found:
- return False
-
- lines.remove(line)
-
- self.seek(0)
- self.write(''.join(lines).encode('us-ascii'))
- self.truncate()
- return True
-
- @classmethod
- def remove_by_mountpoint(cls, mountpoint, path=None):
- fstab = cls(path=path)
- entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
- if entry:
- return fstab.remove_entry(entry)
- return False
-
- @classmethod
- def add(cls, device, mountpoint, filesystem, options=None, path=None):
- return cls(path=path).add_entry(Fstab.Entry(device,
- mountpoint, filesystem,
- options=options))
diff --git a/tests/charmhelpers/core/hookenv.py b/tests/charmhelpers/core/hookenv.py
deleted file mode 100644
index 6880007..0000000
--- a/tests/charmhelpers/core/hookenv.py
+++ /dev/null
@@ -1,1353 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"Interactions with the Juju environment"
-# Copyright 2013 Canonical Ltd.
-#
-# Authors:
-# Charm Helpers Developers <juju@lists.ubuntu.com>
-
-from __future__ import print_function
-import copy
-from distutils.version import LooseVersion
-from functools import wraps
-from collections import namedtuple
-import glob
-import os
-import json
-import yaml
-import re
-import subprocess
-import sys
-import errno
-import tempfile
-from subprocess import CalledProcessError
-
-import six
-if not six.PY3:
- from UserDict import UserDict
-else:
- from collections import UserDict
-
-
-CRITICAL = "CRITICAL"
-ERROR = "ERROR"
-WARNING = "WARNING"
-INFO = "INFO"
-DEBUG = "DEBUG"
-TRACE = "TRACE"
-MARKER = object()
-
-cache = {}
-
-
-def cached(func):
- """Cache return values for multiple executions of func + args
-
- For example::
-
- @cached
- def unit_get(attribute):
- pass
-
- unit_get('test')
-
- will cache the result of unit_get + 'test' for future calls.
- """
- @wraps(func)
- def wrapper(*args, **kwargs):
- global cache
- key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
- try:
- return cache[key]
- except KeyError:
- pass # Drop out of the exception handler scope.
- res = func(*args, **kwargs)
- cache[key] = res
- return res
- wrapper._wrapped = func
- return wrapper
-
-
-def flush(key):
- """Flushes any entries from function cache where the
- key is found in the function+args """
- flush_list = []
- for item in cache:
- if key in item:
- flush_list.append(item)
- for item in flush_list:
- del cache[item]
-
-
-def log(message, level=None):
- """Write a message to the juju log"""
- command = ['juju-log']
- if level:
- command += ['-l', level]
- if not isinstance(message, six.string_types):
- message = repr(message)
- command += [message]
- # Missing juju-log should not cause failures in unit tests
- # Send log output to stderr
- try:
- subprocess.call(command)
- except OSError as e:
- if e.errno == errno.ENOENT:
- if level:
- message = "{}: {}".format(level, message)
- message = "juju-log: {}".format(message)
- print(message, file=sys.stderr)
- else:
- raise
-
-
-class Serializable(UserDict):
- """Wrapper, an object that can be serialized to yaml or json"""
-
- def __init__(self, obj):
- # wrap the object
- UserDict.__init__(self)
- self.data = obj
-
- def __getattr__(self, attr):
- # See if this object has attribute.
- if attr in ("json", "yaml", "data"):
- return self.__dict__[attr]
- # Check for attribute in wrapped object.
- got = getattr(self.data, attr, MARKER)
- if got is not MARKER:
- return got
- # Proxy to the wrapped object via dict interface.
- try:
- return self.data[attr]
- except KeyError:
- raise AttributeError(attr)
-
- def __getstate__(self):
- # Pickle as a standard dictionary.
- return self.data
-
- def __setstate__(self, state):
- # Unpickle into our wrapper.
- self.data = state
-
- def json(self):
- """Serialize the object to json"""
- return json.dumps(self.data)
-
- def yaml(self):
- """Serialize the object to yaml"""
- return yaml.dump(self.data)
-
-
-def execution_environment():
- """A convenient bundling of the current execution context"""
- context = {}
- context['conf'] = config()
- if relation_id():
- context['reltype'] = relation_type()
- context['relid'] = relation_id()
- context['rel'] = relation_get()
- context['unit'] = local_unit()
- context['rels'] = relations()
- context['env'] = os.environ
- return context
-
-
-def in_relation_hook():
- """Determine whether we're running in a relation hook"""
- return 'JUJU_RELATION' in os.environ
-
-
-def relation_type():
- """The scope for the current relation hook"""
- return os.environ.get('JUJU_RELATION', None)
-
-
-@cached
-def relation_id(relation_name=None, service_or_unit=None):
- """The relation ID for the current or a specified relation"""
- if not relation_name and not service_or_unit:
- return os.environ.get('JUJU_RELATION_ID', None)
- elif relation_name and service_or_unit:
- service_name = service_or_unit.split('/')[0]
- for relid in relation_ids(relation_name):
- remote_service = remote_service_name(relid)
- if remote_service == service_name:
- return relid
- else:
- raise ValueError('Must specify neither or both of relation_name and service_or_unit')
-
-
-def local_unit():
- """Local unit ID"""
- return os.environ['JUJU_UNIT_NAME']
-
-
-def remote_unit():
- """The remote unit for the current relation hook"""
- return os.environ.get('JUJU_REMOTE_UNIT', None)
-
-
-def application_name():
- """
- The name of the deployed application this unit belongs to.
- """
- return local_unit().split('/')[0]
-
-
-def service_name():
- """
- .. deprecated:: 0.19.1
- Alias for :func:`application_name`.
- """
- return application_name()
-
-
-def model_name():
- """
- Name of the model that this unit is deployed in.
- """
- return os.environ['JUJU_MODEL_NAME']
-
-
-def model_uuid():
- """
- UUID of the model that this unit is deployed in.
- """
- return os.environ['JUJU_MODEL_UUID']
-
-
-def principal_unit():
- """Returns the principal unit of this unit, otherwise None"""
- # Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT
- principal_unit = os.environ.get('JUJU_PRINCIPAL_UNIT', None)
- # If it's empty, then this unit is the principal
- if principal_unit == '':
- return os.environ['JUJU_UNIT_NAME']
- elif principal_unit is not None:
- return principal_unit
- # For Juju 2.1 and below, let's try work out the principle unit by
- # the various charms' metadata.yaml.
- for reltype in relation_types():
- for rid in relation_ids(reltype):
- for unit in related_units(rid):
- md = _metadata_unit(unit)
- if not md:
- continue
- subordinate = md.pop('subordinate', None)
- if not subordinate:
- return unit
- return None
-
-
-@cached
-def remote_service_name(relid=None):
- """The remote service name for a given relation-id (or the current relation)"""
- if relid is None:
- unit = remote_unit()
- else:
- units = related_units(relid)
- unit = units[0] if units else None
- return unit.split('/')[0] if unit else None
-
-
-def hook_name():
- """The name of the currently executing hook"""
- return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0]))
-
-
-class Config(dict):
- """A dictionary representation of the charm's config.yaml, with some
- extra features:
-
- - See which values in the dictionary have changed since the previous hook.
- - For values that have changed, see what the previous value was.
- - Store arbitrary data for use in a later hook.
-
- NOTE: Do not instantiate this object directly - instead call
- ``hookenv.config()``, which will return an instance of :class:`Config`.
-
- Example usage::
-
- >>> # inside a hook
- >>> from charmhelpers.core import hookenv
- >>> config = hookenv.config()
- >>> config['foo']
- 'bar'
- >>> # store a new key/value for later use
- >>> config['mykey'] = 'myval'
-
-
- >>> # user runs `juju set mycharm foo=baz`
- >>> # now we're inside subsequent config-changed hook
- >>> config = hookenv.config()
- >>> config['foo']
- 'baz'
- >>> # test to see if this val has changed since last hook
- >>> config.changed('foo')
- True
- >>> # what was the previous value?
- >>> config.previous('foo')
- 'bar'
- >>> # keys/values that we add are preserved across hooks
- >>> config['mykey']
- 'myval'
-
- """
- CONFIG_FILE_NAME = '.juju-persistent-config'
-
- def __init__(self, *args, **kw):
- super(Config, self).__init__(*args, **kw)
- self.implicit_save = True
- self._prev_dict = None
- self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
- if os.path.exists(self.path) and os.stat(self.path).st_size:
- self.load_previous()
- atexit(self._implicit_save)
-
- def load_previous(self, path=None):
- """Load previous copy of config from disk.
-
- In normal usage you don't need to call this method directly - it
- is called automatically at object initialization.
-
- :param path:
-
- File path from which to load the previous config. If `None`,
- config is loaded from the default location. If `path` is
- specified, subsequent `save()` calls will write to the same
- path.
-
- """
- self.path = path or self.path
- with open(self.path) as f:
- try:
- self._prev_dict = json.load(f)
- except ValueError as e:
- log('Unable to parse previous config data - {}'.format(str(e)),
- level=ERROR)
- for k, v in copy.deepcopy(self._prev_dict).items():
- if k not in self:
- self[k] = v
-
- def changed(self, key):
- """Return True if the current value for this key is different from
- the previous value.
-
- """
- if self._prev_dict is None:
- return True
- return self.previous(key) != self.get(key)
-
- def previous(self, key):
- """Return previous value for this key, or None if there
- is no previous value.
-
- """
- if self._prev_dict:
- return self._prev_dict.get(key)
- return None
-
- def save(self):
- """Save this config to disk.
-
- If the charm is using the :mod:`Services Framework <services.base>`
- or :meth:'@hook <Hooks.hook>' decorator, this
- is called automatically at the end of successful hook execution.
- Otherwise, it should be called directly by user code.
-
- To disable automatic saves, set ``implicit_save=False`` on this
- instance.
-
- """
- with open(self.path, 'w') as f:
- os.fchmod(f.fileno(), 0o600)
- json.dump(self, f)
-
- def _implicit_save(self):
- if self.implicit_save:
- self.save()
-
-
-_cache_config = None
-
-
-def config(scope=None):
- """
- Get the juju charm configuration (scope==None) or individual key,
- (scope=str). The returned value is a Python data structure loaded as
- JSON from the Juju config command.
-
- :param scope: If set, return the value for the specified key.
- :type scope: Optional[str]
- :returns: Either the whole config as a Config, or a key from it.
- :rtype: Any
- """
- global _cache_config
- config_cmd_line = ['config-get', '--all', '--format=json']
- try:
- # JSON Decode Exception for Python3.5+
- exc_json = json.decoder.JSONDecodeError
- except AttributeError:
- # JSON Decode Exception for Python2.7 through Python3.4
- exc_json = ValueError
- try:
- if _cache_config is None:
- config_data = json.loads(
- subprocess.check_output(config_cmd_line).decode('UTF-8'))
- _cache_config = Config(config_data)
- if scope is not None:
- return _cache_config.get(scope)
- return _cache_config
- except (exc_json, UnicodeDecodeError) as e:
- log('Unable to parse output from config-get: config_cmd_line="{}" '
- 'message="{}"'
- .format(config_cmd_line, str(e)), level=ERROR)
- return None
-
-
-@cached
-def relation_get(attribute=None, unit=None, rid=None):
- """Get relation information"""
- _args = ['relation-get', '--format=json']
- if rid:
- _args.append('-r')
- _args.append(rid)
- _args.append(attribute or '-')
- if unit:
- _args.append(unit)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
- except CalledProcessError as e:
- if e.returncode == 2:
- return None
- raise
-
-
-def relation_set(relation_id=None, relation_settings=None, **kwargs):
- """Set relation information for the current unit"""
- relation_settings = relation_settings if relation_settings else {}
- relation_cmd_line = ['relation-set']
- accepts_file = "--file" in subprocess.check_output(
- relation_cmd_line + ["--help"], universal_newlines=True)
- if relation_id is not None:
- relation_cmd_line.extend(('-r', relation_id))
- settings = relation_settings.copy()
- settings.update(kwargs)
- for key, value in settings.items():
- # Force value to be a string: it always should, but some call
- # sites pass in things like dicts or numbers.
- if value is not None:
- settings[key] = "{}".format(value)
- if accepts_file:
- # --file was introduced in Juju 1.23.2. Use it by default if
- # available, since otherwise we'll break if the relation data is
- # too big. Ideally we should tell relation-set to read the data from
- # stdin, but that feature is broken in 1.23.2: Bug #1454678.
- with tempfile.NamedTemporaryFile(delete=False) as settings_file:
- settings_file.write(yaml.safe_dump(settings).encode("utf-8"))
- subprocess.check_call(
- relation_cmd_line + ["--file", settings_file.name])
- os.remove(settings_file.name)
- else:
- for key, value in settings.items():
- if value is None:
- relation_cmd_line.append('{}='.format(key))
- else:
- relation_cmd_line.append('{}={}'.format(key, value))
- subprocess.check_call(relation_cmd_line)
- # Flush cache of any relation-gets for local unit
- flush(local_unit())
-
-
-def relation_clear(r_id=None):
- ''' Clears any relation data already set on relation r_id '''
- settings = relation_get(rid=r_id,
- unit=local_unit())
- for setting in settings:
- if setting not in ['public-address', 'private-address']:
- settings[setting] = None
- relation_set(relation_id=r_id,
- **settings)
-
-
-@cached
-def relation_ids(reltype=None):
- """A list of relation_ids"""
- reltype = reltype or relation_type()
- relid_cmd_line = ['relation-ids', '--format=json']
- if reltype is not None:
- relid_cmd_line.append(reltype)
- return json.loads(
- subprocess.check_output(relid_cmd_line).decode('UTF-8')) or []
- return []
-
-
-@cached
-def related_units(relid=None):
- """A list of related units"""
- relid = relid or relation_id()
- units_cmd_line = ['relation-list', '--format=json']
- if relid is not None:
- units_cmd_line.extend(('-r', relid))
- return json.loads(
- subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
-
-
-@cached
-def relation_for_unit(unit=None, rid=None):
- """Get the json represenation of a unit's relation"""
- unit = unit or remote_unit()
- relation = relation_get(unit=unit, rid=rid)
- for key in relation:
- if key.endswith('-list'):
- relation[key] = relation[key].split()
- relation['__unit__'] = unit
- return relation
-
-
-@cached
-def relations_for_id(relid=None):
- """Get relations of a specific relation ID"""
- relation_data = []
- relid = relid or relation_ids()
- for unit in related_units(relid):
- unit_data = relation_for_unit(unit, relid)
- unit_data['__relid__'] = relid
- relation_data.append(unit_data)
- return relation_data
-
-
-@cached
-def relations_of_type(reltype=None):
- """Get relations of a specific type"""
- relation_data = []
- reltype = reltype or relation_type()
- for relid in relation_ids(reltype):
- for relation in relations_for_id(relid):
- relation['__relid__'] = relid
- relation_data.append(relation)
- return relation_data
-
-
-@cached
-def metadata():
- """Get the current charm metadata.yaml contents as a python object"""
- with open(os.path.join(charm_dir(), 'metadata.yaml')) as md:
- return yaml.safe_load(md)
-
-
-def _metadata_unit(unit):
- """Given the name of a unit (e.g. apache2/0), get the unit charm's
- metadata.yaml. Very similar to metadata() but allows us to inspect
- other units. Unit needs to be co-located, such as a subordinate or
- principal/primary.
-
- :returns: metadata.yaml as a python object.
-
- """
- basedir = os.sep.join(charm_dir().split(os.sep)[:-2])
- unitdir = 'unit-{}'.format(unit.replace(os.sep, '-'))
- joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')
- if not os.path.exists(joineddir):
- return None
- with open(joineddir) as md:
- return yaml.safe_load(md)
-
-
-@cached
-def relation_types():
- """Get a list of relation types supported by this charm"""
- rel_types = []
- md = metadata()
- for key in ('provides', 'requires', 'peers'):
- section = md.get(key)
- if section:
- rel_types.extend(section.keys())
- return rel_types
-
-
-@cached
-def peer_relation_id():
- '''Get the peers relation id if a peers relation has been joined, else None.'''
- md = metadata()
- section = md.get('peers')
- if section:
- for key in section:
- relids = relation_ids(key)
- if relids:
- return relids[0]
- return None
-
-
-@cached
-def relation_to_interface(relation_name):
- """
- Given the name of a relation, return the interface that relation uses.
-
- :returns: The interface name, or ``None``.
- """
- return relation_to_role_and_interface(relation_name)[1]
-
-
-@cached
-def relation_to_role_and_interface(relation_name):
- """
- Given the name of a relation, return the role and the name of the interface
- that relation uses (where role is one of ``provides``, ``requires``, or ``peers``).
-
- :returns: A tuple containing ``(role, interface)``, or ``(None, None)``.
- """
- _metadata = metadata()
- for role in ('provides', 'requires', 'peers'):
- interface = _metadata.get(role, {}).get(relation_name, {}).get('interface')
- if interface:
- return role, interface
- return None, None
-
-
-@cached
-def role_and_interface_to_relations(role, interface_name):
- """
- Given a role and interface name, return a list of relation names for the
- current charm that use that interface under that role (where role is one
- of ``provides``, ``requires``, or ``peers``).
-
- :returns: A list of relation names.
- """
- _metadata = metadata()
- results = []
- for relation_name, relation in _metadata.get(role, {}).items():
- if relation['interface'] == interface_name:
- results.append(relation_name)
- return results
-
-
-@cached
-def interface_to_relations(interface_name):
- """
- Given an interface, return a list of relation names for the current
- charm that use that interface.
-
- :returns: A list of relation names.
- """
- results = []
- for role in ('provides', 'requires', 'peers'):
- results.extend(role_and_interface_to_relations(role, interface_name))
- return results
-
-
-@cached
-def charm_name():
- """Get the name of the current charm as is specified on metadata.yaml"""
- return metadata().get('name')
-
-
-@cached
-def relations():
- """Get a nested dictionary of relation data for all related units"""
- rels = {}
- for reltype in relation_types():
- relids = {}
- for relid in relation_ids(reltype):
- units = {local_unit(): relation_get(unit=local_unit(), rid=relid)}
- for unit in related_units(relid):
- reldata = relation_get(unit=unit, rid=relid)
- units[unit] = reldata
- relids[relid] = units
- rels[reltype] = relids
- return rels
-
-
-@cached
-def is_relation_made(relation, keys='private-address'):
- '''
- Determine whether a relation is established by checking for
- presence of key(s). If a list of keys is provided, they
- must all be present for the relation to be identified as made
- '''
- if isinstance(keys, str):
- keys = [keys]
- for r_id in relation_ids(relation):
- for unit in related_units(r_id):
- context = {}
- for k in keys:
- context[k] = relation_get(k, rid=r_id,
- unit=unit)
- if None not in context.values():
- return True
- return False
-
-
-def _port_op(op_name, port, protocol="TCP"):
- """Open or close a service network port"""
- _args = [op_name]
- icmp = protocol.upper() == "ICMP"
- if icmp:
- _args.append(protocol)
- else:
- _args.append('{}/{}'.format(port, protocol))
- try:
- subprocess.check_call(_args)
- except subprocess.CalledProcessError:
- # Older Juju pre 2.3 doesn't support ICMP
- # so treat it as a no-op if it fails.
- if not icmp:
- raise
-
-
-def open_port(port, protocol="TCP"):
- """Open a service network port"""
- _port_op('open-port', port, protocol)
-
-
-def close_port(port, protocol="TCP"):
- """Close a service network port"""
- _port_op('close-port', port, protocol)
-
-
-def open_ports(start, end, protocol="TCP"):
- """Opens a range of service network ports"""
- _args = ['open-port']
- _args.append('{}-{}/{}'.format(start, end, protocol))
- subprocess.check_call(_args)
-
-
-def close_ports(start, end, protocol="TCP"):
- """Close a range of service network ports"""
- _args = ['close-port']
- _args.append('{}-{}/{}'.format(start, end, protocol))
- subprocess.check_call(_args)
-
-
-def opened_ports():
- """Get the opened ports
-
- *Note that this will only show ports opened in a previous hook*
-
- :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']``
- """
- _args = ['opened-ports', '--format=json']
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
-
-
-@cached
-def unit_get(attribute):
- """Get the unit ID for the remote unit"""
- _args = ['unit-get', '--format=json', attribute]
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
-
-
-def unit_public_ip():
- """Get this unit's public IP address"""
- return unit_get('public-address')
-
-
-def unit_private_ip():
- """Get this unit's private IP address"""
- return unit_get('private-address')
-
-
-@cached
-def storage_get(attribute=None, storage_id=None):
- """Get storage attributes"""
- _args = ['storage-get', '--format=json']
- if storage_id:
- _args.extend(('-s', storage_id))
- if attribute:
- _args.append(attribute)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
-
-
-@cached
-def storage_list(storage_name=None):
- """List the storage IDs for the unit"""
- _args = ['storage-list', '--format=json']
- if storage_name:
- _args.append(storage_name)
- try:
- return json.loads(subprocess.check_output(_args).decode('UTF-8'))
- except ValueError:
- return None
- except OSError as e:
- import errno
- if e.errno == errno.ENOENT:
- # storage-list does not exist
- return []
- raise
-
-
-class UnregisteredHookError(Exception):
- """Raised when an undefined hook is called"""
- pass
-
-
-class Hooks(object):
- """A convenient handler for hook functions.
-
- Example::
-
- hooks = Hooks()
-
- # register a hook, taking its name from the function name
- @hooks.hook()
- def install():
- pass # your code here
-
- # register a hook, providing a custom hook name
- @hooks.hook("config-changed")
- def config_changed():
- pass # your code here
-
- if __name__ == "__main__":
- # execute a hook based on the name the program is called by
- hooks.execute(sys.argv)
- """
-
- def __init__(self, config_save=None):
- super(Hooks, self).__init__()
- self._hooks = {}
-
- # For unknown reasons, we allow the Hooks constructor to override
- # config().implicit_save.
- if config_save is not None:
- config().implicit_save = config_save
-
- def register(self, name, function):
- """Register a hook"""
- self._hooks[name] = function
-
- def execute(self, args):
- """Execute a registered hook based on args[0]"""
- _run_atstart()
- hook_name = os.path.basename(args[0])
- if hook_name in self._hooks:
- try:
- self._hooks[hook_name]()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- _run_atexit()
- raise
- _run_atexit()
- else:
- raise UnregisteredHookError(hook_name)
-
- def hook(self, *hook_names):
- """Decorator, registering them as hooks"""
- def wrapper(decorated):
- for hook_name in hook_names:
- self.register(hook_name, decorated)
- else:
- self.register(decorated.__name__, decorated)
- if '_' in decorated.__name__:
- self.register(
- decorated.__name__.replace('_', '-'), decorated)
- return decorated
- return wrapper
-
-
-class NoNetworkBinding(Exception):
- pass
-
-
-def charm_dir():
- """Return the root directory of the current charm"""
- d = os.environ.get('JUJU_CHARM_DIR')
- if d is not None:
- return d
- return os.environ.get('CHARM_DIR')
-
-
-@cached
-def action_get(key=None):
- """Gets the value of an action parameter, or all key/value param pairs"""
- cmd = ['action-get']
- if key is not None:
- cmd.append(key)
- cmd.append('--format=json')
- action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
- return action_data
-
-
-def action_set(values):
- """Sets the values to be returned after the action finishes"""
- cmd = ['action-set']
- for k, v in list(values.items()):
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-def action_fail(message):
- """Sets the action status to failed and sets the error message.
-
- The results set by action_set are preserved."""
- subprocess.check_call(['action-fail', message])
-
-
-def action_name():
- """Get the name of the currently executing action."""
- return os.environ.get('JUJU_ACTION_NAME')
-
-
-def action_uuid():
- """Get the UUID of the currently executing action."""
- return os.environ.get('JUJU_ACTION_UUID')
-
-
-def action_tag():
- """Get the tag for the currently executing action."""
- return os.environ.get('JUJU_ACTION_TAG')
-
-
-def status_set(workload_state, message):
- """Set the workload state with a message
-
- Use status-set to set the workload state with a message which is visible
- to the user via juju status. If the status-set command is not found then
- assume this is juju < 1.23 and juju-log the message unstead.
-
- workload_state -- valid juju workload state.
- message -- status update message
- """
- valid_states = ['maintenance', 'blocked', 'waiting', 'active']
- if workload_state not in valid_states:
- raise ValueError(
- '{!r} is not a valid workload state'.format(workload_state)
- )
- cmd = ['status-set', workload_state, message]
- try:
- ret = subprocess.call(cmd)
- if ret == 0:
- return
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
- log_message = 'status-set failed: {} {}'.format(workload_state,
- message)
- log(log_message, level='INFO')
-
-
-def status_get():
- """Retrieve the previously set juju workload state and message
-
- If the status-get command is not found then assume this is juju < 1.23 and
- return 'unknown', ""
-
- """
- cmd = ['status-get', "--format=json", "--include-data"]
- try:
- raw_status = subprocess.check_output(cmd)
- except OSError as e:
- if e.errno == errno.ENOENT:
- return ('unknown', "")
- else:
- raise
- else:
- status = json.loads(raw_status.decode("UTF-8"))
- return (status["status"], status["message"])
-
-
-def translate_exc(from_exc, to_exc):
- def inner_translate_exc1(f):
- @wraps(f)
- def inner_translate_exc2(*args, **kwargs):
- try:
- return f(*args, **kwargs)
- except from_exc:
- raise to_exc
-
- return inner_translate_exc2
-
- return inner_translate_exc1
-
-
-def application_version_set(version):
- """Charm authors may trigger this command from any hook to output what
- version of the application is running. This could be a package version,
- for instance postgres version 9.5. It could also be a build number or
- version control revision identifier, for instance git sha 6fb7ba68. """
-
- cmd = ['application-version-set']
- cmd.append(version)
- try:
- subprocess.check_call(cmd)
- except OSError:
- log("Application Version: {}".format(version))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def goal_state():
- """Juju goal state values"""
- cmd = ['goal-state', '--format=json']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def is_leader():
- """Does the current unit hold the juju leadership
-
- Uses juju to determine whether the current unit is the leader of its peers
- """
- cmd = ['is-leader', '--format=json']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_get(attribute=None):
- """Juju leader get value(s)"""
- cmd = ['leader-get', '--format=json'] + [attribute or '-']
- return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def leader_set(settings=None, **kwargs):
- """Juju leader set value(s)"""
- # Don't log secrets.
- # log("Juju leader-set '%s'" % (settings), level=DEBUG)
- cmd = ['leader-set']
- settings = settings or {}
- settings.update(kwargs)
- for k, v in settings.items():
- if v is None:
- cmd.append('{}='.format(k))
- else:
- cmd.append('{}={}'.format(k, v))
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_register(ptype, klass, pid):
- """ is used while a hook is running to let Juju know that a
- payload has been started."""
- cmd = ['payload-register']
- for x in [ptype, klass, pid]:
- cmd.append(x)
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_unregister(klass, pid):
- """ is used while a hook is running to let Juju know
- that a payload has been manually stopped. The <class> and <id> provided
- must match a payload that has been previously registered with juju using
- payload-register."""
- cmd = ['payload-unregister']
- for x in [klass, pid]:
- cmd.append(x)
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def payload_status_set(klass, pid, status):
- """is used to update the current status of a registered payload.
- The <class> and <id> provided must match a payload that has been previously
- registered with juju using payload-register. The <status> must be one of the
- follow: starting, started, stopping, stopped"""
- cmd = ['payload-status-set']
- for x in [klass, pid, status]:
- cmd.append(x)
- subprocess.check_call(cmd)
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def resource_get(name):
- """used to fetch the resource path of the given name.
-
- <name> must match a name of defined resource in metadata.yaml
-
- returns either a path or False if resource not available
- """
- if not name:
- return False
-
- cmd = ['resource-get', name]
- try:
- return subprocess.check_output(cmd).decode('UTF-8')
- except subprocess.CalledProcessError:
- return False
-
-
-@cached
-def juju_version():
- """Full version string (eg. '1.23.3.1-trusty-amd64')"""
- # Per https://bugs.launchpad.net/juju-core/+bug/1455368/comments/1
- jujud = glob.glob('/var/lib/juju/tools/machine-*/jujud')[0]
- return subprocess.check_output([jujud, 'version'],
- universal_newlines=True).strip()
-
-
-def has_juju_version(minimum_version):
- """Return True if the Juju version is at least the provided version"""
- return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
-
-
-_atexit = []
-_atstart = []
-
-
-def atstart(callback, *args, **kwargs):
- '''Schedule a callback to run before the main hook.
-
- Callbacks are run in the order they were added.
-
- This is useful for modules and classes to perform initialization
- and inject behavior. In particular:
-
- - Run common code before all of your hooks, such as logging
- the hook name or interesting relation data.
- - Defer object or module initialization that requires a hook
- context until we know there actually is a hook context,
- making testing easier.
- - Rather than requiring charm authors to include boilerplate to
- invoke your helper's behavior, have it run automatically if
- your object is instantiated or module imported.
-
- This is not at all useful after your hook framework as been launched.
- '''
- global _atstart
- _atstart.append((callback, args, kwargs))
-
-
-def atexit(callback, *args, **kwargs):
- '''Schedule a callback to run on successful hook completion.
-
- Callbacks are run in the reverse order that they were added.'''
- _atexit.append((callback, args, kwargs))
-
-
-def _run_atstart():
- '''Hook frameworks must invoke this before running the main hook body.'''
- global _atstart
- for callback, args, kwargs in _atstart:
- callback(*args, **kwargs)
- del _atstart[:]
-
-
-def _run_atexit():
- '''Hook frameworks must invoke this after the main hook body has
- successfully completed. Do not invoke it if the hook fails.'''
- global _atexit
- for callback, args, kwargs in reversed(_atexit):
- callback(*args, **kwargs)
- del _atexit[:]
-
-
-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
-def network_get_primary_address(binding):
- '''
- Deprecated since Juju 2.3; use network_get()
-
- Retrieve the primary network address for a named binding
-
- :param binding: string. The name of a relation of extra-binding
- :return: string. The primary IP address for the named binding
- :raise: NotImplementedError if run on Juju < 2.0
- '''
- cmd = ['network-get', '--primary-address', binding]
- try:
- response = subprocess.check_output(
- cmd,
- stderr=subprocess.STDOUT).decode('UTF-8').strip()
- except CalledProcessError as e:
- if 'no network config found for binding' in e.output.decode('UTF-8'):
- raise NoNetworkBinding("No network binding for {}"
- .format(binding))
- else:
- raise
- return response
-
-
-def network_get(endpoint, relation_id=None):
- """
- Retrieve the network details for a relation endpoint
-
- :param endpoint: string. The name of a relation endpoint
- :param relation_id: int. The ID of the relation for the current context.
- :return: dict. The loaded YAML output of the network-get query.
- :raise: NotImplementedError if request not supported by the Juju version.
- """
- if not has_juju_version('2.2'):
- raise NotImplementedError(juju_version()) # earlier versions require --primary-address
- if relation_id and not has_juju_version('2.3'):
- raise NotImplementedError # 2.3 added the -r option
-
- cmd = ['network-get', endpoint, '--format', 'yaml']
- if relation_id:
- cmd.append('-r')
- cmd.append(relation_id)
- response = subprocess.check_output(
- cmd,
- stderr=subprocess.STDOUT).decode('UTF-8').strip()
- return yaml.safe_load(response)
-
-
-def add_metric(*args, **kwargs):
- """Add metric values. Values may be expressed with keyword arguments. For
- metric names containing dashes, these may be expressed as one or more
- 'key=value' positional arguments. May only be called from the collect-metrics
- hook."""
- _args = ['add-metric']
- _kvpairs = []
- _kvpairs.extend(args)
- _kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()])
- _args.extend(sorted(_kvpairs))
- try:
- subprocess.check_call(_args)
- return
- except EnvironmentError as e:
- if e.errno != errno.ENOENT:
- raise
- log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs))
- log(log_message, level='INFO')
-
-
-def meter_status():
- """Get the meter status, if running in the meter-status-changed hook."""
- return os.environ.get('JUJU_METER_STATUS')
-
-
-def meter_info():
- """Get the meter status information, if running in the meter-status-changed
- hook."""
- return os.environ.get('JUJU_METER_INFO')
-
-
-def iter_units_for_relation_name(relation_name):
- """Iterate through all units in a relation
-
- Generator that iterates through all the units in a relation and yields
- a named tuple with rid and unit field names.
-
- Usage:
- data = [(u.rid, u.unit)
- for u in iter_units_for_relation_name(relation_name)]
-
- :param relation_name: string relation name
- :yield: Named Tuple with rid and unit field names
- """
- RelatedUnit = namedtuple('RelatedUnit', 'rid, unit')
- for rid in relation_ids(relation_name):
- for unit in related_units(rid):
- yield RelatedUnit(rid, unit)
-
-
-def ingress_address(rid=None, unit=None):
- """
- Retrieve the ingress-address from a relation when available.
- Otherwise, return the private-address.
-
- When used on the consuming side of the relation (unit is a remote
- unit), the ingress-address is the IP address that this unit needs
- to use to reach the provided service on the remote unit.
-
- When used on the providing side of the relation (unit == local_unit()),
- the ingress-address is the IP address that is advertised to remote
- units on this relation. Remote units need to use this address to
- reach the local provided service on this unit.
-
- Note that charms may document some other method to use in
- preference to the ingress_address(), such as an address provided
- on a different relation attribute or a service discovery mechanism.
- This allows charms to redirect inbound connections to their peers
- or different applications such as load balancers.
-
- Usage:
- addresses = [ingress_address(rid=u.rid, unit=u.unit)
- for u in iter_units_for_relation_name(relation_name)]
-
- :param rid: string relation id
- :param unit: string unit name
- :side effect: calls relation_get
- :return: string IP address
- """
- settings = relation_get(rid=rid, unit=unit)
- return (settings.get('ingress-address') or
- settings.get('private-address'))
-
-
-def egress_subnets(rid=None, unit=None):
- """
- Retrieve the egress-subnets from a relation.
-
- This function is to be used on the providing side of the
- relation, and provides the ranges of addresses that client
- connections may come from. The result is uninteresting on
- the consuming side of a relation (unit == local_unit()).
-
- Returns a stable list of subnets in CIDR format.
- eg. ['192.168.1.0/24', '2001::F00F/128']
-
- If egress-subnets is not available, falls back to using the published
- ingress-address, or finally private-address.
-
- :param rid: string relation id
- :param unit: string unit name
- :side effect: calls relation_get
- :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
- """
- def _to_range(addr):
- if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
- addr += '/32'
- elif ':' in addr and '/' not in addr: # IPv6
- addr += '/128'
- return addr
-
- settings = relation_get(rid=rid, unit=unit)
- if 'egress-subnets' in settings:
- return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
- if 'ingress-address' in settings:
- return [_to_range(settings['ingress-address'])]
- if 'private-address' in settings:
- return [_to_range(settings['private-address'])]
- return [] # Should never happen
-
-
-def unit_doomed(unit=None):
- """Determines if the unit is being removed from the model
-
- Requires Juju 2.4.1.
-
- :param unit: string unit name, defaults to local_unit
- :side effect: calls goal_state
- :side effect: calls local_unit
- :side effect: calls has_juju_version
- :return: True if the unit is being removed, already gone, or never existed
- """
- if not has_juju_version("2.4.1"):
- # We cannot risk blindly returning False for 'we don't know',
- # because that could cause data loss; if call sites don't
- # need an accurate answer, they likely don't need this helper
- # at all.
- # goal-state existed in 2.4.0, but did not handle removals
- # correctly until 2.4.1.
- raise NotImplementedError("is_doomed")
- if unit is None:
- unit = local_unit()
- gs = goal_state()
- units = gs.get('units', {})
- if unit not in units:
- return True
- # I don't think 'dead' units ever show up in the goal-state, but
- # check anyway in addition to 'dying'.
- return units[unit]['status'] in ('dying', 'dead')
diff --git a/tests/charmhelpers/core/host.py b/tests/charmhelpers/core/host.py
deleted file mode 100644
index e9fd38a..0000000
--- a/tests/charmhelpers/core/host.py
+++ /dev/null
@@ -1,1042 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Tools for working with the host system"""
-# Copyright 2012 Canonical Ltd.
-#
-# Authors:
-# Nick Moffitt <nick.moffitt@canonical.com>
-# Matthew Wedgwood <matthew.wedgwood@canonical.com>
-
-import os
-import re
-import pwd
-import glob
-import grp
-import random
-import string
-import subprocess
-import hashlib
-import functools
-import itertools
-import six
-
-from contextlib import contextmanager
-from collections import OrderedDict
-from .hookenv import log, DEBUG, local_unit
-from .fstab import Fstab
-from charmhelpers.osplatform import get_platform
-
-__platform__ = get_platform()
-if __platform__ == "ubuntu":
- from charmhelpers.core.host_factory.ubuntu import (
- service_available,
- add_new_group,
- lsb_release,
- cmp_pkgrevno,
- CompareHostReleases,
- ) # flake8: noqa -- ignore F401 for this import
-elif __platform__ == "centos":
- from charmhelpers.core.host_factory.centos import (
- service_available,
- add_new_group,
- lsb_release,
- cmp_pkgrevno,
- CompareHostReleases,
- ) # flake8: noqa -- ignore F401 for this import
-
-UPDATEDB_PATH = '/etc/updatedb.conf'
-
-def service_start(service_name, **kwargs):
- """Start a system service.
-
- The specified service name is managed via the system level init system.
- Some init systems (e.g. upstart) require that additional arguments be
- provided in order to directly control service instances whereas other init
- systems allow for addressing instances of a service directly by name (e.g.
- systemd).
-
- The kwargs allow for the additional parameters to be passed to underlying
- init systems for those systems which require/allow for them. For example,
- the ceph-osd upstart script requires the id parameter to be passed along
- in order to identify which running daemon should be reloaded. The follow-
- ing example stops the ceph-osd service for instance id=4:
-
- service_stop('ceph-osd', id=4)
-
- :param service_name: the name of the service to stop
- :param **kwargs: additional parameters to pass to the init system when
- managing services. These will be passed as key=value
- parameters to the init system's commandline. kwargs
- are ignored for systemd enabled systems.
- """
- return service('start', service_name, **kwargs)
-
-
-def service_stop(service_name, **kwargs):
- """Stop a system service.
-
- The specified service name is managed via the system level init system.
- Some init systems (e.g. upstart) require that additional arguments be
- provided in order to directly control service instances whereas other init
- systems allow for addressing instances of a service directly by name (e.g.
- systemd).
-
- The kwargs allow for the additional parameters to be passed to underlying
- init systems for those systems which require/allow for them. For example,
- the ceph-osd upstart script requires the id parameter to be passed along
- in order to identify which running daemon should be reloaded. The follow-
- ing example stops the ceph-osd service for instance id=4:
-
- service_stop('ceph-osd', id=4)
-
- :param service_name: the name of the service to stop
- :param **kwargs: additional parameters to pass to the init system when
- managing services. These will be passed as key=value
- parameters to the init system's commandline. kwargs
- are ignored for systemd enabled systems.
- """
- return service('stop', service_name, **kwargs)
-
-
-def service_restart(service_name, **kwargs):
- """Restart a system service.
-
- The specified service name is managed via the system level init system.
- Some init systems (e.g. upstart) require that additional arguments be
- provided in order to directly control service instances whereas other init
- systems allow for addressing instances of a service directly by name (e.g.
- systemd).
-
- The kwargs allow for the additional parameters to be passed to underlying
- init systems for those systems which require/allow for them. For example,
- the ceph-osd upstart script requires the id parameter to be passed along
- in order to identify which running daemon should be restarted. The follow-
- ing example restarts the ceph-osd service for instance id=4:
-
- service_restart('ceph-osd', id=4)
-
- :param service_name: the name of the service to restart
- :param **kwargs: additional parameters to pass to the init system when
- managing services. These will be passed as key=value
- parameters to the init system's commandline. kwargs
- are ignored for init systems not allowing additional
- parameters via the commandline (systemd).
- """
- return service('restart', service_name)
-
-
-def service_reload(service_name, restart_on_failure=False, **kwargs):
- """Reload a system service, optionally falling back to restart if
- reload fails.
-
- The specified service name is managed via the system level init system.
- Some init systems (e.g. upstart) require that additional arguments be
- provided in order to directly control service instances whereas other init
- systems allow for addressing instances of a service directly by name (e.g.
- systemd).
-
- The kwargs allow for the additional parameters to be passed to underlying
- init systems for those systems which require/allow for them. For example,
- the ceph-osd upstart script requires the id parameter to be passed along
- in order to identify which running daemon should be reloaded. The follow-
- ing example restarts the ceph-osd service for instance id=4:
-
- service_reload('ceph-osd', id=4)
-
- :param service_name: the name of the service to reload
- :param restart_on_failure: boolean indicating whether to fallback to a
- restart if the reload fails.
- :param **kwargs: additional parameters to pass to the init system when
- managing services. These will be passed as key=value
- parameters to the init system's commandline. kwargs
- are ignored for init systems not allowing additional
- parameters via the commandline (systemd).
- """
- service_result = service('reload', service_name, **kwargs)
- if not service_result and restart_on_failure:
- service_result = service('restart', service_name, **kwargs)
- return service_result
-
-
-def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
- **kwargs):
- """Pause a system service.
-
- Stop it, and prevent it from starting again at boot.
-
- :param service_name: the name of the service to pause
- :param init_dir: path to the upstart init directory
- :param initd_dir: path to the sysv init directory
- :param **kwargs: additional parameters to pass to the init system when
- managing services. These will be passed as key=value
- parameters to the init system's commandline. kwargs
- are ignored for init systems which do not support
- key=value arguments via the commandline.
- """
- stopped = True
- if service_running(service_name, **kwargs):
- stopped = service_stop(service_name, **kwargs)
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if init_is_systemd():
- service('disable', service_name)
- service('mask', service_name)
- elif os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- with open(override_path, 'w') as fh:
- fh.write("manual\n")
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "disable"])
- else:
- raise ValueError(
- "Unable to detect {0} as SystemD, Upstart {1} or"
- " SysV {2}".format(
- service_name, upstart_file, sysv_file))
- return stopped
-
-
-def service_resume(service_name, init_dir="/etc/init",
- initd_dir="/etc/init.d", **kwargs):
- """Resume a system service.
-
- Reenable starting again at boot. Start the service.
-
- :param service_name: the name of the service to resume
- :param init_dir: the path to the init dir
- :param initd dir: the path to the initd dir
- :param **kwargs: additional parameters to pass to the init system when
- managing services. These will be passed as key=value
- parameters to the init system's commandline. kwargs
- are ignored for systemd enabled systems.
- """
- upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
- sysv_file = os.path.join(initd_dir, service_name)
- if init_is_systemd():
- service('unmask', service_name)
- service('enable', service_name)
- elif os.path.exists(upstart_file):
- override_path = os.path.join(
- init_dir, '{}.override'.format(service_name))
- if os.path.exists(override_path):
- os.unlink(override_path)
- elif os.path.exists(sysv_file):
- subprocess.check_call(["update-rc.d", service_name, "enable"])
- else:
- raise ValueError(
- "Unable to detect {0} as SystemD, Upstart {1} or"
- " SysV {2}".format(
- service_name, upstart_file, sysv_file))
- started = service_running(service_name, **kwargs)
-
- if not started:
- started = service_start(service_name, **kwargs)
- return started
-
-
-def service(action, service_name, **kwargs):
- """Control a system service.
-
- :param action: the action to take on the service
- :param service_name: the name of the service to perform th action on
- :param **kwargs: additional params to be passed to the service command in
- the form of key=value.
- """
- if init_is_systemd():
- cmd = ['systemctl', action, service_name]
- else:
- cmd = ['service', service_name, action]
- for key, value in six.iteritems(kwargs):
- parameter = '%s=%s' % (key, value)
- cmd.append(parameter)
- return subprocess.call(cmd) == 0
-
-
-_UPSTART_CONF = "/etc/init/{}.conf"
-_INIT_D_CONF = "/etc/init.d/{}"
-
-
-def service_running(service_name, **kwargs):
- """Determine whether a system service is running.
-
- :param service_name: the name of the service
- :param **kwargs: additional args to pass to the service command. This is
- used to pass additional key=value arguments to the
- service command line for managing specific instance
- units (e.g. service ceph-osd status id=2). The kwargs
- are ignored in systemd services.
- """
- if init_is_systemd():
- return service('is-active', service_name)
- else:
- if os.path.exists(_UPSTART_CONF.format(service_name)):
- try:
- cmd = ['status', service_name]
- for key, value in six.iteritems(kwargs):
- parameter = '%s=%s' % (key, value)
- cmd.append(parameter)
- output = subprocess.check_output(cmd,
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError:
- return False
- else:
- # This works for upstart scripts where the 'service' command
- # returns a consistent string to represent running
- # 'start/running'
- if ("start/running" in output or
- "is running" in output or
- "up and running" in output):
- return True
- elif os.path.exists(_INIT_D_CONF.format(service_name)):
- # Check System V scripts init script return codes
- return service('status', service_name)
- return False
-
-
-SYSTEMD_SYSTEM = '/run/systemd/system'
-
-
-def init_is_systemd():
- """Return True if the host system uses systemd, False otherwise."""
- if lsb_release()['DISTRIB_CODENAME'] == 'trusty':
- return False
- return os.path.isdir(SYSTEMD_SYSTEM)
-
-
-def adduser(username, password=None, shell='/bin/bash',
- system_user=False, primary_group=None,
- secondary_groups=None, uid=None, home_dir=None):
- """Add a user to the system.
-
- Will log but otherwise succeed if the user already exists.
-
- :param str username: Username to create
- :param str password: Password for user; if ``None``, create a system user
- :param str shell: The default shell for the user
- :param bool system_user: Whether to create a login or system user
- :param str primary_group: Primary group for user; defaults to username
- :param list secondary_groups: Optional list of additional groups
- :param int uid: UID for user being created
- :param str home_dir: Home directory for user
-
- :returns: The password database entry struct, as returned by `pwd.getpwnam`
- """
- try:
- user_info = pwd.getpwnam(username)
- log('user {0} already exists!'.format(username))
- if uid:
- user_info = pwd.getpwuid(int(uid))
- log('user with uid {0} already exists!'.format(uid))
- except KeyError:
- log('creating user {0}'.format(username))
- cmd = ['useradd']
- if uid:
- cmd.extend(['--uid', str(uid)])
- if home_dir:
- cmd.extend(['--home', str(home_dir)])
- if system_user or password is None:
- cmd.append('--system')
- else:
- cmd.extend([
- '--create-home',
- '--shell', shell,
- '--password', password,
- ])
- if not primary_group:
- try:
- grp.getgrnam(username)
- primary_group = username # avoid "group exists" error
- except KeyError:
- pass
- if primary_group:
- cmd.extend(['-g', primary_group])
- if secondary_groups:
- cmd.extend(['-G', ','.join(secondary_groups)])
- cmd.append(username)
- subprocess.check_call(cmd)
- user_info = pwd.getpwnam(username)
- return user_info
-
-
-def user_exists(username):
- """Check if a user exists"""
- try:
- pwd.getpwnam(username)
- user_exists = True
- except KeyError:
- user_exists = False
- return user_exists
-
-
-def uid_exists(uid):
- """Check if a uid exists"""
- try:
- pwd.getpwuid(uid)
- uid_exists = True
- except KeyError:
- uid_exists = False
- return uid_exists
-
-
-def group_exists(groupname):
- """Check if a group exists"""
- try:
- grp.getgrnam(groupname)
- group_exists = True
- except KeyError:
- group_exists = False
- return group_exists
-
-
-def gid_exists(gid):
- """Check if a gid exists"""
- try:
- grp.getgrgid(gid)
- gid_exists = True
- except KeyError:
- gid_exists = False
- return gid_exists
-
-
-def add_group(group_name, system_group=False, gid=None):
- """Add a group to the system
-
- Will log but otherwise succeed if the group already exists.
-
- :param str group_name: group to create
- :param bool system_group: Create system group
- :param int gid: GID for user being created
-
- :returns: The password database entry struct, as returned by `grp.getgrnam`
- """
- try:
- group_info = grp.getgrnam(group_name)
- log('group {0} already exists!'.format(group_name))
- if gid:
- group_info = grp.getgrgid(gid)
- log('group with gid {0} already exists!'.format(gid))
- except KeyError:
- log('creating group {0}'.format(group_name))
- add_new_group(group_name, system_group, gid)
- group_info = grp.getgrnam(group_name)
- return group_info
-
-
-def add_user_to_group(username, group):
- """Add a user to a group"""
- cmd = ['gpasswd', '-a', username, group]
- log("Adding user {} to group {}".format(username, group))
- subprocess.check_call(cmd)
-
-
-def chage(username, lastday=None, expiredate=None, inactive=None,
- mindays=None, maxdays=None, root=None, warndays=None):
- """Change user password expiry information
-
- :param str username: User to update
- :param str lastday: Set when password was changed in YYYY-MM-DD format
- :param str expiredate: Set when user's account will no longer be
- accessible in YYYY-MM-DD format.
- -1 will remove an account expiration date.
- :param str inactive: Set the number of days of inactivity after a password
- has expired before the account is locked.
- -1 will remove an account's inactivity.
- :param str mindays: Set the minimum number of days between password
- changes to MIN_DAYS.
- 0 indicates the password can be changed anytime.
- :param str maxdays: Set the maximum number of days during which a
- password is valid.
- -1 as MAX_DAYS will remove checking maxdays
- :param str root: Apply changes in the CHROOT_DIR directory
- :param str warndays: Set the number of days of warning before a password
- change is required
- :raises subprocess.CalledProcessError: if call to chage fails
- """
- cmd = ['chage']
- if root:
- cmd.extend(['--root', root])
- if lastday:
- cmd.extend(['--lastday', lastday])
- if expiredate:
- cmd.extend(['--expiredate', expiredate])
- if inactive:
- cmd.extend(['--inactive', inactive])
- if mindays:
- cmd.extend(['--mindays', mindays])
- if maxdays:
- cmd.extend(['--maxdays', maxdays])
- if warndays:
- cmd.extend(['--warndays', warndays])
- cmd.append(username)
- subprocess.check_call(cmd)
-
-remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1')
-
-def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
- """Replicate the contents of a path"""
- options = options or ['--delete', '--executability']
- cmd = ['/usr/bin/rsync', flags]
- if timeout:
- cmd = ['timeout', str(timeout)] + cmd
- cmd.extend(options)
- cmd.append(from_path)
- cmd.append(to_path)
- log(" ".join(cmd))
- return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip()
-
-
-def symlink(source, destination):
- """Create a symbolic link"""
- log("Symlinking {} as {}".format(source, destination))
- cmd = [
- 'ln',
- '-sf',
- source,
- destination,
- ]
- subprocess.check_call(cmd)
-
-
-def mkdir(path, owner='root', group='root', perms=0o555, force=False):
- """Create a directory"""
- log("Making dir {} {}:{} {:o}".format(path, owner, group,
- perms))
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- realpath = os.path.abspath(path)
- path_exists = os.path.exists(realpath)
- if path_exists and force:
- if not os.path.isdir(realpath):
- log("Removing non-directory file {} prior to mkdir()".format(path))
- os.unlink(realpath)
- os.makedirs(realpath, perms)
- elif not path_exists:
- os.makedirs(realpath, perms)
- os.chown(realpath, uid, gid)
- os.chmod(realpath, perms)
-
-
-def write_file(path, content, owner='root', group='root', perms=0o444):
- """Create or overwrite a file with the contents of a byte string."""
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- # lets see if we can grab the file and compare the context, to avoid doing
- # a write.
- existing_content = None
- existing_uid, existing_gid = None, None
- try:
- with open(path, 'rb') as target:
- existing_content = target.read()
- stat = os.stat(path)
- existing_uid, existing_gid = stat.st_uid, stat.st_gid
- except:
- pass
- if content != existing_content:
- log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
- level=DEBUG)
- with open(path, 'wb') as target:
- os.fchown(target.fileno(), uid, gid)
- os.fchmod(target.fileno(), perms)
- if six.PY3 and isinstance(content, six.string_types):
- content = content.encode('UTF-8')
- target.write(content)
- return
- # the contents were the same, but we might still need to change the
- # ownership.
- if existing_uid != uid:
- log("Changing uid on already existing content: {} -> {}"
- .format(existing_uid, uid), level=DEBUG)
- os.chown(path, uid, -1)
- if existing_gid != gid:
- log("Changing gid on already existing content: {} -> {}"
- .format(existing_gid, gid), level=DEBUG)
- os.chown(path, -1, gid)
-
-
-def fstab_remove(mp):
- """Remove the given mountpoint entry from /etc/fstab"""
- return Fstab.remove_by_mountpoint(mp)
-
-
-def fstab_add(dev, mp, fs, options=None):
- """Adds the given device entry to the /etc/fstab file"""
- return Fstab.add(dev, mp, fs, options=options)
-
-
-def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
- """Mount a filesystem at a particular mountpoint"""
- cmd_args = ['mount']
- if options is not None:
- cmd_args.extend(['-o', options])
- cmd_args.extend([device, mountpoint])
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
- return False
-
- if persist:
- return fstab_add(device, mountpoint, filesystem, options=options)
- return True
-
-
-def umount(mountpoint, persist=False):
- """Unmount a filesystem"""
- cmd_args = ['umount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
-
- if persist:
- return fstab_remove(mountpoint)
- return True
-
-
-def mounts():
- """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
- with open('/proc/mounts') as f:
- # [['/mount/point','/dev/path'],[...]]
- system_mounts = [m[1::-1] for m in [l.strip().split()
- for l in f.readlines()]]
- return system_mounts
-
-
-def fstab_mount(mountpoint):
- """Mount filesystem using fstab"""
- cmd_args = ['mount', mountpoint]
- try:
- subprocess.check_output(cmd_args)
- except subprocess.CalledProcessError as e:
- log('Error unmounting {}\n{}'.format(mountpoint, e.output))
- return False
- return True
-
-
-def file_hash(path, hash_type='md5'):
- """Generate a hash checksum of the contents of 'path' or None if not found.
-
- :param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- """
- if os.path.exists(path):
- h = getattr(hashlib, hash_type)()
- with open(path, 'rb') as source:
- h.update(source.read())
- return h.hexdigest()
- else:
- return None
-
-
-def path_hash(path):
- """Generate a hash checksum of all files matching 'path'. Standard
- wildcards like '*' and '?' are supported, see documentation for the 'glob'
- module for more information.
-
- :return: dict: A { filename: hash } dictionary for all matched files.
- Empty if none found.
- """
- return {
- filename: file_hash(filename)
- for filename in glob.iglob(path)
- }
-
-
-def check_hash(path, checksum, hash_type='md5'):
- """Validate a file using a cryptographic checksum.
-
- :param str checksum: Value of the checksum used to validate the file.
- :param str hash_type: Hash algorithm used to generate `checksum`.
- Can be any hash alrgorithm supported by :mod:`hashlib`,
- such as md5, sha1, sha256, sha512, etc.
- :raises ChecksumError: If the file fails the checksum
-
- """
- actual_checksum = file_hash(path, hash_type)
- if checksum != actual_checksum:
- raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
-
-
-class ChecksumError(ValueError):
- """A class derived from Value error to indicate the checksum failed."""
- pass
-
-
-def restart_on_change(restart_map, stopstart=False, restart_functions=None):
- """Restart services based on configuration files changing
-
- This function is used a decorator, for example::
-
- @restart_on_change({
- '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
- '/etc/apache/sites-enabled/*': [ 'apache2' ]
- })
- def config_changed():
- pass # your code here
-
- In this example, the cinder-api and cinder-volume services
- would be restarted if /etc/ceph/ceph.conf is changed by the
- ceph_client_changed function. The apache2 service would be
- restarted if any file matching the pattern got changed, created
- or removed. Standard wildcards are supported, see documentation
- for the 'glob' module for more information.
-
- @param restart_map: {path_file_name: [service_name, ...]
- @param stopstart: DEFAULT false; whether to stop, start OR restart
- @param restart_functions: nonstandard functions to use to restart services
- {svc: func, ...}
- @returns result from decorated function
- """
- def wrap(f):
- @functools.wraps(f)
- def wrapped_f(*args, **kwargs):
- return restart_on_change_helper(
- (lambda: f(*args, **kwargs)), restart_map, stopstart,
- restart_functions)
- return wrapped_f
- return wrap
-
-
-def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
- restart_functions=None):
- """Helper function to perform the restart_on_change function.
-
- This is provided for decorators to restart services if files described
- in the restart_map have changed after an invocation of lambda_f().
-
- @param lambda_f: function to call.
- @param restart_map: {file: [service, ...]}
- @param stopstart: whether to stop, start or restart a service
- @param restart_functions: nonstandard functions to use to restart services
- {svc: func, ...}
- @returns result of lambda_f()
- """
- if restart_functions is None:
- restart_functions = {}
- checksums = {path: path_hash(path) for path in restart_map}
- r = lambda_f()
- # create a list of lists of the services to restart
- restarts = [restart_map[path]
- for path in restart_map
- if path_hash(path) != checksums[path]]
- # create a flat list of ordered services without duplicates from lists
- services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
- if services_list:
- actions = ('stop', 'start') if stopstart else ('restart',)
- for service_name in services_list:
- if service_name in restart_functions:
- restart_functions[service_name](service_name)
- else:
- for action in actions:
- service(action, service_name)
- return r
-
-
-def pwgen(length=None):
- """Generate a random pasword."""
- if length is None:
- # A random length is ok to use a weak PRNG
- length = random.choice(range(35, 45))
- alphanumeric_chars = [
- l for l in (string.ascii_letters + string.digits)
- if l not in 'l0QD1vAEIOUaeiou']
- # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
- # actual password
- random_generator = random.SystemRandom()
- random_chars = [
- random_generator.choice(alphanumeric_chars) for _ in range(length)]
- return(''.join(random_chars))
-
-
-def is_phy_iface(interface):
- """Returns True if interface is not virtual, otherwise False."""
- if interface:
- sys_net = '/sys/class/net'
- if os.path.isdir(sys_net):
- for iface in glob.glob(os.path.join(sys_net, '*')):
- if '/virtual/' in os.path.realpath(iface):
- continue
-
- if interface == os.path.basename(iface):
- return True
-
- return False
-
-
-def get_bond_master(interface):
- """Returns bond master if interface is bond slave otherwise None.
-
- NOTE: the provided interface is expected to be physical
- """
- if interface:
- iface_path = '/sys/class/net/%s' % (interface)
- if os.path.exists(iface_path):
- if '/virtual/' in os.path.realpath(iface_path):
- return None
-
- master = os.path.join(iface_path, 'master')
- if os.path.exists(master):
- master = os.path.realpath(master)
- # make sure it is a bond master
- if os.path.exists(os.path.join(master, 'bonding')):
- return os.path.basename(master)
-
- return None
-
-
-def list_nics(nic_type=None):
- """Return a list of nics of given type(s)"""
- if isinstance(nic_type, six.string_types):
- int_types = [nic_type]
- else:
- int_types = nic_type
-
- interfaces = []
- if nic_type:
- for int_type in int_types:
- cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- ip_output = ip_output.split('\n')
- ip_output = (line for line in ip_output if line)
- for line in ip_output:
- if line.split()[1].startswith(int_type):
- matched = re.search('.*: (' + int_type +
- r'[0-9]+\.[0-9]+)@.*', line)
- if matched:
- iface = matched.groups()[0]
- else:
- iface = line.split()[1].replace(":", "")
-
- if iface not in interfaces:
- interfaces.append(iface)
- else:
- cmd = ['ip', 'a']
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- ip_output = (line.strip() for line in ip_output if line)
-
- key = re.compile('^[0-9]+:\s+(.+):')
- for line in ip_output:
- matched = re.search(key, line)
- if matched:
- iface = matched.group(1)
- iface = iface.partition("@")[0]
- if iface not in interfaces:
- interfaces.append(iface)
-
- return interfaces
-
-
-def set_nic_mtu(nic, mtu):
- """Set the Maximum Transmission Unit (MTU) on a network interface."""
- cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
- subprocess.check_call(cmd)
-
-
-def get_nic_mtu(nic):
- """Return the Maximum Transmission Unit (MTU) for a network interface."""
- cmd = ['ip', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
- mtu = ""
- for line in ip_output:
- words = line.split()
- if 'mtu' in words:
- mtu = words[words.index("mtu") + 1]
- return mtu
-
-
-def get_nic_hwaddr(nic):
- """Return the Media Access Control (MAC) for a network interface."""
- cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
- ip_output = subprocess.check_output(cmd).decode('UTF-8')
- hwaddr = ""
- words = ip_output.split()
- if 'link/ether' in words:
- hwaddr = words[words.index('link/ether') + 1]
- return hwaddr
-
-
-@contextmanager
-def chdir(directory):
- """Change the current working directory to a different directory for a code
- block and return the previous directory after the block exits. Useful to
- run commands from a specificed directory.
-
- :param str directory: The directory path to change to for this context.
- """
- cur = os.getcwd()
- try:
- yield os.chdir(directory)
- finally:
- os.chdir(cur)
-
-
-def chownr(path, owner, group, follow_links=True, chowntopdir=False):
- """Recursively change user and group ownership of files and directories
- in given path. Doesn't chown path itself by default, only its children.
-
- :param str path: The string path to start changing ownership.
- :param str owner: The owner string to use when looking up the uid.
- :param str group: The group string to use when looking up the gid.
- :param bool follow_links: Also follow and chown links if True
- :param bool chowntopdir: Also chown path itself if True
- """
- uid = pwd.getpwnam(owner).pw_uid
- gid = grp.getgrnam(group).gr_gid
- if follow_links:
- chown = os.chown
- else:
- chown = os.lchown
-
- if chowntopdir:
- broken_symlink = os.path.lexists(path) and not os.path.exists(path)
- if not broken_symlink:
- chown(path, uid, gid)
- for root, dirs, files in os.walk(path, followlinks=follow_links):
- for name in dirs + files:
- full = os.path.join(root, name)
- broken_symlink = os.path.lexists(full) and not os.path.exists(full)
- if not broken_symlink:
- chown(full, uid, gid)
-
-
-def lchownr(path, owner, group):
- """Recursively change user and group ownership of files and directories
- in a given path, not following symbolic links. See the documentation for
- 'os.lchown' for more information.
-
- :param str path: The string path to start changing ownership.
- :param str owner: The owner string to use when looking up the uid.
- :param str group: The group string to use when looking up the gid.
- """
- chownr(path, owner, group, follow_links=False)
-
-
-def owner(path):
- """Returns a tuple containing the username & groupname owning the path.
-
- :param str path: the string path to retrieve the ownership
- :return tuple(str, str): A (username, groupname) tuple containing the
- name of the user and group owning the path.
- :raises OSError: if the specified path does not exist
- """
- stat = os.stat(path)
- username = pwd.getpwuid(stat.st_uid)[0]
- groupname = grp.getgrgid(stat.st_gid)[0]
- return username, groupname
-
-
-def get_total_ram():
- """The total amount of system RAM in bytes.
-
- This is what is reported by the OS, and may be overcommitted when
- there are multiple containers hosted on the same machine.
- """
- with open('/proc/meminfo', 'r') as f:
- for line in f.readlines():
- if line:
- key, value, unit = line.split()
- if key == 'MemTotal:':
- assert unit == 'kB', 'Unknown unit'
- return int(value) * 1024 # Classic, not KiB.
- raise NotImplementedError()
-
-
-UPSTART_CONTAINER_TYPE = '/run/container_type'
-
-
-def is_container():
- """Determine whether unit is running in a container
-
- @return: boolean indicating if unit is in a container
- """
- if init_is_systemd():
- # Detect using systemd-detect-virt
- return subprocess.call(['systemd-detect-virt',
- '--container']) == 0
- else:
- # Detect using upstart container file marker
- return os.path.exists(UPSTART_CONTAINER_TYPE)
-
-
-def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
- """Adds the specified path to the mlocate's udpatedb.conf PRUNEPATH list.
-
- This method has no effect if the path specified by updatedb_path does not
- exist or is not a file.
-
- @param path: string the path to add to the updatedb.conf PRUNEPATHS value
- @param updatedb_path: the path the updatedb.conf file
- """
- if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path):
- # If the updatedb.conf file doesn't exist then don't attempt to update
- # the file as the package providing mlocate may not be installed on
- # the local system
- return
-
- with open(updatedb_path, 'r+') as f_id:
- updatedb_text = f_id.read()
- output = updatedb(updatedb_text, path)
- f_id.seek(0)
- f_id.write(output)
- f_id.truncate()
-
-
-def updatedb(updatedb_text, new_path):
- lines = [line for line in updatedb_text.split("\n")]
- for i, line in enumerate(lines):
- if line.startswith("PRUNEPATHS="):
- paths_line = line.split("=")[1].replace('"', '')
- paths = paths_line.split(" ")
- if new_path not in paths:
- paths.append(new_path)
- lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
- output = "\n".join(lines)
- return output
-
-
-def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
- """ Modulo distribution
-
- This helper uses the unit number, a modulo value and a constant wait time
- to produce a calculated wait time distribution. This is useful in large
- scale deployments to distribute load during an expensive operation such as
- service restarts.
-
- If you have 1000 nodes that need to restart 100 at a time 1 minute at a
- time:
-
- time.wait(modulo_distribution(modulo=100, wait=60))
- restart()
-
- If you need restarts to happen serially set modulo to the exact number of
- nodes and set a high constant wait time:
-
- time.wait(modulo_distribution(modulo=10, wait=120))
- restart()
-
- @param modulo: int The modulo number creates the group distribution
- @param wait: int The constant time wait value
- @param non_zero_wait: boolean Override unit % modulo == 0,
- return modulo * wait. Used to avoid collisions with
- leader nodes which are often given priority.
- @return: int Calculated time to wait for unit operation
- """
- unit_number = int(local_unit().split('/')[1])
- calculated_wait_time = (unit_number % modulo) * wait
- if non_zero_wait and calculated_wait_time == 0:
- return modulo * wait
- else:
- return calculated_wait_time
diff --git a/tests/charmhelpers/core/host_factory/__init__.py b/tests/charmhelpers/core/host_factory/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tests/charmhelpers/core/host_factory/__init__.py
+++ /dev/null
diff --git a/tests/charmhelpers/core/host_factory/centos.py b/tests/charmhelpers/core/host_factory/centos.py
deleted file mode 100644
index 7781a39..0000000
--- a/tests/charmhelpers/core/host_factory/centos.py
+++ /dev/null
@@ -1,72 +0,0 @@
-import subprocess
-import yum
-import os
-
-from charmhelpers.core.strutils import BasicStringComparator
-
-
-class CompareHostReleases(BasicStringComparator):
- """Provide comparisons of Host releases.
-
- Use in the form of
-
- if CompareHostReleases(release) > 'trusty':
- # do something with mitaka
- """
-
- def __init__(self, item):
- raise NotImplementedError(
- "CompareHostReleases() is not implemented for CentOS")
-
-
-def service_available(service_name):
- # """Determine whether a system service is available."""
- if os.path.isdir('/run/systemd/system'):
- cmd = ['systemctl', 'is-enabled', service_name]
- else:
- cmd = ['service', service_name, 'is-enabled']
- return subprocess.call(cmd) == 0
-
-
-def add_new_group(group_name, system_group=False, gid=None):
- cmd = ['groupadd']
- if gid:
- cmd.extend(['--gid', str(gid)])
- if system_group:
- cmd.append('-r')
- cmd.append(group_name)
- subprocess.check_call(cmd)
-
-
-def lsb_release():
- """Return /etc/os-release in a dict."""
- d = {}
- with open('/etc/os-release', 'r') as lsb:
- for l in lsb:
- s = l.split('=')
- if len(s) != 2:
- continue
- d[s[0].strip()] = s[1].strip()
- return d
-
-
-def cmp_pkgrevno(package, revno, pkgcache=None):
- """Compare supplied revno with the revno of the installed package.
-
- * 1 => Installed revno is greater than supplied arg
- * 0 => Installed revno is the same as supplied arg
- * -1 => Installed revno is less than supplied arg
-
- This function imports YumBase function if the pkgcache argument
- is None.
- """
- if not pkgcache:
- y = yum.YumBase()
- packages = y.doPackageLists()
- pkgcache = {i.Name: i.version for i in packages['installed']}
- pkg = pkgcache[package]
- if pkg > revno:
- return 1
- if pkg < revno:
- return -1
- return 0
diff --git a/tests/charmhelpers/core/host_factory/ubuntu.py b/tests/charmhelpers/core/host_factory/ubuntu.py
deleted file mode 100644
index a6d375a..0000000
--- a/tests/charmhelpers/core/host_factory/ubuntu.py
+++ /dev/null
@@ -1,91 +0,0 @@
-import subprocess
-
-from charmhelpers.core.strutils import BasicStringComparator
-
-
-UBUNTU_RELEASES = (
- 'lucid',
- 'maverick',
- 'natty',
- 'oneiric',
- 'precise',
- 'quantal',
- 'raring',
- 'saucy',
- 'trusty',
- 'utopic',
- 'vivid',
- 'wily',
- 'xenial',
- 'yakkety',
- 'zesty',
- 'artful',
- 'bionic',
- 'cosmic',
-)
-
-
-class CompareHostReleases(BasicStringComparator):
- """Provide comparisons of Ubuntu releases.
-
- Use in the form of
-
- if CompareHostReleases(release) > 'trusty':
- # do something with mitaka
- """
- _list = UBUNTU_RELEASES
-
-
-def service_available(service_name):
- """Determine whether a system service is available"""
- try:
- subprocess.check_output(
- ['service', service_name, 'status'],
- stderr=subprocess.STDOUT).decode('UTF-8')
- except subprocess.CalledProcessError as e:
- return b'unrecognized service' not in e.output
- else:
- return True
-
-
-def add_new_group(group_name, system_group=False, gid=None):
- cmd = ['addgroup']
- if gid:
- cmd.extend(['--gid', str(gid)])
- if system_group:
- cmd.append('--system')
- else:
- cmd.extend([
- '--group',
- ])
- cmd.append(group_name)
- subprocess.check_call(cmd)
-
-
-def lsb_release():
- """Return /etc/lsb-release in a dict"""
- d = {}
- with open('/etc/lsb-release', 'r') as lsb:
- for l in lsb:
- k, v = l.split('=')
- d[k.strip()] = v.strip()
- return d
-
-
-def cmp_pkgrevno(package, revno, pkgcache=None):
- """Compare supplied revno with the revno of the installed package.
-
- * 1 => Installed revno is greater than supplied arg
- * 0 => Installed revno is the same as supplied arg
- * -1 => Installed revno is less than supplied arg
-
- This function imports apt_cache function from charmhelpers.fetch if
- the pkgcache argument is None. Be sure to add charmhelpers.fetch if
- you call this function, or pass an apt_pkg.Cache() instance.
- """
- import apt_pkg
- if not pkgcache:
- from charmhelpers.fetch import apt_cache
- pkgcache = apt_cache()
- pkg = pkgcache[package]
- return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
diff --git a/tests/charmhelpers/core/hugepage.py b/tests/charmhelpers/core/hugepage.py
deleted file mode 100644
index 54b5b5e..0000000
--- a/tests/charmhelpers/core/hugepage.py
+++ /dev/null
@@ -1,69 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import yaml
-from charmhelpers.core import fstab
-from charmhelpers.core import sysctl
-from charmhelpers.core.host import (
- add_group,
- add_user_to_group,
- fstab_mount,
- mkdir,
-)
-from charmhelpers.core.strutils import bytes_from_string
-from subprocess import check_output
-
-
-def hugepage_support(user, group='hugetlb', nr_hugepages=256,
- max_map_count=65536, mnt_point='/run/hugepages/kvm',
- pagesize='2MB', mount=True, set_shmmax=False):
- """Enable hugepages on system.
-
- Args:
- user (str) -- Username to allow access to hugepages to
- group (str) -- Group name to own hugepages
- nr_hugepages (int) -- Number of pages to reserve
- max_map_count (int) -- Number of Virtual Memory Areas a process can own
- mnt_point (str) -- Directory to mount hugepages on
- pagesize (str) -- Size of hugepages
- mount (bool) -- Whether to Mount hugepages
- """
- group_info = add_group(group)
- gid = group_info.gr_gid
- add_user_to_group(user, group)
- if max_map_count < 2 * nr_hugepages:
- max_map_count = 2 * nr_hugepages
- sysctl_settings = {
- 'vm.nr_hugepages': nr_hugepages,
- 'vm.max_map_count': max_map_count,
- 'vm.hugetlb_shm_group': gid,
- }
- if set_shmmax:
- shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
- shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
- if shmmax_minsize > shmmax_current:
- sysctl_settings['kernel.shmmax'] = shmmax_minsize
- sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
- mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
- lfstab = fstab.Fstab()
- fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
- if fstab_entry:
- lfstab.remove_entry(fstab_entry)
- entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
- 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
- lfstab.add_entry(entry)
- if mount:
- fstab_mount(mnt_point)
diff --git a/tests/charmhelpers/core/kernel.py b/tests/charmhelpers/core/kernel.py
deleted file mode 100644
index 2d40452..0000000
--- a/tests/charmhelpers/core/kernel.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import re
-import subprocess
-
-from charmhelpers.osplatform import get_platform
-from charmhelpers.core.hookenv import (
- log,
- INFO
-)
-
-__platform__ = get_platform()
-if __platform__ == "ubuntu":
- from charmhelpers.core.kernel_factory.ubuntu import (
- persistent_modprobe,
- update_initramfs,
- ) # flake8: noqa -- ignore F401 for this import
-elif __platform__ == "centos":
- from charmhelpers.core.kernel_factory.centos import (
- persistent_modprobe,
- update_initramfs,
- ) # flake8: noqa -- ignore F401 for this import
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-
-def modprobe(module, persist=True):
- """Load a kernel module and configure for auto-load on reboot."""
- cmd = ['modprobe', module]
-
- log('Loading kernel module %s' % module, level=INFO)
-
- subprocess.check_call(cmd)
- if persist:
- persistent_modprobe(module)
-
-
-def rmmod(module, force=False):
- """Remove a module from the linux kernel"""
- cmd = ['rmmod']
- if force:
- cmd.append('-f')
- cmd.append(module)
- log('Removing kernel module %s' % module, level=INFO)
- return subprocess.check_call(cmd)
-
-
-def lsmod():
- """Shows what kernel modules are currently loaded"""
- return subprocess.check_output(['lsmod'],
- universal_newlines=True)
-
-
-def is_module_loaded(module):
- """Checks if a kernel module is already loaded"""
- matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
- return len(matches) > 0
diff --git a/tests/charmhelpers/core/kernel_factory/__init__.py b/tests/charmhelpers/core/kernel_factory/__init__.py
deleted file mode 100644
index e69de29..0000000
--- a/tests/charmhelpers/core/kernel_factory/__init__.py
+++ /dev/null
diff --git a/tests/charmhelpers/core/kernel_factory/centos.py b/tests/charmhelpers/core/kernel_factory/centos.py
deleted file mode 100644
index 1c402c1..0000000
--- a/tests/charmhelpers/core/kernel_factory/centos.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import subprocess
-import os
-
-
-def persistent_modprobe(module):
- """Load a kernel module and configure for auto-load on reboot."""
- if not os.path.exists('/etc/rc.modules'):
- open('/etc/rc.modules', 'a')
- os.chmod('/etc/rc.modules', 111)
- with open('/etc/rc.modules', 'r+') as modules:
- if module not in modules.read():
- modules.write('modprobe %s\n' % module)
-
-
-def update_initramfs(version='all'):
- """Updates an initramfs image."""
- return subprocess.check_call(["dracut", "-f", version])
diff --git a/tests/charmhelpers/core/kernel_factory/ubuntu.py b/tests/charmhelpers/core/kernel_factory/ubuntu.py
deleted file mode 100644
index 3de372f..0000000
--- a/tests/charmhelpers/core/kernel_factory/ubuntu.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import subprocess
-
-
-def persistent_modprobe(module):
- """Load a kernel module and configure for auto-load on reboot."""
- with open('/etc/modules', 'r+') as modules:
- if module not in modules.read():
- modules.write(module + "\n")
-
-
-def update_initramfs(version='all'):
- """Updates an initramfs image."""
- return subprocess.check_call(["update-initramfs", "-k", version, "-u"])
diff --git a/tests/charmhelpers/core/services/__init__.py b/tests/charmhelpers/core/services/__init__.py
deleted file mode 100644
index 61fd074..0000000
--- a/tests/charmhelpers/core/services/__init__.py
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-from .base import * # NOQA
-from .helpers import * # NOQA
diff --git a/tests/charmhelpers/core/services/base.py b/tests/charmhelpers/core/services/base.py
deleted file mode 100644
index 179ad4f..0000000
--- a/tests/charmhelpers/core/services/base.py
+++ /dev/null
@@ -1,362 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import json
-from inspect import getargspec
-from collections import Iterable, OrderedDict
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-__all__ = ['ServiceManager', 'ManagerCallback',
- 'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
- 'service_restart', 'service_stop']
-
-
-class ServiceManager(object):
- def __init__(self, services=None):
- """
- Register a list of services, given their definitions.
-
- Service definitions are dicts in the following formats (all keys except
- 'service' are optional)::
-
- {
- "service": <service name>,
- "required_data": <list of required data contexts>,
- "provided_data": <list of provided data contexts>,
- "data_ready": <one or more callbacks>,
- "data_lost": <one or more callbacks>,
- "start": <one or more callbacks>,
- "stop": <one or more callbacks>,
- "ports": <list of ports to manage>,
- }
-
- The 'required_data' list should contain dicts of required data (or
- dependency managers that act like dicts and know how to collect the data).
- Only when all items in the 'required_data' list are populated are the list
- of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
- information.
-
- The 'provided_data' list should contain relation data providers, most likely
- a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
- that will indicate a set of data to set on a given relation.
-
- The 'data_ready' value should be either a single callback, or a list of
- callbacks, to be called when all items in 'required_data' pass `is_ready()`.
- Each callback will be called with the service name as the only parameter.
- After all of the 'data_ready' callbacks are called, the 'start' callbacks
- are fired.
-
- The 'data_lost' value should be either a single callback, or a list of
- callbacks, to be called when a 'required_data' item no longer passes
- `is_ready()`. Each callback will be called with the service name as the
- only parameter. After all of the 'data_lost' callbacks are called,
- the 'stop' callbacks are fired.
-
- The 'start' value should be either a single callback, or a list of
- callbacks, to be called when starting the service, after the 'data_ready'
- callbacks are complete. Each callback will be called with the service
- name as the only parameter. This defaults to
- `[host.service_start, services.open_ports]`.
-
- The 'stop' value should be either a single callback, or a list of
- callbacks, to be called when stopping the service. If the service is
- being stopped because it no longer has all of its 'required_data', this
- will be called after all of the 'data_lost' callbacks are complete.
- Each callback will be called with the service name as the only parameter.
- This defaults to `[services.close_ports, host.service_stop]`.
-
- The 'ports' value should be a list of ports to manage. The default
- 'start' handler will open the ports after the service is started,
- and the default 'stop' handler will close the ports prior to stopping
- the service.
-
-
- Examples:
-
- The following registers an Upstart service called bingod that depends on
- a mongodb relation and which runs a custom `db_migrate` function prior to
- restarting the service, and a Runit service called spadesd::
-
- manager = services.ServiceManager([
- {
- 'service': 'bingod',
- 'ports': [80, 443],
- 'required_data': [MongoRelation(), config(), {'my': 'data'}],
- 'data_ready': [
- services.template(source='bingod.conf'),
- services.template(source='bingod.ini',
- target='/etc/bingod.ini',
- owner='bingo', perms=0400),
- ],
- },
- {
- 'service': 'spadesd',
- 'data_ready': services.template(source='spadesd_run.j2',
- target='/etc/sv/spadesd/run',
- perms=0555),
- 'start': runit_start,
- 'stop': runit_stop,
- },
- ])
- manager.manage()
- """
- self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
- self._ready = None
- self.services = OrderedDict()
- for service in services or []:
- service_name = service['service']
- self.services[service_name] = service
-
- def manage(self):
- """
- Handle the current hook by doing The Right Thing with the registered services.
- """
- hookenv._run_atstart()
- try:
- hook_name = hookenv.hook_name()
- if hook_name == 'stop':
- self.stop_services()
- else:
- self.reconfigure_services()
- self.provide_data()
- except SystemExit as x:
- if x.code is None or x.code == 0:
- hookenv._run_atexit()
- hookenv._run_atexit()
-
- def provide_data(self):
- """
- Set the relation data for each provider in the ``provided_data`` list.
-
- A provider must have a `name` attribute, which indicates which relation
- to set data on, and a `provide_data()` method, which returns a dict of
- data to set.
-
- The `provide_data()` method can optionally accept two parameters:
-
- * ``remote_service`` The name of the remote service that the data will
- be provided to. The `provide_data()` method will be called once
- for each connected service (not unit). This allows the method to
- tailor its data to the given service.
- * ``service_ready`` Whether or not the service definition had all of
- its requirements met, and thus the ``data_ready`` callbacks run.
-
- Note that the ``provided_data`` methods are now called **after** the
- ``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
- a chance to generate any data necessary for the providing to the remote
- services.
- """
- for service_name, service in self.services.items():
- service_ready = self.is_ready(service_name)
- for provider in service.get('provided_data', []):
- for relid in hookenv.relation_ids(provider.name):
- units = hookenv.related_units(relid)
- if not units:
- continue
- remote_service = units[0].split('/')[0]
- argspec = getargspec(provider.provide_data)
- if len(argspec.args) > 1:
- data = provider.provide_data(remote_service, service_ready)
- else:
- data = provider.provide_data()
- if data:
- hookenv.relation_set(relid, data)
-
- def reconfigure_services(self, *service_names):
- """
- Update all files for one or more registered services, and,
- if ready, optionally restart them.
-
- If no service names are given, reconfigures all registered services.
- """
- for service_name in service_names or self.services.keys():
- if self.is_ready(service_name):
- self.fire_event('data_ready', service_name)
- self.fire_event('start', service_name, default=[
- service_restart,
- manage_ports])
- self.save_ready(service_name)
- else:
- if self.was_ready(service_name):
- self.fire_event('data_lost', service_name)
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
- self.save_lost(service_name)
-
- def stop_services(self, *service_names):
- """
- Stop one or more registered services, by name.
-
- If no service names are given, stops all registered services.
- """
- for service_name in service_names or self.services.keys():
- self.fire_event('stop', service_name, default=[
- manage_ports,
- service_stop])
-
- def get_service(self, service_name):
- """
- Given the name of a registered service, return its service definition.
- """
- service = self.services.get(service_name)
- if not service:
- raise KeyError('Service not registered: %s' % service_name)
- return service
-
- def fire_event(self, event_name, service_name, default=None):
- """
- Fire a data_ready, data_lost, start, or stop event on a given service.
- """
- service = self.get_service(service_name)
- callbacks = service.get(event_name, default)
- if not callbacks:
- return
- if not isinstance(callbacks, Iterable):
- callbacks = [callbacks]
- for callback in callbacks:
- if isinstance(callback, ManagerCallback):
- callback(self, service_name, event_name)
- else:
- callback(service_name)
-
- def is_ready(self, service_name):
- """
- Determine if a registered service is ready, by checking its 'required_data'.
-
- A 'required_data' item can be any mapping type, and is considered ready
- if `bool(item)` evaluates as True.
- """
- service = self.get_service(service_name)
- reqs = service.get('required_data', [])
- return all(bool(req) for req in reqs)
-
- def _load_ready_file(self):
- if self._ready is not None:
- return
- if os.path.exists(self._ready_file):
- with open(self._ready_file) as fp:
- self._ready = set(json.load(fp))
- else:
- self._ready = set()
-
- def _save_ready_file(self):
- if self._ready is None:
- return
- with open(self._ready_file, 'w') as fp:
- json.dump(list(self._ready), fp)
-
- def save_ready(self, service_name):
- """
- Save an indicator that the given service is now data_ready.
- """
- self._load_ready_file()
- self._ready.add(service_name)
- self._save_ready_file()
-
- def save_lost(self, service_name):
- """
- Save an indicator that the given service is no longer data_ready.
- """
- self._load_ready_file()
- self._ready.discard(service_name)
- self._save_ready_file()
-
- def was_ready(self, service_name):
- """
- Determine if the given service was previously data_ready.
- """
- self._load_ready_file()
- return service_name in self._ready
-
-
-class ManagerCallback(object):
- """
- Special case of a callback that takes the `ServiceManager` instance
- in addition to the service name.
-
- Subclasses should implement `__call__` which should accept three parameters:
-
- * `manager` The `ServiceManager` instance
- * `service_name` The name of the service it's being triggered for
- * `event_name` The name of the event that this callback is handling
- """
- def __call__(self, manager, service_name, event_name):
- raise NotImplementedError()
-
-
-class PortManagerCallback(ManagerCallback):
- """
- Callback class that will open or close ports, for use as either
- a start or stop action.
- """
- def __call__(self, manager, service_name, event_name):
- service = manager.get_service(service_name)
- # turn this generator into a list,
- # as we'll be going over it multiple times
- new_ports = list(service.get('ports', []))
- port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
- if os.path.exists(port_file):
- with open(port_file) as fp:
- old_ports = fp.read().split(',')
- for old_port in old_ports:
- if bool(old_port) and not self.ports_contains(old_port, new_ports):
- hookenv.close_port(old_port)
- with open(port_file, 'w') as fp:
- fp.write(','.join(str(port) for port in new_ports))
- for port in new_ports:
- # A port is either a number or 'ICMP'
- protocol = 'TCP'
- if str(port).upper() == 'ICMP':
- protocol = 'ICMP'
- if event_name == 'start':
- hookenv.open_port(port, protocol)
- elif event_name == 'stop':
- hookenv.close_port(port, protocol)
-
- def ports_contains(self, port, ports):
- if not bool(port):
- return False
- if str(port).upper() != 'ICMP':
- port = int(port)
- return port in ports
-
-
-def service_stop(service_name):
- """
- Wrapper around host.service_stop to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_running(service_name):
- host.service_stop(service_name)
-
-
-def service_restart(service_name):
- """
- Wrapper around host.service_restart to prevent spurious "unknown service"
- messages in the logs.
- """
- if host.service_available(service_name):
- if host.service_running(service_name):
- host.service_restart(service_name)
- else:
- host.service_start(service_name)
-
-
-# Convenience aliases
-open_ports = close_ports = manage_ports = PortManagerCallback()
diff --git a/tests/charmhelpers/core/services/helpers.py b/tests/charmhelpers/core/services/helpers.py
deleted file mode 100644
index 3e6e30d..0000000
--- a/tests/charmhelpers/core/services/helpers.py
+++ /dev/null
@@ -1,290 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import yaml
-
-from charmhelpers.core import hookenv
-from charmhelpers.core import host
-from charmhelpers.core import templating
-
-from charmhelpers.core.services.base import ManagerCallback
-
-
-__all__ = ['RelationContext', 'TemplateCallback',
- 'render_template', 'template']
-
-
-class RelationContext(dict):
- """
- Base class for a context generator that gets relation data from juju.
-
- Subclasses must provide the attributes `name`, which is the name of the
- interface of interest, `interface`, which is the type of the interface of
- interest, and `required_keys`, which is the set of keys required for the
- relation to be considered complete. The data for all interfaces matching
- the `name` attribute that are complete will used to populate the dictionary
- values (see `get_data`, below).
-
- The generated context will be namespaced under the relation :attr:`name`,
- to prevent potential naming conflicts.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = None
- interface = None
-
- def __init__(self, name=None, additional_required_keys=None):
- if not hasattr(self, 'required_keys'):
- self.required_keys = []
-
- if name is not None:
- self.name = name
- if additional_required_keys:
- self.required_keys.extend(additional_required_keys)
- self.get_data()
-
- def __bool__(self):
- """
- Returns True if all of the required_keys are available.
- """
- return self.is_ready()
-
- __nonzero__ = __bool__
-
- def __repr__(self):
- return super(RelationContext, self).__repr__()
-
- def is_ready(self):
- """
- Returns True if all of the `required_keys` are available from any units.
- """
- ready = len(self.get(self.name, [])) > 0
- if not ready:
- hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
- return ready
-
- def _is_ready(self, unit_data):
- """
- Helper method that tests a set of relation data and returns True if
- all of the `required_keys` are present.
- """
- return set(unit_data.keys()).issuperset(set(self.required_keys))
-
- def get_data(self):
- """
- Retrieve the relation data for each unit involved in a relation and,
- if complete, store it in a list under `self[self.name]`. This
- is automatically called when the RelationContext is instantiated.
-
- The units are sorted lexographically first by the service ID, then by
- the unit ID. Thus, if an interface has two other services, 'db:1'
- and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
- and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
- set of data, the relation data for the units will be stored in the
- order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
-
- If you only care about a single unit on the relation, you can just
- access it as `{{ interface[0]['key'] }}`. However, if you can at all
- support multiple units on a relation, you should iterate over the list,
- like::
-
- {% for unit in interface -%}
- {{ unit['key'] }}{% if not loop.last %},{% endif %}
- {%- endfor %}
-
- Note that since all sets of relation data from all related services and
- units are in a single list, if you need to know which service or unit a
- set of data came from, you'll need to extend this class to preserve
- that information.
- """
- if not hookenv.relation_ids(self.name):
- return
-
- ns = self.setdefault(self.name, [])
- for rid in sorted(hookenv.relation_ids(self.name)):
- for unit in sorted(hookenv.related_units(rid)):
- reldata = hookenv.relation_get(rid=rid, unit=unit)
- if self._is_ready(reldata):
- ns.append(reldata)
-
- def provide_data(self):
- """
- Return data to be relation_set for this interface.
- """
- return {}
-
-
-class MysqlRelation(RelationContext):
- """
- Relation context for the `mysql` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'db'
- interface = 'mysql'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'user', 'password', 'database']
- RelationContext.__init__(self, *args, **kwargs)
-
-
-class HttpRelation(RelationContext):
- """
- Relation context for the `http` interface.
-
- :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
- :param list additional_required_keys: Extend the list of :attr:`required_keys`
- """
- name = 'website'
- interface = 'http'
-
- def __init__(self, *args, **kwargs):
- self.required_keys = ['host', 'port']
- RelationContext.__init__(self, *args, **kwargs)
-
- def provide_data(self):
- return {
- 'host': hookenv.unit_get('private-address'),
- 'port': 80,
- }
-
-
-class RequiredConfig(dict):
- """
- Data context that loads config options with one or more mandatory options.
-
- Once the required options have been changed from their default values, all
- config options will be available, namespaced under `config` to prevent
- potential naming conflicts (for example, between a config option and a
- relation property).
-
- :param list *args: List of options that must be changed from their default values.
- """
-
- def __init__(self, *args):
- self.required_options = args
- self['config'] = hookenv.config()
- with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
- self.config = yaml.load(fp).get('options', {})
-
- def __bool__(self):
- for option in self.required_options:
- if option not in self['config']:
- return False
- current_value = self['config'][option]
- default_value = self.config[option].get('default')
- if current_value == default_value:
- return False
- if current_value in (None, '') and default_value in (None, ''):
- return False
- return True
-
- def __nonzero__(self):
- return self.__bool__()
-
-
-class StoredContext(dict):
- """
- A data context that always returns the data that it was first created with.
-
- This is useful to do a one-time generation of things like passwords, that
- will thereafter use the same value that was originally generated, instead
- of generating a new value each time it is run.
- """
- def __init__(self, file_name, config_data):
- """
- If the file exists, populate `self` with the data from the file.
- Otherwise, populate with the given data and persist it to the file.
- """
- if os.path.exists(file_name):
- self.update(self.read_context(file_name))
- else:
- self.store_context(file_name, config_data)
- self.update(config_data)
-
- def store_context(self, file_name, config_data):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'w') as file_stream:
- os.fchmod(file_stream.fileno(), 0o600)
- yaml.dump(config_data, file_stream)
-
- def read_context(self, file_name):
- if not os.path.isabs(file_name):
- file_name = os.path.join(hookenv.charm_dir(), file_name)
- with open(file_name, 'r') as file_stream:
- data = yaml.load(file_stream)
- if not data:
- raise OSError("%s is empty" % file_name)
- return data
-
-
-class TemplateCallback(ManagerCallback):
- """
- Callback class that will render a Jinja2 template, for use as a ready
- action.
-
- :param str source: The template source file, relative to
- `$CHARM_DIR/templates`
-
- :param str target: The target to write the rendered template to (or None)
- :param str owner: The owner of the rendered file
- :param str group: The group of the rendered file
- :param int perms: The permissions of the rendered file
- :param partial on_change_action: functools partial to be executed when
- rendered file changes
- :param jinja2 loader template_loader: A jinja2 template loader
-
- :return str: The rendered template
- """
- def __init__(self, source, target,
- owner='root', group='root', perms=0o444,
- on_change_action=None, template_loader=None):
- self.source = source
- self.target = target
- self.owner = owner
- self.group = group
- self.perms = perms
- self.on_change_action = on_change_action
- self.template_loader = template_loader
-
- def __call__(self, manager, service_name, event_name):
- pre_checksum = ''
- if self.on_change_action and os.path.isfile(self.target):
- pre_checksum = host.file_hash(self.target)
- service = manager.get_service(service_name)
- context = {'ctx': {}}
- for ctx in service.get('required_data', []):
- context.update(ctx)
- context['ctx'].update(ctx)
-
- result = templating.render(self.source, self.target, context,
- self.owner, self.group, self.perms,
- template_loader=self.template_loader)
- if self.on_change_action:
- if pre_checksum == host.file_hash(self.target):
- hookenv.log(
- 'No change detected: {}'.format(self.target),
- hookenv.DEBUG)
- else:
- self.on_change_action()
-
- return result
-
-
-# Convenience aliases for templates
-render_template = template = TemplateCallback
diff --git a/tests/charmhelpers/core/strutils.py b/tests/charmhelpers/core/strutils.py
deleted file mode 100644
index e8df045..0000000
--- a/tests/charmhelpers/core/strutils.py
+++ /dev/null
@@ -1,129 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import six
-import re
-
-
-def bool_from_string(value):
- """Interpret string value as boolean.
-
- Returns True if value translates to True otherwise False.
- """
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
- value = value.strip().lower()
-
- if value in ['y', 'yes', 'true', 't', 'on']:
- return True
- elif value in ['n', 'no', 'false', 'f', 'off']:
- return False
-
- msg = "Unable to interpret string value '%s' as boolean" % (value)
- raise ValueError(msg)
-
-
-def bytes_from_string(value):
- """Interpret human readable string value as bytes.
-
- Returns int
- """
- BYTE_POWER = {
- 'K': 1,
- 'KB': 1,
- 'M': 2,
- 'MB': 2,
- 'G': 3,
- 'GB': 3,
- 'T': 4,
- 'TB': 4,
- 'P': 5,
- 'PB': 5,
- }
- if isinstance(value, six.string_types):
- value = six.text_type(value)
- else:
- msg = "Unable to interpret non-string value '%s' as bytes" % (value)
- raise ValueError(msg)
- matches = re.match("([0-9]+)([a-zA-Z]+)", value)
- if matches:
- size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
- else:
- # Assume that value passed in is bytes
- try:
- size = int(value)
- except ValueError:
- msg = "Unable to interpret string value '%s' as bytes" % (value)
- raise ValueError(msg)
- return size
-
-
-class BasicStringComparator(object):
- """Provides a class that will compare strings from an iterator type object.
- Used to provide > and < comparisons on strings that may not necessarily be
- alphanumerically ordered. e.g. OpenStack or Ubuntu releases AFTER the
- z-wrap.
- """
-
- _list = None
-
- def __init__(self, item):
- if self._list is None:
- raise Exception("Must define the _list in the class definition!")
- try:
- self.index = self._list.index(item)
- except Exception:
- raise KeyError("Item '{}' is not in list '{}'"
- .format(item, self._list))
-
- def __eq__(self, other):
- assert isinstance(other, str) or isinstance(other, self.__class__)
- return self.index == self._list.index(other)
-
- def __ne__(self, other):
- return not self.__eq__(other)
-
- def __lt__(self, other):
- assert isinstance(other, str) or isinstance(other, self.__class__)
- return self.index < self._list.index(other)
-
- def __ge__(self, other):
- return not self.__lt__(other)
-
- def __gt__(self, other):
- assert isinstance(other, str) or isinstance(other, self.__class__)
- return self.index > self._list.index(other)
-
- def __le__(self, other):
- return not self.__gt__(other)
-
- def __str__(self):
- """Always give back the item at the index so it can be used in
- comparisons like:
-
- s_mitaka = CompareOpenStack('mitaka')
- s_newton = CompareOpenstack('newton')
-
- assert s_newton > s_mitaka
-
- @returns: <string>
- """
- return self._list[self.index]
diff --git a/tests/charmhelpers/core/sysctl.py b/tests/charmhelpers/core/sysctl.py
deleted file mode 100644
index 1f188d8..0000000
--- a/tests/charmhelpers/core/sysctl.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import yaml
-
-from subprocess import check_call
-
-from charmhelpers.core.hookenv import (
- log,
- DEBUG,
- ERROR,
-)
-
-__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-
-
-def create(sysctl_dict, sysctl_file):
- """Creates a sysctl.conf file from a YAML associative array
-
- :param sysctl_dict: a dict or YAML-formatted string of sysctl
- options eg "{ 'kernel.max_pid': 1337 }"
- :type sysctl_dict: str
- :param sysctl_file: path to the sysctl file to be saved
- :type sysctl_file: str or unicode
- :returns: None
- """
- if type(sysctl_dict) is not dict:
- try:
- sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
- except yaml.YAMLError:
- log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
- level=ERROR)
- return
- else:
- sysctl_dict_parsed = sysctl_dict
-
- with open(sysctl_file, "w") as fd:
- for key, value in sysctl_dict_parsed.items():
- fd.write("{}={}\n".format(key, value))
-
- log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
- level=DEBUG)
-
- check_call(["sysctl", "-p", sysctl_file])
diff --git a/tests/charmhelpers/core/templating.py b/tests/charmhelpers/core/templating.py
deleted file mode 100644
index 9014015..0000000
--- a/tests/charmhelpers/core/templating.py
+++ /dev/null
@@ -1,93 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import sys
-
-from charmhelpers.core import host
-from charmhelpers.core import hookenv
-
-
-def render(source, target, context, owner='root', group='root',
- perms=0o444, templates_dir=None, encoding='UTF-8',
- template_loader=None, config_template=None):
- """
- Render a template.
-
- The `source` path, if not absolute, is relative to the `templates_dir`.
-
- The `target` path should be absolute. It can also be `None`, in which
- case no file will be written.
-
- The context should be a dict containing the values to be replaced in the
- template.
-
- config_template may be provided to render from a provided template instead
- of loading from a file.
-
- The `owner`, `group`, and `perms` options will be passed to `write_file`.
-
- If omitted, `templates_dir` defaults to the `templates` folder in the charm.
-
- The rendered template will be written to the file as well as being returned
- as a string.
-
- Note: Using this requires python-jinja2 or python3-jinja2; if it is not
- installed, calling this will attempt to use charmhelpers.fetch.apt_install
- to install it.
- """
- try:
- from jinja2 import FileSystemLoader, Environment, exceptions
- except ImportError:
- try:
- from charmhelpers.fetch import apt_install
- except ImportError:
- hookenv.log('Could not import jinja2, and could not import '
- 'charmhelpers.fetch to install it',
- level=hookenv.ERROR)
- raise
- if sys.version_info.major == 2:
- apt_install('python-jinja2', fatal=True)
- else:
- apt_install('python3-jinja2', fatal=True)
- from jinja2 import FileSystemLoader, Environment, exceptions
-
- if template_loader:
- template_env = Environment(loader=template_loader)
- else:
- if templates_dir is None:
- templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
- template_env = Environment(loader=FileSystemLoader(templates_dir))
-
- # load from a string if provided explicitly
- if config_template is not None:
- template = template_env.from_string(config_template)
- else:
- try:
- source = source
- template = template_env.get_template(source)
- except exceptions.TemplateNotFound as e:
- hookenv.log('Could not load template %s from %s.' %
- (source, templates_dir),
- level=hookenv.ERROR)
- raise e
- content = template.render(context)
- if target is not None:
- target_dir = os.path.dirname(target)
- if not os.path.exists(target_dir):
- # This is a terrible default directory permission, as the file
- # or its siblings will often contain secrets.
- host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
- host.write_file(target, content.encode(encoding), owner, group, perms)
- return content
diff --git a/tests/charmhelpers/core/unitdata.py b/tests/charmhelpers/core/unitdata.py
deleted file mode 100644
index ab55432..0000000
--- a/tests/charmhelpers/core/unitdata.py
+++ /dev/null
@@ -1,525 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-#
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# Authors:
-# Kapil Thangavelu <kapil.foss@gmail.com>
-#
-"""
-Intro
------
-
-A simple way to store state in units. This provides a key value
-storage with support for versioned, transactional operation,
-and can calculate deltas from previous values to simplify unit logic
-when processing changes.
-
-
-Hook Integration
-----------------
-
-There are several extant frameworks for hook execution, including
-
- - charmhelpers.core.hookenv.Hooks
- - charmhelpers.core.services.ServiceManager
-
-The storage classes are framework agnostic, one simple integration is
-via the HookData contextmanager. It will record the current hook
-execution environment (including relation data, config data, etc.),
-setup a transaction and allow easy access to the changes from
-previously seen values. One consequence of the integration is the
-reservation of particular keys ('rels', 'unit', 'env', 'config',
-'charm_revisions') for their respective values.
-
-Here's a fully worked integration example using hookenv.Hooks::
-
- from charmhelper.core import hookenv, unitdata
-
- hook_data = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # Print all changes to configuration from previously seen
- # values.
- for changed, (prev, cur) in hook_data.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- # Directly access all charm config as a mapping.
- conf = db.getrange('config', True)
-
- # Directly access all relation data as a mapping
- rels = db.getrange('rels', True)
-
- if __name__ == '__main__':
- with hook_data():
- hook.execute()
-
-
-A more basic integration is via the hook_scope context manager which simply
-manages transaction scope (and records hook name, and timestamp)::
-
- >>> from unitdata import kv
- >>> db = kv()
- >>> with db.hook_scope('install'):
- ... # do work, in transactional scope.
- ... db.set('x', 1)
- >>> db.get('x')
- 1
-
-
-Usage
------
-
-Values are automatically json de/serialized to preserve basic typing
-and complex data struct capabilities (dicts, lists, ints, booleans, etc).
-
-Individual values can be manipulated via get/set::
-
- >>> kv.set('y', True)
- >>> kv.get('y')
- True
-
- # We can set complex values (dicts, lists) as a single key.
- >>> kv.set('config', {'a': 1, 'b': True'})
-
- # Also supports returning dictionaries as a record which
- # provides attribute access.
- >>> config = kv.get('config', record=True)
- >>> config.b
- True
-
-
-Groups of keys can be manipulated with update/getrange::
-
- >>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
- >>> kv.getrange('gui.', strip=True)
- {'z': 1, 'y': 2}
-
-When updating values, its very helpful to understand which values
-have actually changed and how have they changed. The storage
-provides a delta method to provide for this::
-
- >>> data = {'debug': True, 'option': 2}
- >>> delta = kv.delta(data, 'config.')
- >>> delta.debug.previous
- None
- >>> delta.debug.current
- True
- >>> delta
- {'debug': (None, True), 'option': (None, 2)}
-
-Note the delta method does not persist the actual change, it needs to
-be explicitly saved via 'update' method::
-
- >>> kv.update(data, 'config.')
-
-Values modified in the context of a hook scope retain historical values
-associated to the hookname.
-
- >>> with db.hook_scope('config-changed'):
- ... db.set('x', 42)
- >>> db.gethistory('x')
- [(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
- (2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
-
-"""
-
-import collections
-import contextlib
-import datetime
-import itertools
-import json
-import os
-import pprint
-import sqlite3
-import sys
-
-__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
-
-
-class Storage(object):
- """Simple key value database for local unit state within charms.
-
- Modifications are not persisted unless :meth:`flush` is called.
-
- To support dicts, lists, integer, floats, and booleans values
- are automatically json encoded/decoded.
-
- Note: to facilitate unit testing, ':memory:' can be passed as the
- path parameter which causes sqlite3 to only build the db in memory.
- This should only be used for testing purposes.
- """
- def __init__(self, path=None):
- self.db_path = path
- if path is None:
- if 'UNIT_STATE_DB' in os.environ:
- self.db_path = os.environ['UNIT_STATE_DB']
- else:
- self.db_path = os.path.join(
- os.environ.get('CHARM_DIR', ''), '.unit-state.db')
- if self.db_path != ':memory:':
- with open(self.db_path, 'a') as f:
- os.fchmod(f.fileno(), 0o600)
- self.conn = sqlite3.connect('%s' % self.db_path)
- self.cursor = self.conn.cursor()
- self.revision = None
- self._closed = False
- self._init()
-
- def close(self):
- if self._closed:
- return
- self.flush(False)
- self.cursor.close()
- self.conn.close()
- self._closed = True
-
- def get(self, key, default=None, record=False):
- self.cursor.execute('select data from kv where key=?', [key])
- result = self.cursor.fetchone()
- if not result:
- return default
- if record:
- return Record(json.loads(result[0]))
- return json.loads(result[0])
-
- def getrange(self, key_prefix, strip=False):
- """
- Get a range of keys starting with a common prefix as a mapping of
- keys to values.
-
- :param str key_prefix: Common prefix among all keys
- :param bool strip: Optionally strip the common prefix from the key
- names in the returned dict
- :return dict: A (possibly empty) dict of key-value mappings
- """
- self.cursor.execute("select key, data from kv where key like ?",
- ['%s%%' % key_prefix])
- result = self.cursor.fetchall()
-
- if not result:
- return {}
- if not strip:
- key_prefix = ''
- return dict([
- (k[len(key_prefix):], json.loads(v)) for k, v in result])
-
- def update(self, mapping, prefix=""):
- """
- Set the values of multiple keys at once.
-
- :param dict mapping: Mapping of keys to values
- :param str prefix: Optional prefix to apply to all keys in `mapping`
- before setting
- """
- for k, v in mapping.items():
- self.set("%s%s" % (prefix, k), v)
-
- def unset(self, key):
- """
- Remove a key from the database entirely.
- """
- self.cursor.execute('delete from kv where key=?', [key])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- [key, self.revision, json.dumps('DELETED')])
-
- def unsetrange(self, keys=None, prefix=""):
- """
- Remove a range of keys starting with a common prefix, from the database
- entirely.
-
- :param list keys: List of keys to remove.
- :param str prefix: Optional prefix to apply to all keys in ``keys``
- before removing.
- """
- if keys is not None:
- keys = ['%s%s' % (prefix, key) for key in keys]
- self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
- list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
- else:
- self.cursor.execute('delete from kv where key like ?',
- ['%s%%' % prefix])
- if self.revision and self.cursor.rowcount:
- self.cursor.execute(
- 'insert into kv_revisions values (?, ?, ?)',
- ['%s%%' % prefix, self.revision, json.dumps('DELETED')])
-
- def set(self, key, value):
- """
- Set a value in the database.
-
- :param str key: Key to set the value for
- :param value: Any JSON-serializable value to be set
- """
- serialized = json.dumps(value)
-
- self.cursor.execute('select data from kv where key=?', [key])
- exists = self.cursor.fetchone()
-
- # Skip mutations to the same value
- if exists:
- if exists[0] == serialized:
- return value
-
- if not exists:
- self.cursor.execute(
- 'insert into kv (key, data) values (?, ?)',
- (key, serialized))
- else:
- self.cursor.execute('''
- update kv
- set data = ?
- where key = ?''', [serialized, key])
-
- # Save
- if not self.revision:
- return value
-
- self.cursor.execute(
- 'select 1 from kv_revisions where key=? and revision=?',
- [key, self.revision])
- exists = self.cursor.fetchone()
-
- if not exists:
- self.cursor.execute(
- '''insert into kv_revisions (
- revision, key, data) values (?, ?, ?)''',
- (self.revision, key, serialized))
- else:
- self.cursor.execute(
- '''
- update kv_revisions
- set data = ?
- where key = ?
- and revision = ?''',
- [serialized, key, self.revision])
-
- return value
-
- def delta(self, mapping, prefix):
- """
- return a delta containing values that have changed.
- """
- previous = self.getrange(prefix, strip=True)
- if not previous:
- pk = set()
- else:
- pk = set(previous.keys())
- ck = set(mapping.keys())
- delta = DeltaSet()
-
- # added
- for k in ck.difference(pk):
- delta[k] = Delta(None, mapping[k])
-
- # removed
- for k in pk.difference(ck):
- delta[k] = Delta(previous[k], None)
-
- # changed
- for k in pk.intersection(ck):
- c = mapping[k]
- p = previous[k]
- if c != p:
- delta[k] = Delta(p, c)
-
- return delta
-
- @contextlib.contextmanager
- def hook_scope(self, name=""):
- """Scope all future interactions to the current hook execution
- revision."""
- assert not self.revision
- self.cursor.execute(
- 'insert into hooks (hook, date) values (?, ?)',
- (name or sys.argv[0],
- datetime.datetime.utcnow().isoformat()))
- self.revision = self.cursor.lastrowid
- try:
- yield self.revision
- self.revision = None
- except Exception:
- self.flush(False)
- self.revision = None
- raise
- else:
- self.flush()
-
- def flush(self, save=True):
- if save:
- self.conn.commit()
- elif self._closed:
- return
- else:
- self.conn.rollback()
-
- def _init(self):
- self.cursor.execute('''
- create table if not exists kv (
- key text,
- data text,
- primary key (key)
- )''')
- self.cursor.execute('''
- create table if not exists kv_revisions (
- key text,
- revision integer,
- data text,
- primary key (key, revision)
- )''')
- self.cursor.execute('''
- create table if not exists hooks (
- version integer primary key autoincrement,
- hook text,
- date text
- )''')
- self.conn.commit()
-
- def gethistory(self, key, deserialize=False):
- self.cursor.execute(
- '''
- select kv.revision, kv.key, kv.data, h.hook, h.date
- from kv_revisions kv,
- hooks h
- where kv.key=?
- and kv.revision = h.version
- ''', [key])
- if deserialize is False:
- return self.cursor.fetchall()
- return map(_parse_history, self.cursor.fetchall())
-
- def debug(self, fh=sys.stderr):
- self.cursor.execute('select * from kv')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
- self.cursor.execute('select * from kv_revisions')
- pprint.pprint(self.cursor.fetchall(), stream=fh)
-
-
-def _parse_history(d):
- return (d[0], d[1], json.loads(d[2]), d[3],
- datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
-
-
-class HookData(object):
- """Simple integration for existing hook exec frameworks.
-
- Records all unit information, and stores deltas for processing
- by the hook.
-
- Sample::
-
- from charmhelper.core import hookenv, unitdata
-
- changes = unitdata.HookData()
- db = unitdata.kv()
- hooks = hookenv.Hooks()
-
- @hooks.hook
- def config_changed():
- # View all changes to configuration
- for changed, (prev, cur) in changes.conf.items():
- print('config changed', changed,
- 'previous value', prev,
- 'current value', cur)
-
- # Get some unit specific bookeeping
- if not db.get('pkg_key'):
- key = urllib.urlopen('https://example.com/pkg_key').read()
- db.set('pkg_key', key)
-
- if __name__ == '__main__':
- with changes():
- hook.execute()
-
- """
- def __init__(self):
- self.kv = kv()
- self.conf = None
- self.rels = None
-
- @contextlib.contextmanager
- def __call__(self):
- from charmhelpers.core import hookenv
- hook_name = hookenv.hook_name()
-
- with self.kv.hook_scope(hook_name):
- self._record_charm_version(hookenv.charm_dir())
- delta_config, delta_relation = self._record_hook(hookenv)
- yield self.kv, delta_config, delta_relation
-
- def _record_charm_version(self, charm_dir):
- # Record revisions.. charm revisions are meaningless
- # to charm authors as they don't control the revision.
- # so logic dependnent on revision is not particularly
- # useful, however it is useful for debugging analysis.
- charm_rev = open(
- os.path.join(charm_dir, 'revision')).read().strip()
- charm_rev = charm_rev or '0'
- revs = self.kv.get('charm_revisions', [])
- if charm_rev not in revs:
- revs.append(charm_rev.strip() or '0')
- self.kv.set('charm_revisions', revs)
-
- def _record_hook(self, hookenv):
- data = hookenv.execution_environment()
- self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
- self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
- self.kv.set('env', dict(data['env']))
- self.kv.set('unit', data['unit'])
- self.kv.set('relid', data.get('relid'))
- return conf_delta, rels_delta
-
-
-class Record(dict):
-
- __slots__ = ()
-
- def __getattr__(self, k):
- if k in self:
- return self[k]
- raise AttributeError(k)
-
-
-class DeltaSet(Record):
-
- __slots__ = ()
-
-
-Delta = collections.namedtuple('Delta', ['previous', 'current'])
-
-
-_KV = None
-
-
-def kv():
- global _KV
- if _KV is None:
- _KV = Storage()
- return _KV
diff --git a/tests/charmhelpers/osplatform.py b/tests/charmhelpers/osplatform.py
deleted file mode 100644
index d9a4d5c..0000000
--- a/tests/charmhelpers/osplatform.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import platform
-
-
-def get_platform():
- """Return the current OS platform.
-
- For example: if current os platform is Ubuntu then a string "ubuntu"
- will be returned (which is the name of the module).
- This string is used to decide which platform module should be imported.
- """
- # linux_distribution is deprecated and will be removed in Python 3.7
- # Warings *not* disabled, as we certainly need to fix this.
- tuple_platform = platform.linux_distribution()
- current_platform = tuple_platform[0]
- if "Ubuntu" in current_platform:
- return "ubuntu"
- elif "CentOS" in current_platform:
- return "centos"
- elif "debian" in current_platform:
- # Stock Python does not detect Ubuntu and instead returns debian.
- # Or at least it does in some build environments like Travis CI
- return "ubuntu"
- else:
- raise RuntimeError("This module is not supported on {}."
- .format(current_platform))

This mirror site includes all the OpenStack-related repositories under: openstack, openstack-dev and openstack-infra.

NOTE: All repositories are updated every hour.

Usage

For Git Clone
 git clone http://git.trystack.cn/openstack/nova.git 
For DevStack

Add GIT_BASE, NOVNC_REPO and SPICE_REPO variables to local.conf file.

[[local|localrc]]

# use TryStack git mirror
GIT_BASE=http://git.trystack.cn
NOVNC_REPO=http://git.trystack.cn/kanaka/noVNC.git
SPICE_REPO=http://git.trystack.cn/git/spice/spice-html5.git