author     Chris MacNaughton <chris.macnaughton@canonical.com>  2019-04-04 10:23:50 +0200
committer  Chris MacNaughton <chris.macnaughton@canonical.com>  2019-04-04 10:24:06 +0200
commit     ade8687ad831c0922dc707d198b3bc9b8a34c374 (patch)
tree       16491ad42cc2262a9aaa03e10ce878e7be9a3955
parent     6d0022b967e7ebe04ab6a8a7dbd752a766186794 (diff)
download   charm-nova-lxd-ade8687ad831c0922dc707d198b3bc9b8a34c374.zip
           charm-nova-lxd-ade8687ad831c0922dc707d198b3bc9b8a34c374.tar.gz
           charm-nova-lxd-ade8687ad831c0922dc707d198b3bc9b8a34c374.tar.bz2
Sync charm-helpers for Stein release
As part of the Stein release, we need to ensure that charmhelpers is up to date.

Change-Id: I322710380622515e279b5f1c7cdbddfe7d7d3140
-rw-r--r--  charm-helpers.yaml                                        2
-rw-r--r--  hooks/charmhelpers/contrib/openstack/amulet/utils.py     16
-rw-r--r--  hooks/charmhelpers/contrib/openstack/cert_utils.py       18
-rw-r--r--  hooks/charmhelpers/contrib/openstack/context.py          47
-rw-r--r--  hooks/charmhelpers/contrib/openstack/ip.py                5
-rw-r--r--  hooks/charmhelpers/contrib/openstack/templating.py        2
-rw-r--r--  hooks/charmhelpers/contrib/openstack/utils.py            22
-rw-r--r--  hooks/charmhelpers/contrib/python/__init__.py            13
-rw-r--r--  hooks/charmhelpers/contrib/python/packages.py           154
-rw-r--r--  hooks/charmhelpers/contrib/storage/linux/ceph.py        218
-rw-r--r--  hooks/charmhelpers/contrib/storage/linux/utils.py        41
-rw-r--r--  hooks/charmhelpers/core/hookenv.py                       74
-rw-r--r--  hooks/charmhelpers/core/host.py                           2
-rw-r--r--  hooks/charmhelpers/core/host_factory/ubuntu.py           22
-rw-r--r--  hooks/charmhelpers/core/sysctl.py                        13
-rw-r--r--  hooks/charmhelpers/fetch/ubuntu.py                      183
16 files changed, 529 insertions(+), 303 deletions(-)
diff --git a/charm-helpers.yaml b/charm-helpers.yaml
index 3a19797..2390835 100644
--- a/charm-helpers.yaml
+++ b/charm-helpers.yaml
@@ -10,6 +10,6 @@ include:
- apache
- cluster
- contrib.network
- - contrib.python.packages
+ - contrib.python
- payload.execd
- contrib.charmsupport
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
index ea1fd8f..53fa650 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
@@ -88,14 +88,14 @@ class OpenStackAmuletUtils(AmuletUtils):
validation_function = self.validate_v2_endpoint_data
xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
if openstack_release and openstack_release >= xenial_queens:
- validation_function = self.validate_v3_endpoint_data
- expected = {
- 'id': expected['id'],
- 'region': expected['region'],
- 'region_id': 'RegionOne',
- 'url': self.valid_url,
- 'interface': self.not_null,
- 'service_id': expected['service_id']}
+ validation_function = self.validate_v3_endpoint_data
+ expected = {
+ 'id': expected['id'],
+ 'region': expected['region'],
+ 'region_id': 'RegionOne',
+ 'url': self.valid_url,
+ 'interface': self.not_null,
+ 'service_id': expected['service_id']}
return validation_function(endpoints, admin_port, internal_port,
public_port, expected)
diff --git a/hooks/charmhelpers/contrib/openstack/cert_utils.py b/hooks/charmhelpers/contrib/openstack/cert_utils.py
index 3a3c6de..47b8603 100644
--- a/hooks/charmhelpers/contrib/openstack/cert_utils.py
+++ b/hooks/charmhelpers/contrib/openstack/cert_utils.py
@@ -180,13 +180,17 @@ def create_ip_cert_links(ssl_dir, custom_hostname_link=None):
os.symlink(hostname_key, custom_key)
-def install_certs(ssl_dir, certs, chain=None):
+def install_certs(ssl_dir, certs, chain=None, user='root', group='root'):
"""Install the certs passed into the ssl dir and append the chain if
provided.
:param ssl_dir: str Directory to create symlinks in
:param certs: {} {'cn': {'cert': 'CERT', 'key': 'KEY'}}
:param chain: str Chain to be appended to certs
+ :param user: (Optional) Owner of certificate files. Defaults to 'root'
+ :type user: str
+ :param group: (Optional) Group of certificate files. Defaults to 'root'
+ :type group: str
"""
for cn, bundle in certs.items():
cert_filename = 'cert_{}'.format(cn)
@@ -197,21 +201,25 @@ def install_certs(ssl_dir, certs, chain=None):
# trust certs signed by an intermediate in the chain
cert_data = cert_data + os.linesep + chain
write_file(
- path=os.path.join(ssl_dir, cert_filename),
+ path=os.path.join(ssl_dir, cert_filename), owner=user, group=group,
content=cert_data, perms=0o640)
write_file(
- path=os.path.join(ssl_dir, key_filename),
+ path=os.path.join(ssl_dir, key_filename), owner=user, group=group,
content=bundle['key'], perms=0o640)
def process_certificates(service_name, relation_id, unit,
- custom_hostname_link=None):
+ custom_hostname_link=None, user='root', group='root'):
"""Process the certificates supplied down the relation
:param service_name: str Name of service the certifcates are for.
:param relation_id: str Relation id providing the certs
:param unit: str Unit providing the certs
:param custom_hostname_link: str Name of custom link to create
+ :param user: (Optional) Owner of certificate files. Defaults to 'root'
+ :type user: str
+ :param group: (Optional) Group of certificate files. Defaults to 'root'
+ :type group: str
"""
data = relation_get(rid=relation_id, unit=unit)
ssl_dir = os.path.join('/etc/apache2/ssl/', service_name)
@@ -223,7 +231,7 @@ def process_certificates(service_name, relation_id, unit,
if certs:
certs = json.loads(certs)
install_ca_cert(ca.encode())
- install_certs(ssl_dir, certs, chain)
+ install_certs(ssl_dir, certs, chain, user=user, group=group)
create_ip_cert_links(
ssl_dir,
custom_hostname_link=custom_hostname_link)
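
A minimal usage sketch (not part of the diff) of the new user/group options on process_certificates(); the service name, relation id, unit and owner below are illustrative and assume a Juju hook environment with this charmhelpers tree importable:

    from charmhelpers.contrib.openstack.cert_utils import process_certificates

    # Install certs supplied on the 'certificates' relation, owned by a
    # service user rather than root (new user/group parameters).
    process_certificates('nova-compute',    # service the certs are for
                         'certificates:0',  # relation id providing certs
                         'vault/0',         # unit providing the certs
                         user='nova', group='nova')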
diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py
index 8a20375..d513371 100644
--- a/hooks/charmhelpers/contrib/openstack/context.py
+++ b/hooks/charmhelpers/contrib/openstack/context.py
@@ -29,6 +29,7 @@ from charmhelpers.fetch import (
filter_installed_packages,
)
from charmhelpers.core.hookenv import (
+ NoNetworkBinding,
config,
is_relation_made,
local_unit,
@@ -791,6 +792,7 @@ class ApacheSSLContext(OSContextGenerator):
# and service namespace accordingly.
external_ports = []
service_namespace = None
+ user = group = 'root'
def enable_modules(self):
cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http', 'headers']
@@ -809,9 +811,11 @@ class ApacheSSLContext(OSContextGenerator):
key_filename = 'key'
write_file(path=os.path.join(ssl_dir, cert_filename),
- content=b64decode(cert), perms=0o640)
+ content=b64decode(cert), owner=self.user,
+ group=self.group, perms=0o640)
write_file(path=os.path.join(ssl_dir, key_filename),
- content=b64decode(key), perms=0o640)
+ content=b64decode(key), owner=self.user,
+ group=self.group, perms=0o640)
def configure_ca(self):
ca_cert = get_ca_cert()
@@ -868,7 +872,7 @@ class ApacheSSLContext(OSContextGenerator):
addr = network_get_primary_address(
ADDRESS_MAP[net_type]['binding']
)
- except NotImplementedError:
+ except (NotImplementedError, NoNetworkBinding):
addr = fallback
endpoint = resolve_address(net_type)
@@ -1427,11 +1431,11 @@ class ZeroMQContext(OSContextGenerator):
ctxt = {}
if is_relation_made('zeromq-configuration', 'host'):
for rid in relation_ids('zeromq-configuration'):
- for unit in related_units(rid):
- ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
- ctxt['zmq_host'] = relation_get('host', unit, rid)
- ctxt['zmq_redis_address'] = relation_get(
- 'zmq_redis_address', unit, rid)
+ for unit in related_units(rid):
+ ctxt['zmq_nonce'] = relation_get('nonce', unit, rid)
+ ctxt['zmq_host'] = relation_get('host', unit, rid)
+ ctxt['zmq_redis_address'] = relation_get(
+ 'zmq_redis_address', unit, rid)
return ctxt
@@ -1931,3 +1935,30 @@ class VersionsContext(OSContextGenerator):
return {
'openstack_release': ostack,
'operating_system_release': osystem}
+
+
+class LogrotateContext(OSContextGenerator):
+ """Common context generator for logrotate."""
+
+ def __init__(self, location, interval, count):
+ """
+ :param location: Absolute path for the logrotate config file
+ :type location: str
+ :param interval: The interval for the rotations. Valid values are
+ 'daily', 'weekly', 'monthly', 'yearly'
+ :type interval: str
+ :param count: The logrotate count option configures the 'count' times
+ the log files are being rotated before being
+ :type count: int
+ """
+ self.location = location
+ self.interval = interval
+ self.count = 'rotate {}'.format(count)
+
+ def __call__(self):
+ ctxt = {
+ 'logrotate_logs_location': self.location,
+ 'logrotate_interval': self.interval,
+ 'logrotate_count': self.count,
+ }
+ return ctxt
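
A minimal sketch of the new LogrotateContext generator added above; the log path and rotation values are illustrative:

    from charmhelpers.contrib.openstack.context import LogrotateContext

    ctxt = LogrotateContext(location='/var/log/nova/*.log',
                            interval='weekly', count=4)()
    # Renders for a logrotate template as:
    # {'logrotate_logs_location': '/var/log/nova/*.log',
    #  'logrotate_interval': 'weekly',
    #  'logrotate_count': 'rotate 4'}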
diff --git a/hooks/charmhelpers/contrib/openstack/ip.py b/hooks/charmhelpers/contrib/openstack/ip.py
index 73102af..723aebc 100644
--- a/hooks/charmhelpers/contrib/openstack/ip.py
+++ b/hooks/charmhelpers/contrib/openstack/ip.py
@@ -13,6 +13,7 @@
# limitations under the License.
from charmhelpers.core.hookenv import (
+ NoNetworkBinding,
config,
unit_get,
service_name,
@@ -158,7 +159,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True):
if is_address_in_network(bound_cidr, vip):
resolved_address = vip
break
- except NotImplementedError:
+ except (NotImplementedError, NoNetworkBinding):
# If no net-splits configured and no support for extra
# bindings/network spaces so we expect a single vip
resolved_address = vips[0]
@@ -175,7 +176,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True):
# configuration is not in use
try:
resolved_address = network_get_primary_address(binding)
- except NotImplementedError:
+ except (NotImplementedError, NoNetworkBinding):
resolved_address = fallback_addr
if resolved_address is None:
diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py
index a623315..050f8af 100644
--- a/hooks/charmhelpers/contrib/openstack/templating.py
+++ b/hooks/charmhelpers/contrib/openstack/templating.py
@@ -183,7 +183,7 @@ class OSConfigRenderer(object):
/tmp/templates/grizzly/api-paste.ini
/tmp/templates/havana/api-paste.ini
- Since it was registered with the grizzly release, it first seraches
+ Since it was registered with the grizzly release, it first searches
the grizzly directory for nova.conf, then the templates dir.
When writing api-paste.ini, it will find the template in the grizzly
diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py
index 4e432a2..e5e2536 100644
--- a/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/utils.py
@@ -83,7 +83,8 @@ from charmhelpers.fetch import (
add_source as fetch_add_source,
SourceConfigError,
GPGKeyError,
- get_upstream_version
+ get_upstream_version,
+ filter_missing_packages
)
from charmhelpers.fetch.snap import (
@@ -193,7 +194,7 @@ SWIFT_CODENAMES = OrderedDict([
('rocky',
['2.18.0', '2.19.0']),
('stein',
- ['2.19.0']),
+ ['2.20.0']),
])
# >= Liberty version->codename mapping
@@ -309,6 +310,15 @@ def error_out(msg):
sys.exit(1)
+def get_installed_semantic_versioned_packages():
+ '''Get a list of installed packages which have OpenStack semantic versioning
+
+ :returns List of installed packages
+ :rtype: [pkg1, pkg2, ...]
+ '''
+ return filter_missing_packages(PACKAGE_CODENAMES.keys())
+
+
def get_os_codename_install_source(src):
'''Derive OpenStack release codename from a given installation source.'''
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
@@ -646,7 +656,7 @@ def openstack_upgrade_available(package):
else:
avail_vers = get_os_version_install_source(src)
apt.init()
- return apt.version_compare(avail_vers, cur_vers) == 1
+ return apt.version_compare(avail_vers, cur_vers) >= 1
def ensure_block_device(block_device):
@@ -972,7 +982,9 @@ def _ows_check_charm_func(state, message, charm_func_with_configs):
"""
if charm_func_with_configs:
charm_state, charm_message = charm_func_with_configs()
- if charm_state != 'active' and charm_state != 'unknown':
+ if (charm_state != 'active' and
+ charm_state != 'unknown' and
+ charm_state is not None):
state = workload_state_compare(state, charm_state)
if message:
charm_message = charm_message.replace("Incomplete relations: ",
@@ -1241,7 +1253,7 @@ def remote_restart(rel_name, remote_service=None):
def check_actually_paused(services=None, ports=None):
- """Check that services listed in the services object and and ports
+ """Check that services listed in the services object and ports
are actually closed (not listened to), to verify that the unit is
properly paused.
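
A sketch of how the new get_installed_semantic_versioned_packages() composes with the existing upgrade check (hook environment assumed; the log call is illustrative):

    from charmhelpers.core.hookenv import log
    from charmhelpers.contrib.openstack.utils import (
        get_installed_semantic_versioned_packages,
        openstack_upgrade_available,
    )

    # Only packages from PACKAGE_CODENAMES that are actually installed.
    for pkg in get_installed_semantic_versioned_packages():
        if openstack_upgrade_available(pkg):
            log('OpenStack upgrade available for {}'.format(pkg))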
diff --git a/hooks/charmhelpers/contrib/python/__init__.py b/hooks/charmhelpers/contrib/python/__init__.py
deleted file mode 100644
index d7567b8..0000000
--- a/hooks/charmhelpers/contrib/python/__init__.py
+++ /dev/null
@@ -1,13 +0,0 @@
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
diff --git a/hooks/charmhelpers/contrib/python/packages.py b/hooks/charmhelpers/contrib/python/packages.py
deleted file mode 100644
index 6e95028..0000000
--- a/hooks/charmhelpers/contrib/python/packages.py
+++ /dev/null
@@ -1,154 +0,0 @@
-#!/usr/bin/env python
-# coding: utf-8
-
-# Copyright 2014-2015 Canonical Limited.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-import os
-import six
-import subprocess
-import sys
-
-from charmhelpers.fetch import apt_install, apt_update
-from charmhelpers.core.hookenv import charm_dir, log
-
-__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
-
-
-def pip_execute(*args, **kwargs):
- """Overriden pip_execute() to stop sys.path being changed.
-
- The act of importing main from the pip module seems to cause add wheels
- from the /usr/share/python-wheels which are installed by various tools.
- This function ensures that sys.path remains the same after the call is
- executed.
- """
- try:
- _path = sys.path
- try:
- from pip import main as _pip_execute
- except ImportError:
- apt_update()
- if six.PY2:
- apt_install('python-pip')
- else:
- apt_install('python3-pip')
- from pip import main as _pip_execute
- _pip_execute(*args, **kwargs)
- finally:
- sys.path = _path
-
-
-def parse_options(given, available):
- """Given a set of options, check if available"""
- for key, value in sorted(given.items()):
- if not value:
- continue
- if key in available:
- yield "--{0}={1}".format(key, value)
-
-
-def pip_install_requirements(requirements, constraints=None, **options):
- """Install a requirements file.
-
- :param constraints: Path to pip constraints file.
- http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
- """
- command = ["install"]
-
- available_options = ('proxy', 'src', 'log', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- command.append("-r {0}".format(requirements))
- if constraints:
- command.append("-c {0}".format(constraints))
- log("Installing from file: {} with constraints {} "
- "and options: {}".format(requirements, constraints, command))
- else:
- log("Installing from file: {} with options: {}".format(requirements,
- command))
- pip_execute(command)
-
-
-def pip_install(package, fatal=False, upgrade=False, venv=None,
- constraints=None, **options):
- """Install a python package"""
- if venv:
- venv_python = os.path.join(venv, 'bin/pip')
- command = [venv_python, "install"]
- else:
- command = ["install"]
-
- available_options = ('proxy', 'src', 'log', 'index-url', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- if upgrade:
- command.append('--upgrade')
-
- if constraints:
- command.extend(['-c', constraints])
-
- if isinstance(package, list):
- command.extend(package)
- else:
- command.append(package)
-
- log("Installing {} package with options: {}".format(package,
- command))
- if venv:
- subprocess.check_call(command)
- else:
- pip_execute(command)
-
-
-def pip_uninstall(package, **options):
- """Uninstall a python package"""
- command = ["uninstall", "-q", "-y"]
-
- available_options = ('proxy', 'log', )
- for option in parse_options(options, available_options):
- command.append(option)
-
- if isinstance(package, list):
- command.extend(package)
- else:
- command.append(package)
-
- log("Uninstalling {} package with options: {}".format(package,
- command))
- pip_execute(command)
-
-
-def pip_list():
- """Returns the list of current python installed packages
- """
- return pip_execute(["list"])
-
-
-def pip_create_virtualenv(path=None):
- """Create an isolated Python environment."""
- if six.PY2:
- apt_install('python-virtualenv')
- else:
- apt_install('python3-virtualenv')
-
- if path:
- venv_path = path
- else:
- venv_path = os.path.join(charm_dir(), 'venv')
-
- if not os.path.exists(venv_path):
- subprocess.check_call(['virtualenv', venv_path])
diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py
index 7682820..2c62092 100644
--- a/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py
@@ -59,6 +59,7 @@ from charmhelpers.core.host import (
service_stop,
service_running,
umount,
+ cmp_pkgrevno,
)
from charmhelpers.fetch import (
apt_install,
@@ -178,7 +179,6 @@ class Pool(object):
"""
# read-only is easy, writeback is much harder
mode = get_cache_mode(self.service, cache_pool)
- version = ceph_version()
if mode == 'readonly':
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
@@ -186,7 +186,7 @@ class Pool(object):
elif mode == 'writeback':
pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
'cache-mode', cache_pool, 'forward']
- if version >= '10.1':
+ if cmp_pkgrevno('ceph-common', '10.1') >= 0:
# Jewel added a mandatory flag
pool_forward_cmd.append('--yes-i-really-mean-it')
@@ -196,7 +196,8 @@ class Pool(object):
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
- def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT):
+ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT,
+ device_class=None):
"""Return the number of placement groups to use when creating the pool.
Returns the number of placement groups which should be specified when
@@ -229,6 +230,9 @@ class Pool(object):
increased. NOTE: the default is primarily to handle the scenario
where related charms requiring pools has not been upgraded to
include an update to indicate their relative usage of the pools.
+ :param device_class: str. class of storage to use for basis of pgs
+ calculation; ceph supports nvme, ssd and hdd by default based
+ on presence of devices of each type in the deployment.
:return: int. The number of pgs to use.
"""
@@ -243,17 +247,20 @@ class Pool(object):
# If the expected-osd-count is specified, then use the max between
# the expected-osd-count and the actual osd_count
- osd_list = get_osds(self.service)
+ osd_list = get_osds(self.service, device_class)
expected = config('expected-osd-count') or 0
if osd_list:
- osd_count = max(expected, len(osd_list))
+ if device_class:
+ osd_count = len(osd_list)
+ else:
+ osd_count = max(expected, len(osd_list))
# Log a message to provide some insight if the calculations claim
# to be off because someone is setting the expected count and
# there are more OSDs in reality. Try to make a proper guess
# based upon the cluster itself.
- if expected and osd_count != expected:
+ if not device_class and expected and osd_count != expected:
log("Found more OSDs than provided expected count. "
"Using the actual count instead", INFO)
elif expected:
@@ -575,21 +582,24 @@ def remove_pool_snapshot(service, pool_name, snapshot_name):
raise
-# max_bytes should be an int or long
-def set_pool_quota(service, pool_name, max_bytes):
+def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None):
"""
- :param service: six.string_types. The Ceph user name to run the command under
- :param pool_name: six.string_types
- :param max_bytes: int or long
- :return: None. Can raise CalledProcessError
+ :param service: The Ceph user name to run the command under
+ :type service: str
+ :param pool_name: Name of pool
+ :type pool_name: str
+ :param max_bytes: Maximum bytes quota to apply
+ :type max_bytes: int
+ :param max_objects: Maximum objects quota to apply
+ :type max_objects: int
+ :raises: subprocess.CalledProcessError
"""
- # Set a byte quota on a RADOS pool in ceph.
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
- 'max_bytes', str(max_bytes)]
- try:
- check_call(cmd)
- except CalledProcessError:
- raise
+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name]
+ if max_bytes:
+ cmd = cmd + ['max_bytes', str(max_bytes)]
+ if max_objects:
+ cmd = cmd + ['max_objects', str(max_objects)]
+ check_call(cmd)
def remove_pool_quota(service, pool_name):
@@ -626,7 +636,8 @@ def remove_erasure_profile(service, profile_name):
def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
failure_domain='host',
data_chunks=2, coding_chunks=1,
- locality=None, durability_estimator=None):
+ locality=None, durability_estimator=None,
+ device_class=None):
"""
Create a new erasure code profile if one does not already exist for it. Updates
the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
@@ -640,10 +651,9 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
:param coding_chunks: int
:param locality: int
:param durability_estimator: int
+ :param device_class: six.string_types
:return: None. Can raise CalledProcessError
"""
- version = ceph_version()
-
# Ensure this failure_domain is allowed by Ceph
validator(failure_domain, six.string_types,
['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
@@ -654,12 +664,20 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
if locality is not None and durability_estimator is not None:
raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
+ luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
# failure_domain changed in luminous
- if version and version >= '12.0.0':
+ if luminous_or_later:
cmd.append('crush-failure-domain=' + failure_domain)
else:
cmd.append('ruleset-failure-domain=' + failure_domain)
+ # device class new in luminous
+ if luminous_or_later and device_class:
+ cmd.append('crush-device-class={}'.format(device_class))
+ else:
+ log('Skipping device class configuration (ceph < 12.0.0)',
+ level=DEBUG)
+
# Add plugin specific information
if locality is not None:
# For local erasure codes
@@ -744,20 +762,26 @@ def pool_exists(service, name):
return name in out.split()
-def get_osds(service):
+def get_osds(service, device_class=None):
"""Return a list of all Ceph Object Storage Daemons currently in the
- cluster.
+ cluster (optionally filtered by storage device class).
+
+ :param device_class: Class of storage device for OSD's
+ :type device_class: str
"""
- version = ceph_version()
- if version and version >= '0.56':
+ luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
+ if luminous_or_later and device_class:
+ out = check_output(['ceph', '--id', service,
+ 'osd', 'crush', 'class',
+ 'ls-osd', device_class,
+ '--format=json'])
+ else:
out = check_output(['ceph', '--id', service,
'osd', 'ls',
'--format=json'])
- if six.PY3:
- out = out.decode('UTF-8')
- return json.loads(out)
-
- return None
+ if six.PY3:
+ out = out.decode('UTF-8')
+ return json.loads(out)
def install():
@@ -811,7 +835,7 @@ def set_app_name_for_pool(client, pool, name):
:raises: CalledProcessError if ceph call fails
"""
- if ceph_version() >= '12.0.0':
+ if cmp_pkgrevno('ceph-common', '12.0.0') >= 0:
cmd = ['ceph', '--id', client, 'osd', 'pool',
'application', 'enable', pool, name]
check_call(cmd)
@@ -856,12 +880,22 @@ def _keyring_path(service):
return KEYRING.format(service)
-def create_keyring(service, key):
- """Create a new Ceph keyring containing key."""
+def add_key(service, key):
+ """
+ Add a key to a keyring.
+
+ Creates the keyring if it doesn't already exist.
+
+ Logs and returns if the key is already in the keyring.
+ """
keyring = _keyring_path(service)
if os.path.exists(keyring):
- log('Ceph keyring exists at %s.' % keyring, level=WARNING)
- return
+ with open(keyring, 'r') as ring:
+ if key in ring.read():
+ log('Ceph keyring exists at %s and has not changed.' % keyring,
+ level=DEBUG)
+ return
+ log('Updating existing keyring %s.' % keyring, level=DEBUG)
cmd = ['ceph-authtool', keyring, '--create-keyring',
'--name=client.{}'.format(service), '--add-key={}'.format(key)]
@@ -869,6 +903,11 @@ def create_keyring(service, key):
log('Created new ceph keyring at %s.' % keyring, level=DEBUG)
+def create_keyring(service, key):
+ """Deprecated. Please use the more accurately named 'add_key'"""
+ return add_key(service, key)
+
+
def delete_keyring(service):
"""Delete an existing Ceph keyring."""
keyring = _keyring_path(service)
@@ -905,7 +944,7 @@ def get_ceph_nodes(relation='ceph'):
def configure(service, key, auth, use_syslog):
"""Perform basic configuration of Ceph."""
- create_keyring(service, key)
+ add_key(service, key)
create_key_file(service, key)
hosts = get_ceph_nodes()
with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
@@ -1068,7 +1107,7 @@ def ensure_ceph_keyring(service, user=None, group=None,
if not key:
return False
- create_keyring(service=service, key=key)
+ add_key(service=service, key=key)
keyring = _keyring_path(service)
if user and group:
check_call(['chown', '%s.%s' % (user, group), keyring])
@@ -1076,22 +1115,6 @@ def ensure_ceph_keyring(service, user=None, group=None,
return True
-def ceph_version():
- """Retrieve the local version of ceph."""
- if os.path.exists('/usr/bin/ceph'):
- cmd = ['ceph', '-v']
- output = check_output(cmd)
- if six.PY3:
- output = output.decode('UTF-8')
- output = output.split()
- if len(output) > 3:
- return output[2]
- else:
- return None
- else:
- return None
-
-
class CephBrokerRq(object):
"""Ceph broker request.
@@ -1132,14 +1155,47 @@ class CephBrokerRq(object):
'object-prefix-permissions': object_prefix_permissions})
def add_op_create_pool(self, name, replica_count=3, pg_num=None,
- weight=None, group=None, namespace=None):
- """Adds an operation to create a pool.
-
- @param pg_num setting: optional setting. If not provided, this value
- will be calculated by the broker based on how many OSDs are in the
- cluster at the time of creation. Note that, if provided, this value
- will be capped at the current available maximum.
- @param weight: the percentage of data the pool makes up
+ weight=None, group=None, namespace=None,
+ app_name=None, max_bytes=None, max_objects=None):
+ """DEPRECATED: Use ``add_op_create_replicated_pool()`` or
+ ``add_op_create_erasure_pool()`` instead.
+ """
+ return self.add_op_create_replicated_pool(
+ name, replica_count=replica_count, pg_num=pg_num, weight=weight,
+ group=group, namespace=namespace, app_name=app_name,
+ max_bytes=max_bytes, max_objects=max_objects)
+
+ def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None,
+ weight=None, group=None, namespace=None,
+ app_name=None, max_bytes=None,
+ max_objects=None):
+ """Adds an operation to create a replicated pool.
+
+ :param name: Name of pool to create
+ :type name: str
+ :param replica_count: Number of copies Ceph should keep of your data.
+ :type replica_count: int
+ :param pg_num: Request specific number of Placement Groups to create
+ for pool.
+ :type pg_num: int
+ :param weight: The percentage of data that is expected to be contained
+ in the pool from the total available space on the OSDs.
+ Used to calculate number of Placement Groups to create
+ for pool.
+ :type weight: float
+ :param group: Group to add pool to
+ :type group: str
+ :param namespace: Group namespace
+ :type namespace: str
+ :param app_name: (Optional) Tag pool with application name. Note that
+ there is certain protocols emerging upstream with
+ regard to meaningful application names to use.
+ Examples are ``rbd`` and ``rgw``.
+ :type app_name: str
+ :param max_bytes: Maximum bytes quota to apply
+ :type max_bytes: int
+ :param max_objects: Maximum objects quota to apply
+ :type max_objects: int
"""
if pg_num and weight:
raise ValueError('pg_num and weight are mutually exclusive')
@@ -1147,7 +1203,41 @@ class CephBrokerRq(object):
self.ops.append({'op': 'create-pool', 'name': name,
'replicas': replica_count, 'pg_num': pg_num,
'weight': weight, 'group': group,
- 'group-namespace': namespace})
+ 'group-namespace': namespace, 'app-name': app_name,
+ 'max-bytes': max_bytes, 'max-objects': max_objects})
+
+ def add_op_create_erasure_pool(self, name, erasure_profile=None,
+ weight=None, group=None, app_name=None,
+ max_bytes=None, max_objects=None):
+ """Adds an operation to create a erasure coded pool.
+
+ :param name: Name of pool to create
+ :type name: str
+ :param erasure_profile: Name of erasure code profile to use. If not
+ set the ceph-mon unit handling the broker
+ request will set its default value.
+ :type erasure_profile: str
+ :param weight: The percentage of data that is expected to be contained
+ in the pool from the total available space on the OSDs.
+ :type weight: float
+ :param group: Group to add pool to
+ :type group: str
+ :param app_name: (Optional) Tag pool with application name. Note that
+ there is certain protocols emerging upstream with
+ regard to meaningful application names to use.
+ Examples are ``rbd`` and ``rgw``.
+ :type app_name: str
+ :param max_bytes: Maximum bytes quota to apply
+ :type max_bytes: int
+ :param max_objects: Maximum objects quota to apply
+ :type max_objects: int
+ """
+ self.ops.append({'op': 'create-pool', 'name': name,
+ 'pool-type': 'erasure',
+ 'erasure-profile': erasure_profile,
+ 'weight': weight,
+ 'group': group, 'app-name': app_name,
+ 'max-bytes': max_bytes, 'max-objects': max_objects})
def set_ops(self, ops):
"""Set request ops to provided value.
diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py
index 6f846b0..c57aaf3 100644
--- a/hooks/charmhelpers/contrib/storage/linux/utils.py
+++ b/hooks/charmhelpers/contrib/storage/linux/utils.py
@@ -17,12 +17,53 @@ import re
from stat import S_ISBLK
from subprocess import (
+ CalledProcessError,
check_call,
check_output,
call
)
+def _luks_uuid(dev):
+ """
+ Check to see if dev is a LUKS encrypted volume, returning the UUID
+ of volume if it is.
+
+ :param: dev: path to block device to check.
+ :returns: str. UUID of LUKS device or None if not a LUKS device
+ """
+ try:
+ cmd = ['cryptsetup', 'luksUUID', dev]
+ return check_output(cmd).decode('UTF-8').strip()
+ except CalledProcessError:
+ return None
+
+
+def is_luks_device(dev):
+ """
+ Determine if dev is a LUKS-formatted block device.
+
+ :param: dev: A full path to a block device to check for LUKS header
+ presence
+ :returns: boolean: indicates whether a device is used based on LUKS header.
+ """
+ return True if _luks_uuid(dev) else False
+
+
+def is_mapped_luks_device(dev):
+ """
+ Determine if dev is a mapped LUKS device
+ :param: dev: A full path to a block device to be checked
+ :returns: boolean: indicates whether a device is mapped
+ """
+ _, dirs, _ = next(os.walk(
+ '/sys/class/block/{}/holders/'
+ .format(os.path.basename(os.path.realpath(dev))))
+ )
+ is_held = len(dirs) > 0
+ return is_held and is_luks_device(dev)
+
+
def is_block_device(path):
'''
Confirm device at path is a valid block device node.
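
A sketch of the new LUKS helpers; the device path is illustrative and cryptsetup must be installed for the underlying _luks_uuid() call to succeed:

    from charmhelpers.contrib.storage.linux.utils import (
        is_luks_device,
        is_mapped_luks_device,
    )

    dev = '/dev/sdb'  # illustrative block device
    if is_luks_device(dev) and not is_mapped_luks_device(dev):
        # LUKS-formatted, but not yet opened via device-mapper
        print('{} needs luksOpen before use'.format(dev))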
diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py
index 2e28765..4744eb4 100644
--- a/hooks/charmhelpers/core/hookenv.py
+++ b/hooks/charmhelpers/core/hookenv.py
@@ -50,6 +50,11 @@ TRACE = "TRACE"
MARKER = object()
SH_MAX_ARG = 131071
+
+RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. '
+ 'This may not be compatible with software you are '
+ 'running in your shell.')
+
cache = {}
@@ -1414,3 +1419,72 @@ def unit_doomed(unit=None):
# I don't think 'dead' units ever show up in the goal-state, but
# check anyway in addition to 'dying'.
return units[unit]['status'] in ('dying', 'dead')
+
+
+def env_proxy_settings(selected_settings=None):
+ """Get proxy settings from process environment variables.
+
+ Get charm proxy settings from environment variables that correspond to
+ juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2,
+ see lp:1782236) in a format suitable for passing to an application that
+ reacts to proxy settings passed as environment variables. Some applications
+ support lowercase or uppercase notation (e.g. curl), some support only
+ lowercase (e.g. wget), there are also subjectively rare cases of only
+ uppercase notation support. no_proxy CIDR and wildcard support also varies
+ between runtimes and applications as there is no enforced standard.
+
+ Some applications may connect to multiple destinations and expose config
+ options that would affect only proxy settings for a specific destination
+ these should be handled in charms in an application-specific manner.
+
+ :param selected_settings: format only a subset of possible settings
+ :type selected_settings: list
+ :rtype: Option(None, dict[str, str])
+ """
+ SUPPORTED_SETTINGS = {
+ 'http': 'HTTP_PROXY',
+ 'https': 'HTTPS_PROXY',
+ 'no_proxy': 'NO_PROXY',
+ 'ftp': 'FTP_PROXY'
+ }
+ if selected_settings is None:
+ selected_settings = SUPPORTED_SETTINGS
+
+ selected_vars = [v for k, v in SUPPORTED_SETTINGS.items()
+ if k in selected_settings]
+ proxy_settings = {}
+ for var in selected_vars:
+ var_val = os.getenv(var)
+ if var_val:
+ proxy_settings[var] = var_val
+ proxy_settings[var.lower()] = var_val
+ # Now handle juju-prefixed environment variables. The legacy vs new
+ # environment variable usage is mutually exclusive
+ charm_var_val = os.getenv('JUJU_CHARM_{}'.format(var))
+ if charm_var_val:
+ proxy_settings[var] = charm_var_val
+ proxy_settings[var.lower()] = charm_var_val
+ if 'no_proxy' in proxy_settings:
+ if _contains_range(proxy_settings['no_proxy']):
+ log(RANGE_WARNING, level=WARNING)
+ return proxy_settings if proxy_settings else None
+
+
+def _contains_range(addresses):
+ """Check for cidr or wildcard domain in a string.
+
+ Given a string comprising a comma seperated list of ip addresses
+ and domain names, determine whether the string contains IP ranges
+ or wildcard domains.
+
+ :param addresses: comma seperated list of domains and ip addresses.
+ :type addresses: str
+ """
+ return (
+ # Test for cidr (e.g. 10.20.20.0/24)
+ "/" in addresses or
+ # Test for wildcard domains (*.foo.com or .foo.com)
+ "*" in addresses or
+ addresses.startswith(".") or
+ ",." in addresses or
+ " ." in addresses)
diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py
index 79953a4..32754ff 100644
--- a/hooks/charmhelpers/core/host.py
+++ b/hooks/charmhelpers/core/host.py
@@ -46,6 +46,8 @@ if __platform__ == "ubuntu":
lsb_release,
cmp_pkgrevno,
CompareHostReleases,
+ get_distrib_codename,
+ arch
) # flake8: noqa -- ignore F401 for this import
elif __platform__ == "centos":
from charmhelpers.core.host_factory.centos import ( # NOQA:F401
diff --git a/hooks/charmhelpers/core/host_factory/ubuntu.py b/hooks/charmhelpers/core/host_factory/ubuntu.py
index a6d375a..a3162fa 100644
--- a/hooks/charmhelpers/core/host_factory/ubuntu.py
+++ b/hooks/charmhelpers/core/host_factory/ubuntu.py
@@ -1,5 +1,6 @@
import subprocess
+from charmhelpers.core.hookenv import cached
from charmhelpers.core.strutils import BasicStringComparator
@@ -72,6 +73,14 @@ def lsb_release():
return d
+def get_distrib_codename():
+ """Return the codename of the distribution
+ :returns: The codename
+ :rtype: str
+ """
+ return lsb_release()['DISTRIB_CODENAME'].lower()
+
+
def cmp_pkgrevno(package, revno, pkgcache=None):
"""Compare supplied revno with the revno of the installed package.
@@ -89,3 +98,16 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
pkgcache = apt_cache()
pkg = pkgcache[package]
return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
+
+
+@cached
+def arch():
+ """Return the package architecture as a string.
+
+ :returns: the architecture
+ :rtype: str
+ :raises: subprocess.CalledProcessError if dpkg command fails
+ """
+ return subprocess.check_output(
+ ['dpkg', '--print-architecture']
+ ).rstrip().decode('UTF-8')
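
A sketch of the two new helpers, re-exported via charmhelpers.core.host on Ubuntu (see the host.py hunk above):

    from charmhelpers.core.host import arch, get_distrib_codename

    series = get_distrib_codename()  # e.g. 'bionic', from lsb_release
    machine = arch()                 # e.g. 'amd64', cached result of
                                     # 'dpkg --print-architecture'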
diff --git a/hooks/charmhelpers/core/sysctl.py b/hooks/charmhelpers/core/sysctl.py
index 1f188d8..f1f4a28 100644
--- a/hooks/charmhelpers/core/sysctl.py
+++ b/hooks/charmhelpers/core/sysctl.py
@@ -28,7 +28,7 @@ from charmhelpers.core.hookenv import (
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
-def create(sysctl_dict, sysctl_file):
+def create(sysctl_dict, sysctl_file, ignore=False):
"""Creates a sysctl.conf file from a YAML associative array
:param sysctl_dict: a dict or YAML-formatted string of sysctl
@@ -36,6 +36,8 @@ def create(sysctl_dict, sysctl_file):
:type sysctl_dict: str
:param sysctl_file: path to the sysctl file to be saved
:type sysctl_file: str or unicode
+ :param ignore: If True, ignore "unknown variable" errors.
+ :type ignore: bool
:returns: None
"""
if type(sysctl_dict) is not dict:
@@ -52,7 +54,12 @@ def create(sysctl_dict, sysctl_file):
for key, value in sysctl_dict_parsed.items():
fd.write("{}={}\n".format(key, value))
- log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
+ log("Updating sysctl_file: {} values: {}".format(sysctl_file,
+ sysctl_dict_parsed),
level=DEBUG)
- check_call(["sysctl", "-p", sysctl_file])
+ call = ["sysctl", "-p", sysctl_file]
+ if ignore:
+ call.append("-e")
+
+ check_call(call)
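
A sketch of the new ignore flag; the sysctl key and target file are illustrative:

    from charmhelpers.core.sysctl import create

    # ignore=True appends '-e' so unknown keys (e.g. a module not loaded
    # on this kernel) no longer fail the hook.
    create({'net.ipv4.ip_forward': 1},
           '/etc/sysctl.d/50-nova-lxd.conf',
           ignore=True)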
diff --git a/hooks/charmhelpers/fetch/ubuntu.py b/hooks/charmhelpers/fetch/ubuntu.py
index 8a5cadf..c6d9341 100644
--- a/hooks/charmhelpers/fetch/ubuntu.py
+++ b/hooks/charmhelpers/fetch/ubuntu.py
@@ -19,15 +19,14 @@ import re
import six
import time
import subprocess
-from tempfile import NamedTemporaryFile
-from charmhelpers.core.host import (
- lsb_release
-)
+from charmhelpers.core.host import get_distrib_codename
+
from charmhelpers.core.hookenv import (
log,
DEBUG,
WARNING,
+ env_proxy_settings,
)
from charmhelpers.fetch import SourceConfigError, GPGKeyError
@@ -303,12 +302,17 @@ def import_key(key):
"""Import an ASCII Armor key.
A Radix64 format keyid is also supported for backwards
- compatibility, but should never be used; the key retrieval
- mechanism is insecure and subject to man-in-the-middle attacks
- voiding all signature checks using that key.
-
- :param keyid: The key in ASCII armor format,
- including BEGIN and END markers.
+ compatibility. In this case Ubuntu keyserver will be
+ queried for a key via HTTPS by its keyid. This method
+ is less preferrable because https proxy servers may
+ require traffic decryption which is equivalent to a
+ man-in-the-middle attack (a proxy server impersonates
+ keyserver TLS certificates and has to be explicitly
+ trusted by the system).
+
+ :param key: A GPG key in ASCII armor format,
+ including BEGIN and END markers or a keyid.
+ :type key: (bytes, str)
:raises: GPGKeyError if the key could not be imported
"""
key = key.strip()
@@ -319,35 +323,131 @@ def import_key(key):
log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
if ('-----BEGIN PGP PUBLIC KEY BLOCK-----' in key and
'-----END PGP PUBLIC KEY BLOCK-----' in key):
- log("Importing ASCII Armor PGP key", level=DEBUG)
- with NamedTemporaryFile() as keyfile:
- with open(keyfile.name, 'w') as fd:
- fd.write(key)
- fd.write("\n")
- cmd = ['apt-key', 'add', keyfile.name]
- try:
- subprocess.check_call(cmd)
- except subprocess.CalledProcessError:
- error = "Error importing PGP key '{}'".format(key)
- log(error)
- raise GPGKeyError(error)
+ log("Writing provided PGP key in the binary format", level=DEBUG)
+ if six.PY3:
+ key_bytes = key.encode('utf-8')
+ else:
+ key_bytes = key
+ key_name = _get_keyid_by_gpg_key(key_bytes)
+ key_gpg = _dearmor_gpg_key(key_bytes)
+ _write_apt_gpg_keyfile(key_name=key_name, key_material=key_gpg)
else:
raise GPGKeyError("ASCII armor markers missing from GPG key")
else:
- # We should only send things obviously not a keyid offsite
- # via this unsecured protocol, as it may be a secret or part
- # of one.
log("PGP key found (looks like Radix64 format)", level=WARNING)
- log("INSECURLY importing PGP key from keyserver; "
+ log("SECURELY importing PGP key from keyserver; "
"full key not provided.", level=WARNING)
- cmd = ['apt-key', 'adv', '--keyserver',
- 'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
- try:
- _run_with_retries(cmd)
- except subprocess.CalledProcessError:
- error = "Error importing PGP key '{}'".format(key)
- log(error)
- raise GPGKeyError(error)
+ # as of bionic add-apt-repository uses curl with an HTTPS keyserver URL
+ # to retrieve GPG keys. `apt-key adv` command is deprecated as is
+ # apt-key in general as noted in its manpage. See lp:1433761 for more
+ # history. Instead, /etc/apt/trusted.gpg.d is used directly to drop
+ # gpg
+ key_asc = _get_key_by_keyid(key)
+ # write the key in GPG format so that apt-key list shows it
+ key_gpg = _dearmor_gpg_key(key_asc)
+ _write_apt_gpg_keyfile(key_name=key, key_material=key_gpg)
+
+
+def _get_keyid_by_gpg_key(key_material):
+ """Get a GPG key fingerprint by GPG key material.
+ Gets a GPG key fingerprint (40-digit, 160-bit) by the ASCII armor-encoded
+ or binary GPG key material. Can be used, for example, to generate file
+ names for keys passed via charm options.
+
+ :param key_material: ASCII armor-encoded or binary GPG key material
+ :type key_material: bytes
+ :raises: GPGKeyError if invalid key material has been provided
+ :returns: A GPG key fingerprint
+ :rtype: str
+ """
+ # Use the same gpg command for both Xenial and Bionic
+ cmd = 'gpg --with-colons --with-fingerprint'
+ ps = subprocess.Popen(cmd.split(),
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE)
+ out, err = ps.communicate(input=key_material)
+ if six.PY3:
+ out = out.decode('utf-8')
+ err = err.decode('utf-8')
+ if 'gpg: no valid OpenPGP data found.' in err:
+ raise GPGKeyError('Invalid GPG key material provided')
+ # from gnupg2 docs: fpr :: Fingerprint (fingerprint is in field 10)
+ return re.search(r"^fpr:{9}([0-9A-F]{40}):$", out, re.MULTILINE).group(1)
+
+
+def _get_key_by_keyid(keyid):
+ """Get a key via HTTPS from the Ubuntu keyserver.
+ Different key ID formats are supported by SKS keyservers (the longer ones
+ are more secure, see "dead beef attack" and https://evil32.com/). Since
+ HTTPS is used, if SSLBump-like HTTPS proxies are in place, they will
+ impersonate keyserver.ubuntu.com and generate a certificate with
+ keyserver.ubuntu.com in the CN field or in SubjAltName fields of a
+ certificate. If such proxy behavior is expected it is necessary to add the
+ CA certificate chain containing the intermediate CA of the SSLBump proxy to
+ every machine that this code runs on via ca-certs cloud-init directive (via
+ cloudinit-userdata model-config) or via other means (such as through a
+ custom charm option). Also note that DNS resolution for the hostname in a
+ URL is done at a proxy server - not at the client side.
+
+ 8-digit (32 bit) key ID
+ https://keyserver.ubuntu.com/pks/lookup?search=0x4652B4E6
+ 16-digit (64 bit) key ID
+ https://keyserver.ubuntu.com/pks/lookup?search=0x6E85A86E4652B4E6
+ 40-digit key ID:
+ https://keyserver.ubuntu.com/pks/lookup?search=0x35F77D63B5CEC106C577ED856E85A86E4652B4E6
+
+ :param keyid: An 8, 16 or 40 hex digit keyid to find a key for
+ :type keyid: (bytes, str)
+ :returns: A key material for the specified GPG key id
+ :rtype: (str, bytes)
+ :raises: subprocess.CalledProcessError
+ """
+ # options=mr - machine-readable output (disables html wrappers)
+ keyserver_url = ('https://keyserver.ubuntu.com'
+ '/pks/lookup?op=get&options=mr&exact=on&search=0x{}')
+ curl_cmd = ['curl', keyserver_url.format(keyid)]
+ # use proxy server settings in order to retrieve the key
+ return subprocess.check_output(curl_cmd,
+ env=env_proxy_settings(['https']))
+
+
+def _dearmor_gpg_key(key_asc):
+ """Converts a GPG key in the ASCII armor format to the binary format.
+
+ :param key_asc: A GPG key in ASCII armor format.
+ :type key_asc: (str, bytes)
+ :returns: A GPG key in binary format
+ :rtype: (str, bytes)
+ :raises: GPGKeyError
+ """
+ ps = subprocess.Popen(['gpg', '--dearmor'],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ stdin=subprocess.PIPE)
+ out, err = ps.communicate(input=key_asc)
+ # no need to decode output as it is binary (invalid utf-8), only error
+ if six.PY3:
+ err = err.decode('utf-8')
+ if 'gpg: no valid OpenPGP data found.' in err:
+ raise GPGKeyError('Invalid GPG key material. Check your network setup'
+ ' (MTU, routing, DNS) and/or proxy server settings'
+ ' as well as destination keyserver status.')
+ else:
+ return out
+
+
+def _write_apt_gpg_keyfile(key_name, key_material):
+ """Writes GPG key material into a file at a provided path.
+
+ :param key_name: A key name to use for a key file (could be a fingerprint)
+ :type key_name: str
+ :param key_material: A GPG key material (binary)
+ :type key_material: (str, bytes)
+ """
+ with open('/etc/apt/trusted.gpg.d/{}.gpg'.format(key_name),
+ 'wb') as keyf:
+ keyf.write(key_material)
def add_source(source, key=None, fail_invalid=False):
@@ -442,13 +542,13 @@ def add_source(source, key=None, fail_invalid=False):
def _add_proposed():
"""Add the PROPOSED_POCKET as /etc/apt/source.list.d/proposed.list
- Uses lsb_release()['DISTRIB_CODENAME'] to determine the correct staza for
+ Uses get_distrib_codename to determine the correct stanza for
the deb line.
For intel architecutres PROPOSED_POCKET is used for the release, but for
other architectures PROPOSED_PORTS_POCKET is used for the release.
"""
- release = lsb_release()['DISTRIB_CODENAME']
+ release = get_distrib_codename()
arch = platform.machine()
if arch not in six.iterkeys(ARCH_TO_PROPOSED_POCKET):
raise SourceConfigError("Arch {} not supported for (distro-)proposed"
@@ -461,11 +561,16 @@ def _add_apt_repository(spec):
"""Add the spec using add_apt_repository
:param spec: the parameter to pass to add_apt_repository
+ :type spec: str
"""
if '{series}' in spec:
- series = lsb_release()['DISTRIB_CODENAME']
+ series = get_distrib_codename()
spec = spec.replace('{series}', series)
- _run_with_retries(['add-apt-repository', '--yes', spec])
+ # software-properties package for bionic properly reacts to proxy settings
+ # passed as environment variables (See lp:1433761). This is not the case
+ # LTS and non-LTS releases below bionic.
+ _run_with_retries(['add-apt-repository', '--yes', spec],
+ cmd_env=env_proxy_settings(['https']))
def _add_cloud_pocket(pocket):
@@ -534,7 +639,7 @@ def _verify_is_ubuntu_rel(release, os_release):
:raises: SourceConfigError if the release is not the same as the ubuntu
release.
"""
- ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
+ ubuntu_rel = get_distrib_codename()
if release != ubuntu_rel:
raise SourceConfigError(
'Invalid Cloud Archive release specified: {}-{} on this Ubuntu'
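
A sketch of the two reworked import_key() paths; the key file and keyid are illustrative (the keyid is taken from the docstring examples above):

    from charmhelpers.fetch.ubuntu import import_key

    # Full ASCII armor: dearmored locally and written to
    # /etc/apt/trusted.gpg.d/<fingerprint>.gpg, no network access needed.
    with open('repo-key.asc') as f:  # illustrative path
        import_key(f.read())

    # Bare keyid: fetched from keyserver.ubuntu.com over HTTPS via curl,
    # honouring the juju https proxy settings.
    import_key('4652B4E6')  # illustrative 8-digit keyid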
