summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRyan Beisner <ryan.beisner@canonical.com>2018-01-19 12:09:15 +0000
committerRyan Beisner <ryan.beisner@canonical.com>2018-01-19 12:09:20 +0000
commit8257e3dd3066f498d9481ac9b4ff3d4b24b808e0 (patch)
tree7722c3b16d33da9c80ae50dcda5e60087b8aff63
parentc048ce6240494f36710018ee5058285d11df6a24 (diff)
downloadcharm-nova-lxd-8257e3dd3066f498d9481ac9b4ff3d4b24b808e0.zip
charm-nova-lxd-8257e3dd3066f498d9481ac9b4ff3d4b24b808e0.tar.gz
charm-nova-lxd-8257e3dd3066f498d9481ac9b4ff3d4b24b808e0.tar.bz2
Sync charm-helpers
Notable issues resolved: openstack_upgrade_available() broken for swift https://bugs.launchpad.net/charm-swift-proxy/+bug/1743847 haproxy context doesn't consider bindings https://bugs.launchpad.net/charm-helpers/+bug/1735421 regression in haproxy check https://bugs.launchpad.net/charm-helpers/+bug/1743287 Change-Id: Ieb48079184f507e4513dde20e32b3c7051de7c04
-rw-r--r--hooks/charmhelpers/contrib/openstack/amulet/deployment.py12
-rw-r--r--hooks/charmhelpers/contrib/openstack/amulet/utils.py9
-rw-r--r--hooks/charmhelpers/contrib/openstack/context.py115
-rwxr-xr-xhooks/charmhelpers/contrib/openstack/files/check_haproxy.sh2
-rwxr-xr-xhooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh2
-rw-r--r--hooks/charmhelpers/contrib/openstack/ha/utils.py175
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg8
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf15
-rw-r--r--hooks/charmhelpers/contrib/openstack/utils.py500
-rw-r--r--hooks/charmhelpers/contrib/storage/linux/ceph.py50
-rw-r--r--hooks/charmhelpers/contrib/storage/linux/lvm.py50
-rw-r--r--hooks/charmhelpers/core/hookenv.py2
-rw-r--r--hooks/charmhelpers/core/host.py2
-rw-r--r--hooks/charmhelpers/core/host_factory/ubuntu.py1
-rw-r--r--hooks/charmhelpers/core/unitdata.py2
-rw-r--r--tests/charmhelpers/contrib/openstack/amulet/deployment.py12
-rw-r--r--tests/charmhelpers/contrib/openstack/amulet/utils.py9
-rw-r--r--tests/charmhelpers/core/hookenv.py2
-rw-r--r--tests/charmhelpers/core/host.py2
-rw-r--r--tests/charmhelpers/core/host_factory/ubuntu.py1
-rw-r--r--tests/charmhelpers/core/unitdata.py2
21 files changed, 415 insertions, 558 deletions
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
index e37f283..5afbbd8 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -13,6 +13,7 @@
# limitations under the License.
import logging
+import os
import re
import sys
import six
@@ -185,7 +186,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
self.d.configure(service, config)
def _auto_wait_for_status(self, message=None, exclude_services=None,
- include_only=None, timeout=1800):
+ include_only=None, timeout=None):
"""Wait for all units to have a specific extended status, except
for any defined as excluded. Unless specified via message, any
status containing any case of 'ready' will be considered a match.
@@ -215,7 +216,10 @@ class OpenStackAmuletDeployment(AmuletDeployment):
:param timeout: Maximum time in seconds to wait for status match
:returns: None. Raises if timeout is hit.
"""
- self.log.info('Waiting for extended status on units...')
+ if not timeout:
+ timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800))
+ self.log.info('Waiting for extended status on units for {}s...'
+ ''.format(timeout))
all_services = self.d.services.keys()
@@ -252,9 +256,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
service_messages = {service: message for service in services}
# Check for idleness
- self.d.sentry.wait()
+ self.d.sentry.wait(timeout=timeout)
# Check for error states and bail early
- self.d.sentry.wait_for_status(self.d.juju_env, services)
+ self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout)
# Check for ready messages
self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
index b71b2b1..87f364d 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
@@ -858,9 +858,12 @@ class OpenStackAmuletUtils(AmuletUtils):
:returns: List of pool name, object count, kb disk space used
"""
df = self.get_ceph_df(sentry_unit)
- pool_name = df['pools'][pool_id]['name']
- obj_count = df['pools'][pool_id]['stats']['objects']
- kb_used = df['pools'][pool_id]['stats']['kb_used']
+ for pool in df['pools']:
+ if pool['id'] == pool_id:
+ pool_name = pool['name']
+ obj_count = pool['stats']['objects']
+ kb_used = pool['stats']['kb_used']
+
self.log.debug('Ceph {} pool (ID {}): {} objects, '
'{} kb used'.format(pool_name, pool_id,
obj_count, kb_used))
diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py
index e6c0e9f..7ada276 100644
--- a/hooks/charmhelpers/contrib/openstack/context.py
+++ b/hooks/charmhelpers/contrib/openstack/context.py
@@ -93,14 +93,14 @@ from charmhelpers.contrib.network.ip import (
format_ipv6_addr,
is_bridge_member,
is_ipv6_disabled,
+ get_relation_ip,
)
from charmhelpers.contrib.openstack.utils import (
config_flags_parser,
- get_host_ip,
- git_determine_usr_bin,
- git_determine_python_path,
enable_memcache,
snap_install_requested,
+ CompareOpenStackReleases,
+ os_release,
)
from charmhelpers.core.unitdata import kv
@@ -332,10 +332,7 @@ class IdentityServiceContext(OSContextGenerator):
self.rel_name = rel_name
self.interfaces = [self.rel_name]
- def __call__(self):
- log('Generating template context for ' + self.rel_name, level=DEBUG)
- ctxt = {}
-
+ def _setup_pki_cache(self):
if self.service and self.service_user:
# This is required for pki token signing if we don't want /tmp to
# be used.
@@ -345,6 +342,15 @@ class IdentityServiceContext(OSContextGenerator):
mkdir(path=cachedir, owner=self.service_user,
group=self.service_user, perms=0o700)
+ return cachedir
+ return None
+
+ def __call__(self):
+ log('Generating template context for ' + self.rel_name, level=DEBUG)
+ ctxt = {}
+
+ cachedir = self._setup_pki_cache()
+ if cachedir:
ctxt['signing_dir'] = cachedir
for rid in relation_ids(self.rel_name):
@@ -383,6 +389,62 @@ class IdentityServiceContext(OSContextGenerator):
return {}
+class IdentityCredentialsContext(IdentityServiceContext):
+ '''Context for identity-credentials interface type'''
+
+ def __init__(self,
+ service=None,
+ service_user=None,
+ rel_name='identity-credentials'):
+ super(IdentityCredentialsContext, self).__init__(service,
+ service_user,
+ rel_name)
+
+ def __call__(self):
+ log('Generating template context for ' + self.rel_name, level=DEBUG)
+ ctxt = {}
+
+ cachedir = self._setup_pki_cache()
+ if cachedir:
+ ctxt['signing_dir'] = cachedir
+
+ for rid in relation_ids(self.rel_name):
+ self.related = True
+ for unit in related_units(rid):
+ rdata = relation_get(rid=rid, unit=unit)
+ credentials_host = rdata.get('credentials_host')
+ credentials_host = (
+ format_ipv6_addr(credentials_host) or credentials_host
+ )
+ auth_host = rdata.get('auth_host')
+ auth_host = format_ipv6_addr(auth_host) or auth_host
+ svc_protocol = rdata.get('credentials_protocol') or 'http'
+ auth_protocol = rdata.get('auth_protocol') or 'http'
+ api_version = rdata.get('api_version') or '2.0'
+ ctxt.update({
+ 'service_port': rdata.get('credentials_port'),
+ 'service_host': credentials_host,
+ 'auth_host': auth_host,
+ 'auth_port': rdata.get('auth_port'),
+ 'admin_tenant_name': rdata.get('credentials_project'),
+ 'admin_tenant_id': rdata.get('credentials_project_id'),
+ 'admin_user': rdata.get('credentials_username'),
+ 'admin_password': rdata.get('credentials_password'),
+ 'service_protocol': svc_protocol,
+ 'auth_protocol': auth_protocol,
+ 'api_version': api_version
+ })
+
+ if float(api_version) > 2:
+ ctxt.update({'admin_domain_name':
+ rdata.get('domain')})
+
+ if self.context_complete(ctxt):
+ return ctxt
+
+ return {}
+
+
class AMQPContext(OSContextGenerator):
def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
@@ -564,11 +626,6 @@ class HAProxyContext(OSContextGenerator):
if not relation_ids('cluster') and not self.singlenode_mode:
return {}
- if config('prefer-ipv6'):
- addr = get_ipv6_addr(exc_list=[config('vip')])[0]
- else:
- addr = get_host_ip(unit_get('private-address'))
-
l_unit = local_unit().replace('/', '-')
cluster_hosts = {}
@@ -576,7 +633,15 @@ class HAProxyContext(OSContextGenerator):
# and associated backends
for addr_type in ADDRESS_TYPES:
cfg_opt = 'os-{}-network'.format(addr_type)
- laddr = get_address_in_network(config(cfg_opt))
+ # NOTE(thedac) For some reason the ADDRESS_MAP uses 'int' rather
+ # than 'internal'
+ if addr_type == 'internal':
+ _addr_map_type = INTERNAL
+ else:
+ _addr_map_type = addr_type
+ # Network spaces aware
+ laddr = get_relation_ip(ADDRESS_MAP[_addr_map_type]['binding'],
+ config(cfg_opt))
if laddr:
netmask = get_netmask_for_address(laddr)
cluster_hosts[laddr] = {
@@ -587,15 +652,19 @@ class HAProxyContext(OSContextGenerator):
}
for rid in relation_ids('cluster'):
for unit in sorted(related_units(rid)):
+ # API Charms will need to set {addr_type}-address with
+ # get_relation_ip(addr_type)
_laddr = relation_get('{}-address'.format(addr_type),
rid=rid, unit=unit)
if _laddr:
_unit = unit.replace('/', '-')
cluster_hosts[laddr]['backends'][_unit] = _laddr
- # NOTE(jamespage) add backend based on private address - this
- # with either be the only backend or the fallback if no acls
+ # NOTE(jamespage) add backend based on get_relation_ip - this
+ # will either be the only backend or the fallback if no acls
# match in the frontend
+ # Network spaces aware
+ addr = get_relation_ip('cluster')
cluster_hosts[addr] = {}
netmask = get_netmask_for_address(addr)
cluster_hosts[addr] = {
@@ -605,6 +674,8 @@ class HAProxyContext(OSContextGenerator):
}
for rid in relation_ids('cluster'):
for unit in sorted(related_units(rid)):
+ # API Charms will need to set their private-address with
+ # get_relation_ip('cluster')
_laddr = relation_get('private-address',
rid=rid, unit=unit)
if _laddr:
@@ -1321,8 +1392,6 @@ class WSGIWorkerConfigContext(WorkerConfigContext):
"public_processes": int(math.ceil(self.public_process_weight *
total_processes)),
"threads": 1,
- "usr_bin": git_determine_usr_bin(),
- "python_path": git_determine_python_path(),
}
return ctxt
@@ -1566,8 +1635,18 @@ class InternalEndpointContext(OSContextGenerator):
endpoints by default so this allows admins to optionally use internal
endpoints.
"""
+ def __init__(self, ost_rel_check_pkg_name):
+ self.ost_rel_check_pkg_name = ost_rel_check_pkg_name
+
def __call__(self):
- return {'use_internal_endpoints': config('use-internal-endpoints')}
+ ctxt = {'use_internal_endpoints': config('use-internal-endpoints')}
+ rel = os_release(self.ost_rel_check_pkg_name, base='icehouse')
+ if CompareOpenStackReleases(rel) >= 'pike':
+ ctxt['volume_api_version'] = '3'
+ else:
+ ctxt['volume_api_version'] = '2'
+
+ return ctxt
class AppArmorContext(OSContextGenerator):
diff --git a/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh b/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh
index 7aab129..1df55db 100755
--- a/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh
+++ b/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh
@@ -9,7 +9,7 @@
CRITICAL=0
NOTACTIVE=''
LOGFILE=/var/log/nagios/check_haproxy.log
-AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $4}')
+AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $3}')
typeset -i N_INSTANCES=0
for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg)
diff --git a/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh b/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
index 3ebb532..91ce024 100755
--- a/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
+++ b/hooks/charmhelpers/contrib/openstack/files/check_haproxy_queue_depth.sh
@@ -10,7 +10,7 @@
CURRQthrsh=0
MAXQthrsh=100
-AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')
+AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $3}')
HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)
diff --git a/hooks/charmhelpers/contrib/openstack/ha/utils.py b/hooks/charmhelpers/contrib/openstack/ha/utils.py
index 9a4d79c..6060ae5 100644
--- a/hooks/charmhelpers/contrib/openstack/ha/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/ha/utils.py
@@ -23,6 +23,8 @@
Helpers for high availability.
"""
+import json
+
import re
from charmhelpers.core.hookenv import (
@@ -32,6 +34,7 @@ from charmhelpers.core.hookenv import (
config,
status_set,
DEBUG,
+ WARNING,
)
from charmhelpers.core.host import (
@@ -40,6 +43,23 @@ from charmhelpers.core.host import (
from charmhelpers.contrib.openstack.ip import (
resolve_address,
+ is_ipv6,
+)
+
+from charmhelpers.contrib.network.ip import (
+ get_iface_for_address,
+ get_netmask_for_address,
+)
+
+from charmhelpers.contrib.hahelpers.cluster import (
+ get_hacluster_config
+)
+
+JSON_ENCODE_OPTIONS = dict(
+ sort_keys=True,
+ allow_nan=False,
+ indent=None,
+ separators=(',', ':'),
)
@@ -53,8 +73,8 @@ class DNSHAException(Exception):
def update_dns_ha_resource_params(resources, resource_params,
relation_id=None,
crm_ocf='ocf:maas:dns'):
- """ Check for os-*-hostname settings and update resource dictionaries for
- the HA relation.
+ """ Configure DNS-HA resources based on provided configuration and
+ update resource dictionaries for the HA relation.
@param resources: Pointer to dictionary of resources.
Usually instantiated in ha_joined().
@@ -64,7 +84,85 @@ def update_dns_ha_resource_params(resources, resource_params,
@param crm_ocf: Corosync Open Cluster Framework resource agent to use for
DNS HA
"""
+ _relation_data = {'resources': {}, 'resource_params': {}}
+ update_hacluster_dns_ha(charm_name(),
+ _relation_data,
+ crm_ocf)
+ resources.update(_relation_data['resources'])
+ resource_params.update(_relation_data['resource_params'])
+ relation_set(relation_id=relation_id, groups=_relation_data['groups'])
+
+
+def assert_charm_supports_dns_ha():
+ """Validate prerequisites for DNS HA
+ The MAAS client is only available on Xenial or greater
+
+ :raises DNSHAException: if release is < 16.04
+ """
+ if lsb_release().get('DISTRIB_RELEASE') < '16.04':
+ msg = ('DNS HA is only supported on 16.04 and greater '
+ 'versions of Ubuntu.')
+ status_set('blocked', msg)
+ raise DNSHAException(msg)
+ return True
+
+
+def expect_ha():
+ """ Determine if the unit expects to be in HA
+
+ Check for VIP or dns-ha settings which indicate the unit should expect to
+ be related to hacluster.
+
+ @returns boolean
+ """
+ return config('vip') or config('dns-ha')
+
+
+def generate_ha_relation_data(service):
+ """ Generate relation data for ha relation
+
+ Based on configuration options and unit interfaces, generate a json
+ encoded dict of relation data items for the hacluster relation,
+ providing configuration for DNS HA or VIP's + haproxy clone sets.
+
+ @returns dict: json encoded data for use with relation_set
+ """
+ _haproxy_res = 'res_{}_haproxy'.format(service)
+ _relation_data = {
+ 'resources': {
+ _haproxy_res: 'lsb:haproxy',
+ },
+ 'resource_params': {
+ _haproxy_res: 'op monitor interval="5s"'
+ },
+ 'init_services': {
+ _haproxy_res: 'haproxy'
+ },
+ 'clones': {
+ 'cl_{}_haproxy'.format(service): _haproxy_res
+ },
+ }
+
+ if config('dns-ha'):
+ update_hacluster_dns_ha(service, _relation_data)
+ else:
+ update_hacluster_vip(service, _relation_data)
+
+ return {
+ 'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS)
+ for k, v in _relation_data.items() if v
+ }
+
+def update_hacluster_dns_ha(service, relation_data,
+ crm_ocf='ocf:maas:dns'):
+ """ Configure DNS-HA resources based on provided configuration
+
+ @param service: Name of the service being configured
+ @param relation_data: Pointer to dictionary of relation data.
+ @param crm_ocf: Corosync Open Cluster Framework resource agent to use for
+ DNS HA
+ """
# Validate the charm environment for DNS HA
assert_charm_supports_dns_ha()
@@ -93,7 +191,7 @@ def update_dns_ha_resource_params(resources, resource_params,
status_set('blocked', msg)
raise DNSHAException(msg)
- hostname_key = 'res_{}_{}_hostname'.format(charm_name(), endpoint_type)
+ hostname_key = 'res_{}_{}_hostname'.format(service, endpoint_type)
if hostname_key in hostname_group:
log('DNS HA: Resource {}: {} already exists in '
'hostname group - skipping'.format(hostname_key, hostname),
@@ -101,42 +199,67 @@ def update_dns_ha_resource_params(resources, resource_params,
continue
hostname_group.append(hostname_key)
- resources[hostname_key] = crm_ocf
- resource_params[hostname_key] = (
- 'params fqdn="{}" ip_address="{}" '
- ''.format(hostname, resolve_address(endpoint_type=endpoint_type,
- override=False)))
+ relation_data['resources'][hostname_key] = crm_ocf
+ relation_data['resource_params'][hostname_key] = (
+ 'params fqdn="{}" ip_address="{}"'
+ .format(hostname, resolve_address(endpoint_type=endpoint_type,
+ override=False)))
if len(hostname_group) >= 1:
log('DNS HA: Hostname group is set with {} as members. '
'Informing the ha relation'.format(' '.join(hostname_group)),
DEBUG)
- relation_set(relation_id=relation_id, groups={
- 'grp_{}_hostnames'.format(charm_name()): ' '.join(hostname_group)})
+ relation_data['groups'] = {
+ 'grp_{}_hostnames'.format(service): ' '.join(hostname_group)
+ }
else:
msg = 'DNS HA: Hostname group has no members.'
status_set('blocked', msg)
raise DNSHAException(msg)
-def assert_charm_supports_dns_ha():
- """Validate prerequisites for DNS HA
- The MAAS client is only available on Xenial or greater
+def update_hacluster_vip(service, relation_data):
+ """ Configure VIP resources based on provided configuration
+
+ @param service: Name of the service being configured
+ @param relation_data: Pointer to dictionary of relation data.
"""
- if lsb_release().get('DISTRIB_RELEASE') < '16.04':
- msg = ('DNS HA is only supported on 16.04 and greater '
- 'versions of Ubuntu.')
- status_set('blocked', msg)
- raise DNSHAException(msg)
- return True
+ cluster_config = get_hacluster_config()
+ vip_group = []
+ for vip in cluster_config['vip'].split():
+ if is_ipv6(vip):
+ res_neutron_vip = 'ocf:heartbeat:IPv6addr'
+ vip_params = 'ipv6addr'
+ else:
+ res_neutron_vip = 'ocf:heartbeat:IPaddr2'
+ vip_params = 'ip'
+ iface = (get_iface_for_address(vip) or
+ config('vip_iface'))
+ netmask = (get_netmask_for_address(vip) or
+ config('vip_cidr'))
-def expect_ha():
- """ Determine if the unit expects to be in HA
+ if iface is not None:
+ vip_key = 'res_{}_{}_vip'.format(service, iface)
+ if vip_key in vip_group:
+ if vip not in relation_data['resource_params'][vip_key]:
+ vip_key = '{}_{}'.format(vip_key, vip_params)
+ else:
+ log("Resource '%s' (vip='%s') already exists in "
+ "vip group - skipping" % (vip_key, vip), WARNING)
+ continue
- Check for VIP or dns-ha settings which indicate the unit should expect to
- be related to hacluster.
+ relation_data['resources'][vip_key] = res_neutron_vip
+ relation_data['resource_params'][vip_key] = (
+ 'params {ip}="{vip}" cidr_netmask="{netmask}" '
+ 'nic="{iface}"'.format(ip=vip_params,
+ vip=vip,
+ iface=iface,
+ netmask=netmask)
+ )
+ vip_group.append(vip_key)
- @returns boolean
- """
- return config('vip') or config('dns-ha')
+ if len(vip_group) >= 1:
+ relation_data['groups'] = {
+ 'grp_{}_vips'.format(service): ' '.join(vip_group)
+ }
diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
index ebc8a68..d36af2a 100644
--- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
+++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
@@ -17,22 +17,22 @@ defaults
{%- if haproxy_queue_timeout %}
timeout queue {{ haproxy_queue_timeout }}
{%- else %}
- timeout queue 5000
+ timeout queue 9000
{%- endif %}
{%- if haproxy_connect_timeout %}
timeout connect {{ haproxy_connect_timeout }}
{%- else %}
- timeout connect 5000
+ timeout connect 9000
{%- endif %}
{%- if haproxy_client_timeout %}
timeout client {{ haproxy_client_timeout }}
{%- else %}
- timeout client 30000
+ timeout client 90000
{%- endif %}
{%- if haproxy_server_timeout %}
timeout server {{ haproxy_server_timeout }}
{%- else %}
- timeout server 30000
+ timeout server 90000
{%- endif %}
listen stats
diff --git a/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf b/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
index 315b2a3..e2e73b2 100644
--- a/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
+++ b/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
@@ -15,9 +15,6 @@ Listen {{ public_port }}
{% if port -%}
<VirtualHost *:{{ port }}>
WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
-{% if python_path -%}
- python-path={{ python_path }} \
-{% endif -%}
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}
WSGIScriptAlias / {{ script }}
@@ -29,7 +26,7 @@ Listen {{ public_port }}
ErrorLog /var/log/apache2/{{ service_name }}_error.log
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
- <Directory {{ usr_bin }}>
+ <Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
@@ -44,9 +41,6 @@ Listen {{ public_port }}
{% if admin_port -%}
<VirtualHost *:{{ admin_port }}>
WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
-{% if python_path -%}
- python-path={{ python_path }} \
-{% endif -%}
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}-admin
WSGIScriptAlias / {{ admin_script }}
@@ -58,7 +52,7 @@ Listen {{ public_port }}
ErrorLog /var/log/apache2/{{ service_name }}_error.log
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
- <Directory {{ usr_bin }}>
+ <Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
@@ -73,9 +67,6 @@ Listen {{ public_port }}
{% if public_port -%}
<VirtualHost *:{{ public_port }}>
WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
-{% if python_path -%}
- python-path={{ python_path }} \
-{% endif -%}
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}-public
WSGIScriptAlias / {{ public_script }}
@@ -87,7 +78,7 @@ Listen {{ public_port }}
ErrorLog /var/log/apache2/{{ service_name }}_error.log
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
- <Directory {{ usr_bin }}>
+ <Directory /usr/bin>
<IfVersion >= 2.4>
Require all granted
</IfVersion>
diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py
index 8a541d4..b753275 100644
--- a/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/utils.py
@@ -23,7 +23,6 @@ import sys
import re
import itertools
import functools
-import shutil
import six
import traceback
@@ -47,7 +46,6 @@ from charmhelpers.core.hookenv import (
related_units,
relation_ids,
relation_set,
- service_name,
status_set,
hook_name,
application_version_set,
@@ -68,11 +66,6 @@ from charmhelpers.contrib.network.ip import (
port_has_listener,
)
-from charmhelpers.contrib.python.packages import (
- pip_create_virtualenv,
- pip_install,
-)
-
from charmhelpers.core.host import (
lsb_release,
mounts,
@@ -84,7 +77,6 @@ from charmhelpers.core.host import (
)
from charmhelpers.fetch import (
apt_cache,
- install_remote,
import_key as fetch_import_key,
add_source as fetch_add_source,
SourceConfigError,
@@ -278,27 +270,6 @@ PACKAGE_CODENAMES = {
]),
}
-GIT_DEFAULT_REPOS = {
- 'requirements': 'git://github.com/openstack/requirements',
- 'cinder': 'git://github.com/openstack/cinder',
- 'glance': 'git://github.com/openstack/glance',
- 'horizon': 'git://github.com/openstack/horizon',
- 'keystone': 'git://github.com/openstack/keystone',
- 'networking-hyperv': 'git://github.com/openstack/networking-hyperv',
- 'neutron': 'git://github.com/openstack/neutron',
- 'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas',
- 'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas',
- 'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas',
- 'nova': 'git://github.com/openstack/nova',
-}
-
-GIT_DEFAULT_BRANCHES = {
- 'liberty': 'stable/liberty',
- 'mitaka': 'stable/mitaka',
- 'newton': 'stable/newton',
- 'master': 'master',
-}
-
DEFAULT_LOOPBACK_SIZE = '5G'
@@ -392,6 +363,8 @@ def get_swift_codename(version):
releases = UBUNTU_OPENSTACK_RELEASE
release = [k for k, v in six.iteritems(releases) if codename in v]
ret = subprocess.check_output(['apt-cache', 'policy', 'swift'])
+ if six.PY3:
+ ret = ret.decode('UTF-8')
if codename in ret or release[0] in ret:
return codename
elif len(codenames) == 1:
@@ -528,7 +501,6 @@ def os_release(package, base='essex', reset_cache=False):
if _os_rel:
return _os_rel
_os_rel = (
- git_os_codename_install_source(config('openstack-origin-git')) or
get_os_codename_package(package, fatal=False) or
get_os_codename_install_source(config('openstack-origin')) or
base)
@@ -654,11 +626,6 @@ def openstack_upgrade_available(package):
else:
avail_vers = get_os_version_install_source(src)
apt.init()
- if "swift" in package:
- major_cur_vers = cur_vers.split('.', 1)[0]
- major_avail_vers = avail_vers.split('.', 1)[0]
- major_diff = apt.version_compare(major_avail_vers, major_cur_vers)
- return avail_vers > cur_vers and (major_diff == 1 or major_diff == 0)
return apt.version_compare(avail_vers, cur_vers) == 1
@@ -769,417 +736,6 @@ def os_requires_version(ostack_release, pkg):
return wrap
-def git_install_requested():
- """
- Returns true if openstack-origin-git is specified.
- """
- return config('openstack-origin-git') is not None
-
-
-def git_os_codename_install_source(projects_yaml):
- """
- Returns OpenStack codename of release being installed from source.
- """
- if git_install_requested():
- projects = _git_yaml_load(projects_yaml)
-
- if projects in GIT_DEFAULT_BRANCHES.keys():
- if projects == 'master':
- return 'ocata'
- return projects
-
- if 'release' in projects:
- if projects['release'] == 'master':
- return 'ocata'
- return projects['release']
-
- return None
-
-
-def git_default_repos(projects_yaml):
- """
- Returns default repos if a default openstack-origin-git value is specified.
- """
- service = service_name()
- core_project = service
-
- for default, branch in six.iteritems(GIT_DEFAULT_BRANCHES):
- if projects_yaml == default:
-
- # add the requirements repo first
- repo = {
- 'name': 'requirements',
- 'repository': GIT_DEFAULT_REPOS['requirements'],
- 'branch': branch,
- }
- repos = [repo]
-
- # neutron-* and nova-* charms require some additional repos
- if service in ['neutron-api', 'neutron-gateway',
- 'neutron-openvswitch']:
- core_project = 'neutron'
- if service == 'neutron-api':
- repo = {
- 'name': 'networking-hyperv',
- 'repository': GIT_DEFAULT_REPOS['networking-hyperv'],
- 'branch': branch,
- }
- repos.append(repo)
- for project in ['neutron-fwaas', 'neutron-lbaas',
- 'neutron-vpnaas', 'nova']:
- repo = {
- 'name': project,
- 'repository': GIT_DEFAULT_REPOS[project],
- 'branch': branch,
- }
- repos.append(repo)
-
- elif service in ['nova-cloud-controller', 'nova-compute']:
- core_project = 'nova'
- repo = {
- 'name': 'neutron',
- 'repository': GIT_DEFAULT_REPOS['neutron'],
- 'branch': branch,
- }
- repos.append(repo)
- elif service == 'openstack-dashboard':
- core_project = 'horizon'
-
- # finally add the current service's core project repo
- repo = {
- 'name': core_project,
- 'repository': GIT_DEFAULT_REPOS[core_project],
- 'branch': branch,
- }
- repos.append(repo)
-
- return yaml.dump(dict(repositories=repos, release=default))
-
- return projects_yaml
-
-
-def _git_yaml_load(projects_yaml):
- """
- Load the specified yaml into a dictionary.
- """
- if not projects_yaml:
- return None
-
- return yaml.load(projects_yaml)
-
-
-requirements_dir = None
-
-
-def git_clone_and_install(projects_yaml, core_project):
- """
- Clone/install all specified OpenStack repositories.
-
- The expected format of projects_yaml is:
-
- repositories:
- - {name: keystone,
- repository: 'git://git.openstack.org/openstack/keystone.git',
- branch: 'stable/icehouse'}
- - {name: requirements,
- repository: 'git://git.openstack.org/openstack/requirements.git',
- branch: 'stable/icehouse'}
-
- directory: /mnt/openstack-git
- http_proxy: squid-proxy-url
- https_proxy: squid-proxy-url
-
- The directory, http_proxy, and https_proxy keys are optional.
-
- """
- global requirements_dir
- parent_dir = '/mnt/openstack-git'
- http_proxy = None
-
- projects = _git_yaml_load(projects_yaml)
- _git_validate_projects_yaml(projects, core_project)
-
- old_environ = dict(os.environ)
-
- if 'http_proxy' in projects.keys():
- http_proxy = projects['http_proxy']
- os.environ['http_proxy'] = projects['http_proxy']
- if 'https_proxy' in projects.keys():
- os.environ['https_proxy'] = projects['https_proxy']
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
-
- # Upgrade setuptools and pip from default virtualenv versions. The default
- # versions in trusty break master OpenStack branch deployments.
- for p in ['pip', 'setuptools']:
- pip_install(p, upgrade=True, proxy=http_proxy,
- venv=os.path.join(parent_dir, 'venv'))
-
- constraints = None
- for p in projects['repositories']:
- repo = p['repository']
- branch = p['branch']
- depth = '1'
- if 'depth' in p.keys():
- depth = p['depth']
- if p['name'] == 'requirements':
- repo_dir = _git_clone_and_install_single(repo, branch, depth,
- parent_dir, http_proxy,
- update_requirements=False)
- requirements_dir = repo_dir
- constraints = os.path.join(repo_dir, "upper-constraints.txt")
- # upper-constraints didn't exist until after icehouse
- if not os.path.isfile(constraints):
- constraints = None
- # use constraints unless project yaml sets use_constraints to false
- if 'use_constraints' in projects.keys():
- if not projects['use_constraints']:
- constraints = None
- else:
- repo_dir = _git_clone_and_install_single(repo, branch, depth,
- parent_dir, http_proxy,
- update_requirements=True,
- constraints=constraints)
-
- os.environ = old_environ
-
-
-def _git_validate_projects_yaml(projects, core_project):
- """
- Validate the projects yaml.
- """
- _git_ensure_key_exists('repositories', projects)
-
- for project in projects['repositories']:
- _git_ensure_key_exists('name', project.keys())
- _git_ensure_key_exists('repository', project.keys())
- _git_ensure_key_exists('branch', project.keys())
-
- if projects['repositories'][0]['name'] != 'requirements':
- error_out('{} git repo must be specified first'.format('requirements'))
-
- if projects['repositories'][-1]['name'] != core_project:
- error_out('{} git repo must be specified last'.format(core_project))
-
- _git_ensure_key_exists('release', projects)
-
-
-def _git_ensure_key_exists(key, keys):
- """
- Ensure that key exists in keys.
- """
- if key not in keys:
- error_out('openstack-origin-git key \'{}\' is missing'.format(key))
-
-
-def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
- update_requirements, constraints=None):
- """
- Clone and install a single git repository.
- """
- if not os.path.exists(parent_dir):
- juju_log('Directory already exists at {}. '
- 'No need to create directory.'.format(parent_dir))
- os.mkdir(parent_dir)
-
- juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
- repo_dir = install_remote(
- repo, dest=parent_dir, branch=branch, depth=depth)
-
- venv = os.path.join(parent_dir, 'venv')
-
- if update_requirements:
- if not requirements_dir:
- error_out('requirements repo must be cloned before '
- 'updating from global requirements.')
- _git_update_requirements(venv, repo_dir, requirements_dir)
-
- juju_log('Installing git repo from dir: {}'.format(repo_dir))
- if http_proxy:
- pip_install(repo_dir, proxy=http_proxy, venv=venv,
- constraints=constraints)
- else:
- pip_install(repo_dir, venv=venv, constraints=constraints)
-
- return repo_dir
-
-
-def _git_update_requirements(venv, package_dir, reqs_dir):
- """
- Update from global requirements.
-
- Update an OpenStack git directory's requirements.txt and
- test-requirements.txt from global-requirements.txt.
- """
- orig_dir = os.getcwd()
- os.chdir(reqs_dir)
- python = os.path.join(venv, 'bin/python')
- cmd = [python, 'update.py', package_dir]
- try:
- subprocess.check_call(cmd)
- except subprocess.CalledProcessError:
- package = os.path.basename(package_dir)
- error_out("Error updating {} from "
- "global-requirements.txt".format(package))
- os.chdir(orig_dir)
-
-
-def git_pip_venv_dir(projects_yaml):
- """
- Return the pip virtualenv path.
- """
- parent_dir = '/mnt/openstack-git'
-
- projects = _git_yaml_load(projects_yaml)
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- return os.path.join(parent_dir, 'venv')
-
-
-def git_src_dir(projects_yaml, project):
- """
- Return the directory where the specified project's source is located.
- """
- parent_dir = '/mnt/openstack-git'
-
- projects = _git_yaml_load(projects_yaml)
-
- if 'directory' in projects.keys():
- parent_dir = projects['directory']
-
- for p in projects['repositories']:
- if p['name'] == project:
- return os.path.join(parent_dir, os.path.basename(p['repository']))
-
- return None
-
-
-def git_yaml_value(projects_yaml, key):
- """
- Return the value in projects_yaml for the specified key.
- """
- projects = _git_yaml_load(projects_yaml)
-
- if key in projects.keys():
- return projects[key]
-
- return None
-
-
-def git_generate_systemd_init_files(templates_dir):
- """
- Generate systemd init files.
-
- Generates and installs systemd init units and script files based on the
- *.init.in files contained in the templates_dir directory.
-
- This code is based on the openstack-pkg-tools package and its init
- script generation, which is used by the OpenStack packages.
- """
- for f in os.listdir(templates_dir):
- # Create the init script and systemd unit file from the template
- if f.endswith(".init.in"):
- init_in_file = f
- init_file = f[:-8]
- service_file = "{}.service".format(init_file)
-
- init_in_source = os.path.join(templates_dir, init_in_file)
- init_source = os.path.join(templates_dir, init_file)
- service_source = os.path.join(templates_dir, service_file)
-
- init_dest = os.path.join('/etc/init.d', init_file)
- service_dest = os.path.join('/lib/systemd/system', service_file)
-
- shutil.copyfile(init_in_source, init_source)
- with open(init_source, 'a') as outfile:
- template = ('/usr/share/openstack-pkg-tools/'
- 'init-script-template')
- with open(template) as infile:
- outfile.write('\n\n{}'.format(infile.read()))
-
- cmd = ['pkgos-gen-systemd-unit', init_in_source]
- subprocess.check_call(cmd)
-
- if os.path.exists(init_dest):
- os.remove(init_dest)
- if os.path.exists(service_dest):
- os.remove(service_dest)
- shutil.copyfile(init_source, init_dest)
- shutil.copyfile(service_source, service_dest)
- os.chmod(init_dest, 0o755)
-
- for f in os.listdir(templates_dir):
- # If there's a service.in file, use it instead of the generated one
- if f.endswith(".service.in"):
- service_in_file = f
- service_file = f[:-3]
-
- service_in_source = os.path.join(templates_dir, service_in_file)
- service_source = os.path.join(templates_dir, service_file)
- service_dest = os.path.join('/lib/systemd/system', service_file)
-
- shutil.copyfile(service_in_source, service_source)
-
- if os.path.exists(service_dest):
- os.remove(service_dest)
- shutil.copyfile(service_source, service_dest)
-
- for f in os.listdir(templates_dir):
- # Generate the systemd unit if there's no existing .service.in
- if f.endswith(".init.in"):
- init_in_file = f
- init_file = f[:-8]
- service_in_file = "{}.service.in".format(init_file)
- service_file = "{}.service".format(init_file)
-
- init_in_source = os.path.join(templates_dir, init_in_file)
- service_in_source = os.path.join(templates_dir, service_in_file)
- service_source = os.path.join(templates_dir, service_file)
- service_dest = os.path.join('/lib/systemd/system', service_file)
-
- if not os.path.exists(service_in_source):
- cmd = ['pkgos-gen-systemd-unit', init_in_source]
- subprocess.check_call(cmd)
-
- if os.path.exists(service_dest):
- os.remove(service_dest)
- shutil.copyfile(service_source, service_dest)
-
-
-def git_determine_usr_bin():
- """Return the /usr/bin path for Apache2 config.
-
- The /usr/bin path will be located in the virtualenv if the charm
- is configured to deploy from source.
- """
- if git_install_requested():
- projects_yaml = config('openstack-origin-git')
- projects_yaml = git_default_repos(projects_yaml)
- return os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
- else:
- return '/usr/bin'
-
-
-def git_determine_python_path():
- """Return the python-path for Apache2 config.
-
- Returns 'None' unless the charm is configured to deploy from source,
- in which case the path of the virtualenv's site-packages is returned.
- """
- if git_install_requested():
- projects_yaml = config('openstack-origin-git')
- projects_yaml = git_default_repos(projects_yaml)
- return os.path.join(git_pip_venv_dir(projects_yaml),
- 'lib/python2.7/site-packages')
- else:
- return None
-
-
def os_workload_status(configs, required_interfaces, charm_func=None):
"""
Decorator to set workload status based on complete contexts
@@ -1613,27 +1169,24 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs):
"""
ret = False
- if git_install_requested():
- action_set({'outcome': 'installed from source, skipped upgrade.'})
- else:
- if openstack_upgrade_available(package):
- if config('action-managed-upgrade'):
- juju_log('Upgrading OpenStack release')
-
- try:
- upgrade_callback(configs=configs)
- action_set({'outcome': 'success, upgrade completed.'})
- ret = True
- except Exception:
- action_set({'outcome': 'upgrade failed, see traceback.'})
- action_set({'traceback': traceback.format_exc()})
- action_fail('do_openstack_upgrade resulted in an '
- 'unexpected error')
- else:
- action_set({'outcome': 'action-managed-upgrade config is '
- 'False, skipped upgrade.'})
+ if openstack_upgrade_available(package):
+ if config('action-managed-upgrade'):
+ juju_log('Upgrading OpenStack release')
+
+ try:
+ upgrade_callback(configs=configs)
+ action_set({'outcome': 'success, upgrade completed.'})
+ ret = True
+ except Exception:
+ action_set({'outcome': 'upgrade failed, see traceback.'})
+ action_set({'traceback': traceback.format_exc()})
+ action_fail('do_openstack_upgrade resulted in an '
+ 'unexpected error')
else:
- action_set({'outcome': 'no upgrade available.'})
+ action_set({'outcome': 'action-managed-upgrade config is '
+ 'False, skipped upgrade.'})
+ else:
+ action_set({'outcome': 'no upgrade available.'})
return ret
@@ -2043,14 +1596,25 @@ def token_cache_pkgs(source=None, release=None):
def update_json_file(filename, items):
"""Updates the json `filename` with a given dict.
- :param filename: json filename (i.e.: /etc/glance/policy.json)
+ :param filename: path to json file (e.g. /etc/glance/policy.json)
:param items: dict of items to update
"""
+ if not items:
+ return
+
with open(filename) as fd:
policy = json.load(fd)
+
+ # Compare before and after and if nothing has changed don't write the file
+ # since that could cause unnecessary service restarts.
+ before = json.dumps(policy, indent=4, sort_keys=True)
policy.update(items)
+ after = json.dumps(policy, indent=4, sort_keys=True)
+ if before == after:
+ return
+
with open(filename, "w") as fd:
- fd.write(json.dumps(policy, indent=4))
+ fd.write(after)
@cached
diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py
index 3923161..e13e60a 100644
--- a/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py
@@ -113,7 +113,7 @@ def validator(value, valid_type, valid_range=None):
assert isinstance(valid_range, list), \
"valid_range must be a list, was given {}".format(valid_range)
# If we're dealing with strings
- if valid_type is six.string_types:
+ if isinstance(value, six.string_types):
assert value in valid_range, \
"{} is not in the list {}".format(value, valid_range)
# Integer, float should have a min and max
@@ -377,12 +377,12 @@ def get_mon_map(service):
try:
return json.loads(mon_status)
except ValueError as v:
- log("Unable to parse mon_status json: {}. Error: {}".format(
- mon_status, v.message))
+ log("Unable to parse mon_status json: {}. Error: {}"
+ .format(mon_status, str(v)))
raise
except CalledProcessError as e:
- log("mon_status command failed with message: {}".format(
- e.message))
+ log("mon_status command failed with message: {}"
+ .format(str(e)))
raise
@@ -517,7 +517,8 @@ def pool_set(service, pool_name, key, value):
:param value:
:return: None. Can raise CalledProcessError
"""
- cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key, value]
+ cmd = ['ceph', '--id', service, 'osd', 'pool', 'set', pool_name, key,
+ str(value).lower()]
try:
check_call(cmd)
except CalledProcessError:
@@ -621,16 +622,24 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
:param durability_estimator: int
:return: None. Can raise CalledProcessError
"""
+ version = ceph_version()
+
# Ensure this failure_domain is allowed by Ceph
validator(failure_domain, six.string_types,
['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'set', profile_name,
- 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks),
- 'ruleset_failure_domain=' + failure_domain]
+ 'plugin=' + erasure_plugin_name, 'k=' + str(data_chunks), 'm=' + str(coding_chunks)
+ ]
if locality is not None and durability_estimator is not None:
raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
+ # failure_domain changed in luminous
+ if version and version >= '12.0.0':
+ cmd.append('crush-failure-domain=' + failure_domain)
+ else:
+ cmd.append('ruleset-failure-domain=' + failure_domain)
+
# Add plugin specific information
if locality is not None:
# For local erasure codes
@@ -1064,14 +1073,24 @@ class CephBrokerRq(object):
self.ops = []
def add_op_request_access_to_group(self, name, namespace=None,
- permission=None, key_name=None):
+ permission=None, key_name=None,
+ object_prefix_permissions=None):
"""
Adds the requested permissions to the current service's Ceph key,
- allowing the key to access only the specified pools
+ allowing the key to access only the specified pools or
+ object prefixes. object_prefix_permissions should be a dictionary
+ keyed on the permission with the corresponding value being a list
+ of prefixes to apply that permission to.
+ {
+ 'rwx': ['prefix1', 'prefix2'],
+ 'class-read': ['prefix3']}
"""
- self.ops.append({'op': 'add-permissions-to-key', 'group': name,
- 'namespace': namespace, 'name': key_name or service_name(),
- 'group-permission': permission})
+ self.ops.append({
+ 'op': 'add-permissions-to-key', 'group': name,
+ 'namespace': namespace,
+ 'name': key_name or service_name(),
+ 'group-permission': permission,
+ 'object-prefix-permissions': object_prefix_permissions})
def add_op_create_pool(self, name, replica_count=3, pg_num=None,
weight=None, group=None, namespace=None):
@@ -1107,7 +1126,10 @@ class CephBrokerRq(object):
def _ops_equal(self, other):
if len(self.ops) == len(other.ops):
for req_no in range(0, len(self.ops)):
- for key in ['replicas', 'name', 'op', 'pg_num', 'weight']:
+ for key in [
+ 'replicas', 'name', 'op', 'pg_num', 'weight',
+ 'group', 'group-namespace', 'group-permission',
+ 'object-prefix-permissions']:
if self.ops[req_no].get(key) != other.ops[req_no].get(key):
return False
else:
diff --git a/hooks/charmhelpers/contrib/storage/linux/lvm.py b/hooks/charmhelpers/contrib/storage/linux/lvm.py
index 7f2a060..79a7a24 100644
--- a/hooks/charmhelpers/contrib/storage/linux/lvm.py
+++ b/hooks/charmhelpers/contrib/storage/linux/lvm.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import functools
from subprocess import (
CalledProcessError,
check_call,
@@ -101,3 +102,52 @@ def create_lvm_volume_group(volume_group, block_device):
:block_device: str: Full path of PV-initialized block device.
'''
check_call(['vgcreate', volume_group, block_device])
+
+
+def list_logical_volumes(select_criteria=None, path_mode=False):
+ '''
+ List logical volumes
+
+ :param select_criteria: str: Limit list to those volumes matching this
+ criteria (see 'lvs -S help' for more details)
+ :param path_mode: bool: return logical volume name in 'vg/lv' format, this
+ format is required for some commands like lvextend
+ :returns: [str]: List of logical volumes
+ '''
+ lv_diplay_attr = 'lv_name'
+ if path_mode:
+ # Parsing output logic relies on the column order
+ lv_diplay_attr = 'vg_name,' + lv_diplay_attr
+ cmd = ['lvs', '--options', lv_diplay_attr, '--noheadings']
+ if select_criteria:
+ cmd.extend(['--select', select_criteria])
+ lvs = []
+ for lv in check_output(cmd).decode('UTF-8').splitlines():
+ if not lv:
+ continue
+ if path_mode:
+ lvs.append('/'.join(lv.strip().split()))
+ else:
+ lvs.append(lv.strip())
+ return lvs
+
+
+list_thin_logical_volume_pools = functools.partial(
+ list_logical_volumes,
+ select_criteria='lv_attr =~ ^t')
+
+list_thin_logical_volumes = functools.partial(
+ list_logical_volumes,
+ select_criteria='lv_attr =~ ^V')
+
+
+def extend_logical_volume_by_device(lv_name, block_device):
+ '''
+ Extends the size of logical volume lv_name by the amount of free space on
+ physical volume block_device.
+
+ :param lv_name: str: name of logical volume to be extended (vg/lv format)
+ :param block_device: str: name of block_device to be allocated to lv_name
+ '''
+ cmd = ['lvextend', lv_name, block_device]
+ check_call(cmd)
diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py
index 5a88f79..211ae87 100644
--- a/hooks/charmhelpers/core/hookenv.py
+++ b/hooks/charmhelpers/core/hookenv.py
@@ -39,6 +39,7 @@ if not six.PY3:
else:
from collections import UserDict
+
CRITICAL = "CRITICAL"
ERROR = "ERROR"
WARNING = "WARNING"
@@ -344,6 +345,7 @@ class Config(dict):
"""
with open(self.path, 'w') as f:
+ os.fchmod(f.fileno(), 0o600)
json.dump(self, f)
def _implicit_save(self):
diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py
index 5cc5c86..fd14d60 100644
--- a/hooks/charmhelpers/core/host.py
+++ b/hooks/charmhelpers/core/host.py
@@ -549,6 +549,8 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
with open(path, 'wb') as target:
os.fchown(target.fileno(), uid, gid)
os.fchmod(target.fileno(), perms)
+ if six.PY3 and isinstance(content, six.string_types):
+ content = content.encode('UTF-8')
target.write(content)
return
# the contents were the same, but we might still need to change the
diff --git a/hooks/charmhelpers/core/host_factory/ubuntu.py b/hooks/charmhelpers/core/host_factory/ubuntu.py
index d8dc378..99451b5 100644
--- a/hooks/charmhelpers/core/host_factory/ubuntu.py
+++ b/hooks/charmhelpers/core/host_factory/ubuntu.py
@@ -20,6 +20,7 @@ UBUNTU_RELEASES = (
'yakkety',
'zesty',
'artful',
+ 'bionic',
)
diff --git a/hooks/charmhelpers/core/unitdata.py b/hooks/charmhelpers/core/unitdata.py
index 7af875c..6d7b494 100644
--- a/hooks/charmhelpers/core/unitdata.py
+++ b/hooks/charmhelpers/core/unitdata.py
@@ -175,6 +175,8 @@ class Storage(object):
else:
self.db_path = os.path.join(
os.environ.get('CHARM_DIR', ''), '.unit-state.db')
+ with open(self.db_path, 'a') as f:
+ os.fchmod(f.fileno(), 0o600)
self.conn = sqlite3.connect('%s' % self.db_path)
self.cursor = self.conn.cursor()
self.revision = None
diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
index e37f283..5afbbd8 100644
--- a/tests/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -13,6 +13,7 @@
# limitations under the License.
import logging
+import os
import re
import sys
import six
@@ -185,7 +186,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
self.d.configure(service, config)
def _auto_wait_for_status(self, message=None, exclude_services=None,
- include_only=None, timeout=1800):
+ include_only=None, timeout=None):
"""Wait for all units to have a specific extended status, except
for any defined as excluded. Unless specified via message, any
status containing any case of 'ready' will be considered a match.
@@ -215,7 +216,10 @@ class OpenStackAmuletDeployment(AmuletDeployment):
:param timeout: Maximum time in seconds to wait for status match
:returns: None. Raises if timeout is hit.
"""
- self.log.info('Waiting for extended status on units...')
+ if not timeout:
+ timeout = int(os.environ.get('AMULET_SETUP_TIMEOUT', 1800))
+ self.log.info('Waiting for extended status on units for {}s...'
+ ''.format(timeout))
all_services = self.d.services.keys()
@@ -252,9 +256,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
service_messages = {service: message for service in services}
# Check for idleness
- self.d.sentry.wait()
+ self.d.sentry.wait(timeout=timeout)
# Check for error states and bail early
- self.d.sentry.wait_for_status(self.d.juju_env, services)
+ self.d.sentry.wait_for_status(self.d.juju_env, services, timeout=timeout)
# Check for ready messages
self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py
index b71b2b1..87f364d 100644
--- a/tests/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py
@@ -858,9 +858,12 @@ class OpenStackAmuletUtils(AmuletUtils):
:returns: List of pool name, object count, kb disk space used
"""
df = self.get_ceph_df(sentry_unit)
- pool_name = df['pools'][pool_id]['name']
- obj_count = df['pools'][pool_id]['stats']['objects']
- kb_used = df['pools'][pool_id]['stats']['kb_used']
+ for pool in df['pools']:
+ if pool['id'] == pool_id:
+ pool_name = pool['name']
+ obj_count = pool['stats']['objects']
+ kb_used = pool['stats']['kb_used']
+
self.log.debug('Ceph {} pool (ID {}): {} objects, '
'{} kb used'.format(pool_name, pool_id,
obj_count, kb_used))
diff --git a/tests/charmhelpers/core/hookenv.py b/tests/charmhelpers/core/hookenv.py
index 5a88f79..211ae87 100644
--- a/tests/charmhelpers/core/hookenv.py
+++ b/tests/charmhelpers/core/hookenv.py
@@ -39,6 +39,7 @@ if not six.PY3:
else:
from collections import UserDict
+
CRITICAL = "CRITICAL"
ERROR = "ERROR"
WARNING = "WARNING"
@@ -344,6 +345,7 @@ class Config(dict):
"""
with open(self.path, 'w') as f:
+ os.fchmod(f.fileno(), 0o600)
json.dump(self, f)
def _implicit_save(self):
diff --git a/tests/charmhelpers/core/host.py b/tests/charmhelpers/core/host.py
index 5cc5c86..fd14d60 100644
--- a/tests/charmhelpers/core/host.py
+++ b/tests/charmhelpers/core/host.py
@@ -549,6 +549,8 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
with open(path, 'wb') as target:
os.fchown(target.fileno(), uid, gid)
os.fchmod(target.fileno(), perms)
+ if six.PY3 and isinstance(content, six.string_types):
+ content = content.encode('UTF-8')
target.write(content)
return
# the contents were the same, but we might still need to change the
diff --git a/tests/charmhelpers/core/host_factory/ubuntu.py b/tests/charmhelpers/core/host_factory/ubuntu.py
index d8dc378..99451b5 100644
--- a/tests/charmhelpers/core/host_factory/ubuntu.py
+++ b/tests/charmhelpers/core/host_factory/ubuntu.py
@@ -20,6 +20,7 @@ UBUNTU_RELEASES = (
'yakkety',
'zesty',
'artful',
+ 'bionic',
)
diff --git a/tests/charmhelpers/core/unitdata.py b/tests/charmhelpers/core/unitdata.py
index 7af875c..6d7b494 100644
--- a/tests/charmhelpers/core/unitdata.py
+++ b/tests/charmhelpers/core/unitdata.py
@@ -175,6 +175,8 @@ class Storage(object):
else:
self.db_path = os.path.join(
os.environ.get('CHARM_DIR', ''), '.unit-state.db')
+ with open(self.db_path, 'a') as f:
+ os.fchmod(f.fileno(), 0o600)
self.conn = sqlite3.connect('%s' % self.db_path)
self.cursor = self.conn.cursor()
self.revision = None

This mirror site includes all the OpenStack-related repositories under: openstack, openstack-dev and openstack-infra.

NOTE: All repositories are updated every hour.

Usage

For Git Clone
 git clone http://git.trystack.cn/openstack/nova.git 
For DevStack

Add the GIT_BASE, NOVNC_REPO and SPICE_REPO variables to the local.conf file.

[[local|localrc]]

# use TryStack git mirror
GIT_BASE=http://git.trystack.cn
NOVNC_REPO=http://git.trystack.cn/kanaka/noVNC.git
SPICE_REPO=http://git.trystack.cn/git/spice/spice-html5.git