summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAndrew McLeod <andrew.mcleod@canonical.com>2017-11-18 13:21:11 +1300
committerAndrew McLeod <andrew.mcleod@canonical.com>2017-11-18 13:21:37 +1300
commitc6ae48be4a33fbabe424a002bb5918d2b33f1999 (patch)
tree54dc904ab80661b0131e89570f488487ef667b6b
parent5bf92f314231e12779a888485450dfd984e925d3 (diff)
downloadcharm-nova-lxd-c6ae48be4a33fbabe424a002bb5918d2b33f1999.zip
charm-nova-lxd-c6ae48be4a33fbabe424a002bb5918d2b33f1999.tar.gz
charm-nova-lxd-c6ae48be4a33fbabe424a002bb5918d2b33f1999.tar.bz2
Enable xenial-pike amulet test
Make default func27-smoke xenial-pike Charm-helpers sync Change-Id: I2af5a7bdcb956889f9e7140e67ec51a969eee379
-rw-r--r--hooks/charmhelpers/contrib/charmsupport/nrpe.py10
-rw-r--r--hooks/charmhelpers/contrib/hahelpers/cluster.py30
-rw-r--r--hooks/charmhelpers/contrib/network/ip.py4
-rw-r--r--hooks/charmhelpers/contrib/openstack/alternatives.py13
-rw-r--r--hooks/charmhelpers/contrib/openstack/amulet/deployment.py36
-rw-r--r--hooks/charmhelpers/contrib/openstack/amulet/utils.py36
-rw-r--r--hooks/charmhelpers/contrib/openstack/context.py40
-rwxr-xr-xhooks/charmhelpers/contrib/openstack/files/check_haproxy.sh2
-rw-r--r--hooks/charmhelpers/contrib/openstack/ha/utils.py11
-rw-r--r--hooks/charmhelpers/contrib/openstack/neutron.py61
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/ceph.conf4
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg2
-rw-r--r--hooks/charmhelpers/contrib/openstack/templates/section-oslo-cache6
-rw-r--r--hooks/charmhelpers/contrib/openstack/templating.py2
-rw-r--r--hooks/charmhelpers/contrib/openstack/utils.py42
-rw-r--r--hooks/charmhelpers/contrib/storage/linux/ceph.py42
-rw-r--r--hooks/charmhelpers/contrib/storage/linux/lvm.py8
-rw-r--r--hooks/charmhelpers/contrib/storage/linux/utils.py2
-rw-r--r--hooks/charmhelpers/core/hookenv.py112
-rw-r--r--hooks/charmhelpers/core/host.py73
-rw-r--r--hooks/charmhelpers/core/strutils.py16
-rw-r--r--hooks/charmhelpers/core/unitdata.py2
-rw-r--r--hooks/charmhelpers/fetch/snap.py16
-rw-r--r--hooks/charmhelpers/fetch/ubuntu.py2
-rw-r--r--tests/charmhelpers/contrib/openstack/amulet/deployment.py36
-rw-r--r--tests/charmhelpers/contrib/openstack/amulet/utils.py36
-rw-r--r--tests/charmhelpers/core/hookenv.py112
-rw-r--r--tests/charmhelpers/core/host.py73
-rw-r--r--tests/charmhelpers/core/strutils.py16
-rw-r--r--tests/charmhelpers/core/unitdata.py2
-rwxr-xr-xtests/gate-basic-xenial-pike25
-rw-r--r--tox.ini2
32 files changed, 696 insertions, 178 deletions
diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
index 80d574d..1c55b30 100644
--- a/hooks/charmhelpers/contrib/charmsupport/nrpe.py
+++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
@@ -30,6 +30,7 @@ import yaml
from charmhelpers.core.hookenv import (
config,
+ hook_name,
local_unit,
log,
relation_ids,
@@ -285,7 +286,7 @@ class NRPE(object):
try:
nagios_uid = pwd.getpwnam('nagios').pw_uid
nagios_gid = grp.getgrnam('nagios').gr_gid
- except:
+ except Exception:
log("Nagios user not set up, nrpe checks not updated")
return
@@ -302,7 +303,12 @@ class NRPE(object):
"command": nrpecheck.command,
}
- service('restart', 'nagios-nrpe-server')
+ # update-status hooks are configured to firing every 5 minutes by
+ # default. When nagios-nrpe-server is restarted, the nagios server
+ # reports checks failing causing unneccessary alerts. Let's not restart
+ # on update-status hooks.
+ if not hook_name() == 'update-status':
+ service('restart', 'nagios-nrpe-server')
monitor_ids = relation_ids("local-monitors") + \
relation_ids("nrpe-external-master")
diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py
index e02350e..4207e42 100644
--- a/hooks/charmhelpers/contrib/hahelpers/cluster.py
+++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py
@@ -27,6 +27,7 @@ clustering-related helpers.
import subprocess
import os
+import time
from socket import gethostname as get_unit_hostname
@@ -45,6 +46,9 @@ from charmhelpers.core.hookenv import (
is_leader as juju_is_leader,
status_set,
)
+from charmhelpers.core.host import (
+ modulo_distribution,
+)
from charmhelpers.core.decorators import (
retry_on_exception,
)
@@ -361,3 +365,29 @@ def canonical_url(configs, vip_setting='vip'):
else:
addr = unit_get('private-address')
return '%s://%s' % (scheme, addr)
+
+
+def distributed_wait(modulo=None, wait=None, operation_name='operation'):
+ ''' Distribute operations by waiting based on modulo_distribution
+
+ If modulo and or wait are not set, check config_get for those values.
+
+ :param modulo: int The modulo number creates the group distribution
+ :param wait: int The constant time wait value
+ :param operation_name: string Operation name for status message
+ i.e. 'restart'
+ :side effect: Calls config_get()
+ :side effect: Calls log()
+ :side effect: Calls status_set()
+ :side effect: Calls time.sleep()
+ '''
+ if modulo is None:
+ modulo = config_get('modulo-nodes')
+ if wait is None:
+ wait = config_get('known-wait')
+ calculated_wait = modulo_distribution(modulo=modulo, wait=wait)
+ msg = "Waiting {} seconds for {} ...".format(calculated_wait,
+ operation_name)
+ log(msg, DEBUG)
+ status_set('maintenance', msg)
+ time.sleep(calculated_wait)
diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py
index d7e6deb..a871ce3 100644
--- a/hooks/charmhelpers/contrib/network/ip.py
+++ b/hooks/charmhelpers/contrib/network/ip.py
@@ -490,7 +490,7 @@ def get_host_ip(hostname, fallback=None):
if not ip_addr:
try:
ip_addr = socket.gethostbyname(hostname)
- except:
+ except Exception:
log("Failed to resolve hostname '%s'" % (hostname),
level=WARNING)
return fallback
@@ -518,7 +518,7 @@ def get_hostname(address, fqdn=True):
if not result:
try:
result = socket.gethostbyaddr(address)[0]
- except:
+ except Exception:
return None
else:
result = address
diff --git a/hooks/charmhelpers/contrib/openstack/alternatives.py b/hooks/charmhelpers/contrib/openstack/alternatives.py
index 1501641..547de09 100644
--- a/hooks/charmhelpers/contrib/openstack/alternatives.py
+++ b/hooks/charmhelpers/contrib/openstack/alternatives.py
@@ -29,3 +29,16 @@ def install_alternative(name, target, source, priority=50):
target, name, source, str(priority)
]
subprocess.check_call(cmd)
+
+
+def remove_alternative(name, source):
+ """Remove an installed alternative configuration file
+
+ :param name: string name of the alternative to remove
+ :param source: string full path to alternative to remove
+ """
+ cmd = [
+ 'update-alternatives', '--remove',
+ name, source
+ ]
+ subprocess.check_call(cmd)
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
index 5c041d2..e37f283 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -250,7 +250,14 @@ class OpenStackAmuletDeployment(AmuletDeployment):
self.log.debug('Waiting up to {}s for extended status on services: '
'{}'.format(timeout, services))
service_messages = {service: message for service in services}
+
+ # Check for idleness
+ self.d.sentry.wait()
+ # Check for error states and bail early
+ self.d.sentry.wait_for_status(self.d.juju_env, services)
+ # Check for ready messages
self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
+
self.log.info('OK')
def _get_openstack_release(self):
@@ -263,7 +270,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
(self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty,
self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton,
self.yakkety_newton, self.xenial_ocata, self.zesty_ocata,
- self.xenial_pike, self.artful_pike) = range(11)
+ self.xenial_pike, self.artful_pike, self.xenial_queens,
+ self.bionic_queens,) = range(13)
releases = {
('trusty', None): self.trusty_icehouse,
@@ -274,9 +282,11 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('xenial', 'cloud:xenial-newton'): self.xenial_newton,
('xenial', 'cloud:xenial-ocata'): self.xenial_ocata,
('xenial', 'cloud:xenial-pike'): self.xenial_pike,
+ ('xenial', 'cloud:xenial-queens'): self.xenial_queens,
('yakkety', None): self.yakkety_newton,
('zesty', None): self.zesty_ocata,
('artful', None): self.artful_pike,
+ ('bionic', None): self.bionic_queens,
}
return releases[(self.series, self.openstack)]
@@ -291,6 +301,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('yakkety', 'newton'),
('zesty', 'ocata'),
('artful', 'pike'),
+ ('bionic', 'queens'),
])
if self.openstack:
os_origin = self.openstack.split(':')[1]
@@ -303,20 +314,27 @@ class OpenStackAmuletDeployment(AmuletDeployment):
test scenario, based on OpenStack release and whether ceph radosgw
is flagged as present or not."""
- if self._get_openstack_release() >= self.trusty_kilo:
- # Kilo or later
+ if self._get_openstack_release() == self.trusty_icehouse:
+ # Icehouse
pools = [
+ 'data',
+ 'metadata',
'rbd',
- 'cinder',
+ 'cinder-ceph',
'glance'
]
- else:
- # Juno or earlier
+ elif (self.trusty_kilo <= self._get_openstack_release() <=
+ self.zesty_ocata):
+ # Kilo through Ocata
pools = [
- 'data',
- 'metadata',
'rbd',
- 'cinder',
+ 'cinder-ceph',
+ 'glance'
+ ]
+ else:
+ # Pike and later
+ pools = [
+ 'cinder-ceph',
'glance'
]
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
index c8edbf6..b71b2b1 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
@@ -23,6 +23,7 @@ import urllib
import urlparse
import cinderclient.v1.client as cinder_client
+import cinderclient.v2.client as cinder_clientv2
import glanceclient.v1.client as glance_client
import heatclient.v1.client as heat_client
from keystoneclient.v2_0 import client as keystone_client
@@ -42,7 +43,6 @@ import swiftclient
from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
-from charmhelpers.core.decorators import retry_on_exception
from charmhelpers.core.host import CompareHostReleases
DEBUG = logging.DEBUG
@@ -310,7 +310,6 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Checking if tenant exists ({})...'.format(tenant))
return tenant in [t.name for t in keystone.tenants.list()]
- @retry_on_exception(5, base_delay=10)
def keystone_wait_for_propagation(self, sentry_relation_pairs,
api_version):
"""Iterate over list of sentry and relation tuples and verify that
@@ -326,7 +325,7 @@ class OpenStackAmuletUtils(AmuletUtils):
rel = sentry.relation('identity-service',
relation_name)
self.log.debug('keystone relation data: {}'.format(rel))
- if rel['api_version'] != str(api_version):
+ if rel.get('api_version') != str(api_version):
raise Exception("api_version not propagated through relation"
" data yet ('{}' != '{}')."
"".format(rel['api_version'], api_version))
@@ -348,15 +347,19 @@ class OpenStackAmuletUtils(AmuletUtils):
config = {'preferred-api-version': api_version}
deployment.d.configure('keystone', config)
+ deployment._auto_wait_for_status()
self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
def authenticate_cinder_admin(self, keystone_sentry, username,
- password, tenant):
+ password, tenant, api_version=2):
"""Authenticates admin user with cinder."""
# NOTE(beisner): cinder python client doesn't accept tokens.
keystone_ip = keystone_sentry.info['public-address']
ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
- return cinder_client.Client(username, password, tenant, ept)
+ _clients = {
+ 1: cinder_client.Client,
+ 2: cinder_clientv2.Client}
+ return _clients[api_version](username, password, tenant, ept)
def authenticate_keystone(self, keystone_ip, username, password,
api_version=False, admin_port=False,
@@ -617,13 +620,25 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Keypair ({}) already exists, '
'using it.'.format(keypair_name))
return _keypair
- except:
+ except Exception:
self.log.debug('Keypair ({}) does not exist, '
'creating it.'.format(keypair_name))
_keypair = nova.keypairs.create(name=keypair_name)
return _keypair
+ def _get_cinder_obj_name(self, cinder_object):
+ """Retrieve name of cinder object.
+
+ :param cinder_object: cinder snapshot or volume object
+ :returns: str cinder object name
+ """
+ # v1 objects store name in 'display_name' attr but v2+ use 'name'
+ try:
+ return cinder_object.display_name
+ except AttributeError:
+ return cinder_object.name
+
def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
img_id=None, src_vol_id=None, snap_id=None):
"""Create cinder volume, optionally from a glance image, OR
@@ -674,6 +689,13 @@ class OpenStackAmuletUtils(AmuletUtils):
source_volid=src_vol_id,
snapshot_id=snap_id)
vol_id = vol_new.id
+ except TypeError:
+ vol_new = cinder.volumes.create(name=vol_name,
+ imageRef=img_id,
+ size=vol_size,
+ source_volid=src_vol_id,
+ snapshot_id=snap_id)
+ vol_id = vol_new.id
except Exception as e:
msg = 'Failed to create volume: {}'.format(e)
amulet.raise_status(amulet.FAIL, msg=msg)
@@ -688,7 +710,7 @@ class OpenStackAmuletUtils(AmuletUtils):
# Re-validate new volume
self.log.debug('Validating volume attributes...')
- val_vol_name = cinder.volumes.get(vol_id).display_name
+ val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id))
val_vol_boot = cinder.volumes.get(vol_id).bootable
val_vol_stat = cinder.volumes.get(vol_id).status
val_vol_size = cinder.volumes.get(vol_id).size
diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py
index f67f326..ece75df 100644
--- a/hooks/charmhelpers/contrib/openstack/context.py
+++ b/hooks/charmhelpers/contrib/openstack/context.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import collections
import glob
import json
import math
@@ -578,11 +579,14 @@ class HAProxyContext(OSContextGenerator):
laddr = get_address_in_network(config(cfg_opt))
if laddr:
netmask = get_netmask_for_address(laddr)
- cluster_hosts[laddr] = {'network': "{}/{}".format(laddr,
- netmask),
- 'backends': {l_unit: laddr}}
+ cluster_hosts[laddr] = {
+ 'network': "{}/{}".format(laddr,
+ netmask),
+ 'backends': collections.OrderedDict([(l_unit,
+ laddr)])
+ }
for rid in relation_ids('cluster'):
- for unit in related_units(rid):
+ for unit in sorted(related_units(rid)):
_laddr = relation_get('{}-address'.format(addr_type),
rid=rid, unit=unit)
if _laddr:
@@ -594,10 +598,13 @@ class HAProxyContext(OSContextGenerator):
# match in the frontend
cluster_hosts[addr] = {}
netmask = get_netmask_for_address(addr)
- cluster_hosts[addr] = {'network': "{}/{}".format(addr, netmask),
- 'backends': {l_unit: addr}}
+ cluster_hosts[addr] = {
+ 'network': "{}/{}".format(addr, netmask),
+ 'backends': collections.OrderedDict([(l_unit,
+ addr)])
+ }
for rid in relation_ids('cluster'):
- for unit in related_units(rid):
+ for unit in sorted(related_units(rid)):
_laddr = relation_get('private-address',
rid=rid, unit=unit)
if _laddr:
@@ -628,6 +635,8 @@ class HAProxyContext(OSContextGenerator):
ctxt['local_host'] = '127.0.0.1'
ctxt['haproxy_host'] = '0.0.0.0'
+ ctxt['ipv6_enabled'] = not is_ipv6_disabled()
+
ctxt['stat_port'] = '8888'
db = kv()
@@ -802,8 +811,9 @@ class ApacheSSLContext(OSContextGenerator):
else:
# Expect cert/key provided in config (currently assumed that ca
# uses ip for cn)
- cn = resolve_address(endpoint_type=INTERNAL)
- self.configure_cert(cn)
+ for net_type in (INTERNAL, ADMIN, PUBLIC):
+ cn = resolve_address(endpoint_type=net_type)
+ self.configure_cert(cn)
addresses = self.get_network_addresses()
for address, endpoint in addresses:
@@ -843,15 +853,6 @@ class NeutronContext(OSContextGenerator):
for pkgs in self.packages:
ensure_packages(pkgs)
- def _save_flag_file(self):
- if self.network_manager == 'quantum':
- _file = '/etc/nova/quantum_plugin.conf'
- else:
- _file = '/etc/nova/neutron_plugin.conf'
-
- with open(_file, 'wb') as out:
- out.write(self.plugin + '\n')
-
def ovs_ctxt(self):
driver = neutron_plugin_attribute(self.plugin, 'driver',
self.network_manager)
@@ -996,7 +997,6 @@ class NeutronContext(OSContextGenerator):
flags = config_flags_parser(alchemy_flags)
ctxt['neutron_alchemy_flags'] = flags
- self._save_flag_file()
return ctxt
@@ -1176,7 +1176,7 @@ class SubordinateConfigContext(OSContextGenerator):
if sub_config and sub_config != '':
try:
sub_config = json.loads(sub_config)
- except:
+ except Exception:
log('Could not parse JSON from '
'subordinate_configuration setting from %s'
% rid, level=ERROR)
diff --git a/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh b/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh
index 0df0717..7aab129 100755
--- a/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh
+++ b/hooks/charmhelpers/contrib/openstack/files/check_haproxy.sh
@@ -9,7 +9,7 @@
CRITICAL=0
NOTACTIVE=''
LOGFILE=/var/log/nagios/check_haproxy.log
-AUTH=$(grep -r "stats auth" /etc/haproxy | awk 'NR=1{print $4}')
+AUTH=$(grep -r "stats auth" /etc/haproxy/haproxy.cfg | awk 'NR=1{print $4}')
typeset -i N_INSTANCES=0
for appserver in $(awk '/^\s+server/{print $2}' /etc/haproxy/haproxy.cfg)
diff --git a/hooks/charmhelpers/contrib/openstack/ha/utils.py b/hooks/charmhelpers/contrib/openstack/ha/utils.py
index 254a90e..9a4d79c 100644
--- a/hooks/charmhelpers/contrib/openstack/ha/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/ha/utils.py
@@ -82,15 +82,18 @@ def update_dns_ha_resource_params(resources, resource_params,
continue
m = re.search('os-(.+?)-hostname', setting)
if m:
- networkspace = m.group(1)
+ endpoint_type = m.group(1)
+ # resolve_address's ADDRESS_MAP uses 'int' not 'internal'
+ if endpoint_type == 'internal':
+ endpoint_type = 'int'
else:
msg = ('Unexpected DNS hostname setting: {}. '
- 'Cannot determine network space name'
+ 'Cannot determine endpoint_type name'
''.format(setting))
status_set('blocked', msg)
raise DNSHAException(msg)
- hostname_key = 'res_{}_{}_hostname'.format(charm_name(), networkspace)
+ hostname_key = 'res_{}_{}_hostname'.format(charm_name(), endpoint_type)
if hostname_key in hostname_group:
log('DNS HA: Resource {}: {} already exists in '
'hostname group - skipping'.format(hostname_key, hostname),
@@ -101,7 +104,7 @@ def update_dns_ha_resource_params(resources, resource_params,
resources[hostname_key] = crm_ocf
resource_params[hostname_key] = (
'params fqdn="{}" ip_address="{}" '
- ''.format(hostname, resolve_address(endpoint_type=networkspace,
+ ''.format(hostname, resolve_address(endpoint_type=endpoint_type,
override=False)))
if len(hostname_group) >= 1:
diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py
index 37fa0eb..0f847f5 100644
--- a/hooks/charmhelpers/contrib/openstack/neutron.py
+++ b/hooks/charmhelpers/contrib/openstack/neutron.py
@@ -59,18 +59,13 @@ def determine_dkms_package():
def quantum_plugins():
- from charmhelpers.contrib.openstack import context
return {
'ovs': {
'config': '/etc/quantum/plugins/openvswitch/'
'ovs_quantum_plugin.ini',
'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
'OVSQuantumPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=QUANTUM_CONF_DIR)],
+ 'contexts': [],
'services': ['quantum-plugin-openvswitch-agent'],
'packages': [determine_dkms_package(),
['quantum-plugin-openvswitch-agent']],
@@ -82,11 +77,7 @@ def quantum_plugins():
'config': '/etc/quantum/plugins/nicira/nvp.ini',
'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
'QuantumPlugin.NvpPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=QUANTUM_CONF_DIR)],
+ 'contexts': [],
'services': [],
'packages': [],
'server_packages': ['quantum-server',
@@ -100,7 +91,6 @@ NEUTRON_CONF_DIR = '/etc/neutron'
def neutron_plugins():
- from charmhelpers.contrib.openstack import context
release = os_release('nova-common')
plugins = {
'ovs': {
@@ -108,11 +98,7 @@ def neutron_plugins():
'ovs_neutron_plugin.ini',
'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
'OVSNeutronPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
+ 'contexts': [],
'services': ['neutron-plugin-openvswitch-agent'],
'packages': [determine_dkms_package(),
['neutron-plugin-openvswitch-agent']],
@@ -124,11 +110,7 @@ def neutron_plugins():
'config': '/etc/neutron/plugins/nicira/nvp.ini',
'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
'NeutronPlugin.NvpPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
+ 'contexts': [],
'services': [],
'packages': [],
'server_packages': ['neutron-server',
@@ -138,11 +120,7 @@ def neutron_plugins():
'nsx': {
'config': '/etc/neutron/plugins/vmware/nsx.ini',
'driver': 'vmware',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
+ 'contexts': [],
'services': [],
'packages': [],
'server_packages': ['neutron-server',
@@ -152,11 +130,7 @@ def neutron_plugins():
'n1kv': {
'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
+ 'contexts': [],
'services': [],
'packages': [determine_dkms_package(),
['neutron-plugin-cisco']],
@@ -167,11 +141,7 @@ def neutron_plugins():
'Calico': {
'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
+ 'contexts': [],
'services': ['calico-felix',
'bird',
'neutron-dhcp-agent',
@@ -189,11 +159,7 @@ def neutron_plugins():
'vsp': {
'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
+ 'contexts': [],
'services': [],
'packages': [],
'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
@@ -203,10 +169,7 @@ def neutron_plugins():
'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
'driver': ('neutron.plugins.plumgrid.plumgrid_plugin'
'.plumgrid_plugin.NeutronPluginPLUMgridV2'),
- 'contexts': [
- context.SharedDBContext(user=config('database-user'),
- database=config('database'),
- ssl_dir=NEUTRON_CONF_DIR)],
+ 'contexts': [],
'services': [],
'packages': ['plumgrid-lxc',
'iovisor-dkms'],
@@ -217,11 +180,7 @@ def neutron_plugins():
'midonet': {
'config': '/etc/neutron/plugins/midonet/midonet.ini',
'driver': 'midonet.neutron.plugin.MidonetPluginV2',
- 'contexts': [
- context.SharedDBContext(user=config('neutron-database-user'),
- database=config('neutron-database'),
- relation_prefix='neutron',
- ssl_dir=NEUTRON_CONF_DIR)],
+ 'contexts': [],
'services': [],
'packages': [determine_dkms_package()],
'server_packages': ['neutron-server',
diff --git a/hooks/charmhelpers/contrib/openstack/templates/ceph.conf b/hooks/charmhelpers/contrib/openstack/templates/ceph.conf
index ed5c4f1..a11ce8a 100644
--- a/hooks/charmhelpers/contrib/openstack/templates/ceph.conf
+++ b/hooks/charmhelpers/contrib/openstack/templates/ceph.conf
@@ -18,7 +18,7 @@ rbd default features = {{ rbd_features }}
[client]
{% if rbd_client_cache_settings -%}
-{% for key, value in rbd_client_cache_settings.iteritems() -%}
+{% for key, value in rbd_client_cache_settings.items() -%}
{{ key }} = {{ value }}
{% endfor -%}
-{%- endif %} \ No newline at end of file
+{%- endif %}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
index 2e66045..ebc8a68 100644
--- a/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
+++ b/hooks/charmhelpers/contrib/openstack/templates/haproxy.cfg
@@ -48,7 +48,9 @@ listen stats
{% for service, ports in service_ports.items() -%}
frontend tcp-in_{{ service }}
bind *:{{ ports[0] }}
+ {% if ipv6_enabled -%}
bind :::{{ ports[0] }}
+ {% endif -%}
{% for frontend in frontends -%}
acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/section-oslo-cache b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-cache
new file mode 100644
index 0000000..e056a32
--- /dev/null
+++ b/hooks/charmhelpers/contrib/openstack/templates/section-oslo-cache
@@ -0,0 +1,6 @@
+[cache]
+{% if memcache_url %}
+enabled = true
+backend = oslo_cache.memcache_pool
+memcache_servers = {{ memcache_url }}
+{% endif %}
diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py
index d8c1fc7..77490e4 100644
--- a/hooks/charmhelpers/contrib/openstack/templating.py
+++ b/hooks/charmhelpers/contrib/openstack/templating.py
@@ -272,6 +272,8 @@ class OSConfigRenderer(object):
raise OSConfigException
_out = self.render(config_file)
+ if six.PY3:
+ _out = _out.encode('UTF-8')
with open(config_file, 'wb') as out:
out.write(_out)
diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py
index 837a167..8a541d4 100644
--- a/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/utils.py
@@ -95,7 +95,7 @@ from charmhelpers.fetch import (
from charmhelpers.fetch.snap import (
snap_install,
snap_refresh,
- SNAP_CHANNELS,
+ valid_snap_channel,
)
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
@@ -140,6 +140,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('yakkety', 'newton'),
('zesty', 'ocata'),
('artful', 'pike'),
+ ('bionic', 'queens'),
])
@@ -157,6 +158,7 @@ OPENSTACK_CODENAMES = OrderedDict([
('2016.2', 'newton'),
('2017.1', 'ocata'),
('2017.2', 'pike'),
+ ('2018.1', 'queens'),
])
# The ugly duckling - must list releases oldest to newest
@@ -187,6 +189,8 @@ SWIFT_CODENAMES = OrderedDict([
['2.11.0', '2.12.0', '2.13.0']),
('pike',
['2.13.0', '2.15.0']),
+ ('queens',
+ ['2.16.0']),
])
# >= Liberty version->codename mapping
@@ -412,6 +416,8 @@ def get_os_codename_package(package, fatal=True):
cmd = ['snap', 'list', package]
try:
out = subprocess.check_output(cmd)
+ if six.PY3:
+ out = out.decode('UTF-8')
except subprocess.CalledProcessError as e:
return None
lines = out.split('\n')
@@ -426,7 +432,7 @@ def get_os_codename_package(package, fatal=True):
try:
pkg = cache[package]
- except:
+ except Exception:
if not fatal:
return None
# the package is unknown to the current apt cache.
@@ -579,6 +585,9 @@ def configure_installation_source(source_plus_key):
Note that the behaviour on error is to log the error to the juju log and
then call sys.exit(1).
"""
+ if source_plus_key.startswith('snap'):
+ # Do nothing for snap installs
+ return
# extract the key if there is one, denoted by a '|' in the rel
source, key = get_source_and_pgp_key(source_plus_key)
@@ -615,7 +624,7 @@ def save_script_rc(script_path="scripts/scriptrc", **env_vars):
juju_rc_path = "%s/%s" % (charm_dir(), script_path)
if not os.path.exists(os.path.dirname(juju_rc_path)):
os.mkdir(os.path.dirname(juju_rc_path))
- with open(juju_rc_path, 'wb') as rc_script:
+ with open(juju_rc_path, 'wt') as rc_script:
rc_script.write(
"#!/bin/bash\n")
[rc_script.write('export %s=%s\n' % (u, p))
@@ -794,7 +803,7 @@ def git_default_repos(projects_yaml):
service = service_name()
core_project = service
- for default, branch in GIT_DEFAULT_BRANCHES.iteritems():
+ for default, branch in six.iteritems(GIT_DEFAULT_BRANCHES):
if projects_yaml == default:
# add the requirements repo first
@@ -1615,7 +1624,7 @@ def do_action_openstack_upgrade(package, upgrade_callback, configs):
upgrade_callback(configs=configs)
action_set({'outcome': 'success, upgrade completed.'})
ret = True
- except:
+ except Exception:
action_set({'outcome': 'upgrade failed, see traceback.'})
action_set({'traceback': traceback.format_exc()})
action_fail('do_openstack_upgrade resulted in an '
@@ -1720,7 +1729,7 @@ def is_unit_paused_set():
kv = t[0]
# transform something truth-y into a Boolean.
return not(not(kv.get('unit-paused')))
- except:
+ except Exception:
return False
@@ -2048,7 +2057,7 @@ def update_json_file(filename, items):
def snap_install_requested():
""" Determine if installing from snaps
- If openstack-origin is of the form snap:channel-series-release
+ If openstack-origin is of the form snap:track/channel[/branch]
and channel is in SNAPS_CHANNELS return True.
"""
origin = config('openstack-origin') or ""
@@ -2056,10 +2065,12 @@ def snap_install_requested():
return False
_src = origin[5:]
- channel, series, release = _src.split('-')
- if channel.lower() in SNAP_CHANNELS:
- return True
- return False
+ if '/' in _src:
+ channel = _src.split('/')[1]
+ else:
+ # Handle snap:track with no channel
+ channel = 'stable'
+ return valid_snap_channel(channel)
def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
@@ -2067,7 +2078,7 @@ def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
@param snaps: List of snaps
@param src: String of openstack-origin or source of the form
- snap:channel-series-track
+ snap:track/channel
@param mode: String classic, devmode or jailmode
@returns: Dictionary of snaps with channels and modes
"""
@@ -2077,8 +2088,7 @@ def get_snaps_install_info_from_origin(snaps, src, mode='classic'):
return {}
_src = src[5:]
- _channel, _series, _release = _src.split('-')
- channel = '--channel={}/{}'.format(_release, _channel)
+ channel = '--channel={}'.format(_src)
return {snap: {'channel': channel, 'mode': mode}
for snap in snaps}
@@ -2090,8 +2100,8 @@ def install_os_snaps(snaps, refresh=False):
@param snaps: Dictionary of snaps with channels and modes of the form:
{'snap_name': {'channel': 'snap_channel',
'mode': 'snap_mode'}}
- Where channel a snapstore channel and mode is --classic, --devmode or
- --jailmode.
+ Where channel is a snapstore channel and mode is --classic, --devmode
+ or --jailmode.
@param post_snap_install: Callback function to run after snaps have been
installed
"""
diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py
index e5a01b1..3923161 100644
--- a/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py
@@ -370,9 +370,10 @@ def get_mon_map(service):
Also raises CalledProcessError if our ceph command fails
"""
try:
- mon_status = check_output(
- ['ceph', '--id', service,
- 'mon_status', '--format=json'])
+ mon_status = check_output(['ceph', '--id', service,
+ 'mon_status', '--format=json'])
+ if six.PY3:
+ mon_status = mon_status.decode('UTF-8')
try:
return json.loads(mon_status)
except ValueError as v:
@@ -457,7 +458,7 @@ def monitor_key_get(service, key):
try:
output = check_output(
['ceph', '--id', service,
- 'config-key', 'get', str(key)])
+ 'config-key', 'get', str(key)]).decode('UTF-8')
return output
except CalledProcessError as e:
log("Monitor config-key get failed with message: {}".format(
@@ -500,6 +501,8 @@ def get_erasure_profile(service, name):
out = check_output(['ceph', '--id', service,
'osd', 'erasure-code-profile', 'get',
name, '--format=json'])
+ if six.PY3:
+ out = out.decode('UTF-8')
return json.loads(out)
except (CalledProcessError, OSError, ValueError):
return None
@@ -686,7 +689,10 @@ def get_cache_mode(service, pool_name):
"""
validator(value=service, valid_type=six.string_types)
validator(value=pool_name, valid_type=six.string_types)
- out = check_output(['ceph', '--id', service, 'osd', 'dump', '--format=json'])
+ out = check_output(['ceph', '--id', service,
+ 'osd', 'dump', '--format=json'])
+ if six.PY3:
+ out = out.decode('UTF-8')
try:
osd_json = json.loads(out)
for pool in osd_json['pools']:
@@ -700,8 +706,9 @@ def get_cache_mode(service, pool_name):
def pool_exists(service, name):
"""Check to see if a RADOS pool already exists."""
try:
- out = check_output(['rados', '--id', service,
- 'lspools']).decode('UTF-8')
+ out = check_output(['rados', '--id', service, 'lspools'])
+ if six.PY3:
+ out = out.decode('UTF-8')
except CalledProcessError:
return False
@@ -714,9 +721,12 @@ def get_osds(service):
"""
version = ceph_version()
if version and version >= '0.56':
- return json.loads(check_output(['ceph', '--id', service,
- 'osd', 'ls',
- '--format=json']).decode('UTF-8'))
+ out = check_output(['ceph', '--id', service,
+ 'osd', 'ls',
+ '--format=json'])
+ if six.PY3:
+ out = out.decode('UTF-8')
+ return json.loads(out)
return None
@@ -734,7 +744,9 @@ def rbd_exists(service, pool, rbd_img):
"""Check to see if a RADOS block device exists."""
try:
out = check_output(['rbd', 'list', '--id',
- service, '--pool', pool]).decode('UTF-8')
+ service, '--pool', pool])
+ if six.PY3:
+ out = out.decode('UTF-8')
except CalledProcessError:
return False
@@ -859,7 +871,9 @@ def configure(service, key, auth, use_syslog):
def image_mapped(name):
"""Determine whether a RADOS block device is mapped locally."""
try:
- out = check_output(['rbd', 'showmapped']).decode('UTF-8')
+ out = check_output(['rbd', 'showmapped'])
+ if six.PY3:
+ out = out.decode('UTF-8')
except CalledProcessError:
return False
@@ -1018,7 +1032,9 @@ def ceph_version():
"""Retrieve the local version of ceph."""
if os.path.exists('/usr/bin/ceph'):
cmd = ['ceph', '-v']
- output = check_output(cmd).decode('US-ASCII')
+ output = check_output(cmd)
+ if six.PY3:
+ output = output.decode('UTF-8')
output = output.split()
if len(output) > 3:
return output[2]
diff --git a/hooks/charmhelpers/contrib/storage/linux/lvm.py b/hooks/charmhelpers/contrib/storage/linux/lvm.py
index 4719f53..7f2a060 100644
--- a/hooks/charmhelpers/contrib/storage/linux/lvm.py
+++ b/hooks/charmhelpers/contrib/storage/linux/lvm.py
@@ -74,10 +74,10 @@ def list_lvm_volume_group(block_device):
'''
vg = None
pvd = check_output(['pvdisplay', block_device]).splitlines()
- for l in pvd:
- l = l.decode('UTF-8')
- if l.strip().startswith('VG Name'):
- vg = ' '.join(l.strip().split()[2:])
+ for lvm in pvd:
+ lvm = lvm.decode('UTF-8')
+ if lvm.strip().startswith('VG Name'):
+ vg = ' '.join(lvm.strip().split()[2:])
return vg
diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py
index 3dc0df6..c942889 100644
--- a/hooks/charmhelpers/contrib/storage/linux/utils.py
+++ b/hooks/charmhelpers/contrib/storage/linux/utils.py
@@ -64,6 +64,6 @@ def is_device_mounted(device):
'''
try:
out = check_output(['lsblk', '-P', device]).decode('UTF-8')
- except:
+ except Exception:
return False
return bool(re.search(r'MOUNTPOINT=".+"', out))
diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py
index 12f37b2..5a88f79 100644
--- a/hooks/charmhelpers/core/hookenv.py
+++ b/hooks/charmhelpers/core/hookenv.py
@@ -22,6 +22,7 @@ from __future__ import print_function
import copy
from distutils.version import LooseVersion
from functools import wraps
+from collections import namedtuple
import glob
import os
import json
@@ -218,6 +219,8 @@ def principal_unit():
for rid in relation_ids(reltype):
for unit in related_units(rid):
md = _metadata_unit(unit)
+ if not md:
+ continue
subordinate = md.pop('subordinate', None)
if not subordinate:
return unit
@@ -511,7 +514,10 @@ def _metadata_unit(unit):
"""
basedir = os.sep.join(charm_dir().split(os.sep)[:-2])
unitdir = 'unit-{}'.format(unit.replace(os.sep, '-'))
- with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md:
+ joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')
+ if not os.path.exists(joineddir):
+ return None
+ with open(joineddir) as md:
return yaml.safe_load(md)
@@ -639,18 +645,31 @@ def is_relation_made(relation, keys='private-address'):
return False
+def _port_op(op_name, port, protocol="TCP"):
+ """Open or close a service network port"""
+ _args = [op_name]
+ icmp = protocol.upper() == "ICMP"
+ if icmp:
+ _args.append(protocol)
+ else:
+ _args.append('{}/{}'.format(port, protocol))
+ try:
+ subprocess.check_call(_args)
+ except subprocess.CalledProcessError:
+ # Older Juju pre 2.3 doesn't support ICMP
+ # so treat it as a no-op if it fails.
+ if not icmp:
+ raise
+
+
def open_port(port, protocol="TCP"):
"""Open a service network port"""
- _args = ['open-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
+ _port_op('open-port', port, protocol)
def close_port(port, protocol="TCP"):
"""Close a service network port"""
- _args = ['close-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
+ _port_op('close-port', port, protocol)
def open_ports(start, end, protocol="TCP"):
@@ -667,6 +686,17 @@ def close_ports(start, end, protocol="TCP"):
subprocess.check_call(_args)
+def opened_ports():
+ """Get the opened ports
+
+ *Note that this will only show ports opened in a previous hook*
+
+ :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']``
+ """
+ _args = ['opened-ports', '--format=json']
+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+
+
@cached
def unit_get(attribute):
"""Get the unit ID for the remote unit"""
@@ -1077,6 +1107,35 @@ def network_get_primary_address(binding):
return subprocess.check_output(cmd).decode('UTF-8').strip()
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def network_get(endpoint, relation_id=None):
+ """
+ Retrieve the network details for a relation endpoint
+
+ :param endpoint: string. The name of a relation endpoint
+ :param relation_id: int. The ID of the relation for the current context.
+ :return: dict. The loaded YAML output of the network-get query.
+ :raise: NotImplementedError if run on Juju < 2.1
+ """
+ cmd = ['network-get', endpoint, '--format', 'yaml']
+ if relation_id:
+ cmd.append('-r')
+ cmd.append(relation_id)
+ try:
+ response = subprocess.check_output(
+ cmd,
+ stderr=subprocess.STDOUT).decode('UTF-8').strip()
+ except CalledProcessError as e:
+ # Early versions of Juju 2.0.x required the --primary-address argument.
+ # We catch that condition here and raise NotImplementedError since
+ # the requested semantics are not available - the caller can then
+ # use the network_get_primary_address() method instead.
+ if '--primary-address is currently required' in e.output.decode('UTF-8'):
+ raise NotImplementedError
+ raise
+ return yaml.safe_load(response)
+
+
def add_metric(*args, **kwargs):
"""Add metric values. Values may be expressed with keyword arguments. For
metric names containing dashes, these may be expressed as one or more
@@ -1106,3 +1165,42 @@ def meter_info():
"""Get the meter status information, if running in the meter-status-changed
hook."""
return os.environ.get('JUJU_METER_INFO')
+
+
+def iter_units_for_relation_name(relation_name):
+ """Iterate through all units in a relation
+
+ Generator that iterates through all the units in a relation and yields
+ a named tuple with rid and unit field names.
+
+ Usage:
+ data = [(u.rid, u.unit)
+ for u in iter_units_for_relation_name(relation_name)]
+
+ :param relation_name: string relation name
+ :yield: Named Tuple with rid and unit field names
+ """
+ RelatedUnit = namedtuple('RelatedUnit', 'rid, unit')
+ for rid in relation_ids(relation_name):
+ for unit in related_units(rid):
+ yield RelatedUnit(rid, unit)
+
+
+def ingress_address(rid=None, unit=None):
+ """
+ Retrieve the ingress-address from a relation when available. Otherwise,
+ return the private-address. This function is to be used on the consuming
+ side of the relation.
+
+ Usage:
+ addresses = [ingress_address(rid=u.rid, unit=u.unit)
+ for u in iter_units_for_relation_name(relation_name)]
+
+ :param rid: string relation id
+ :param unit: string unit name
+ :side effect: calls relation_get
+ :return: string IP address
+ """
+ settings = relation_get(rid=rid, unit=unit)
+ return (settings.get('ingress-address') or
+ settings.get('private-address'))
diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py
index 5656e2f..5cc5c86 100644
--- a/hooks/charmhelpers/core/host.py
+++ b/hooks/charmhelpers/core/host.py
@@ -34,7 +34,7 @@ import six
from contextlib import contextmanager
from collections import OrderedDict
-from .hookenv import log, DEBUG
+from .hookenv import log, DEBUG, local_unit
from .fstab import Fstab
from charmhelpers.osplatform import get_platform
@@ -441,6 +441,49 @@ def add_user_to_group(username, group):
subprocess.check_call(cmd)
+def chage(username, lastday=None, expiredate=None, inactive=None,
+ mindays=None, maxdays=None, root=None, warndays=None):
+ """Change user password expiry information
+
+ :param str username: User to update
+ :param str lastday: Set when password was changed in YYYY-MM-DD format
+ :param str expiredate: Set when user's account will no longer be
+ accessible in YYYY-MM-DD format.
+ -1 will remove an account expiration date.
+ :param str inactive: Set the number of days of inactivity after a password
+ has expired before the account is locked.
+ -1 will remove an account's inactivity.
+ :param str mindays: Set the minimum number of days between password
+ changes to MIN_DAYS.
+ 0 indicates the password can be changed anytime.
+ :param str maxdays: Set the maximum number of days during which a
+ password is valid.
+ -1 as MAX_DAYS will remove checking maxdays
+ :param str root: Apply changes in the CHROOT_DIR directory
+ :param str warndays: Set the number of days of warning before a password
+ change is required
+ :raises subprocess.CalledProcessError: if call to chage fails
+ """
+ cmd = ['chage']
+ if root:
+ cmd.extend(['--root', root])
+ if lastday:
+ cmd.extend(['--lastday', lastday])
+ if expiredate:
+ cmd.extend(['--expiredate', expiredate])
+ if inactive:
+ cmd.extend(['--inactive', inactive])
+ if mindays:
+ cmd.extend(['--mindays', mindays])
+ if maxdays:
+ cmd.extend(['--maxdays', maxdays])
+ if warndays:
+ cmd.extend(['--warndays', warndays])
+ cmd.append(username)
+ subprocess.check_call(cmd)
+
+remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1')
+
def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
"""Replicate the contents of a path"""
options = options or ['--delete', '--executability']
@@ -946,3 +989,31 @@ def updatedb(updatedb_text, new_path):
lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
output = "\n".join(lines)
return output
+
+
+def modulo_distribution(modulo=3, wait=30):
+ """ Modulo distribution
+
+ This helper uses the unit number, a modulo value and a constant wait time
+ to produce a calculated wait time distribution. This is useful in large
+ scale deployments to distribute load during an expensive operation such as
+ service restarts.
+
+ If you have 1000 nodes that need to restart 100 at a time 1 minute at a
+ time:
+
+ time.wait(modulo_distribution(modulo=100, wait=60))
+ restart()
+
+ If you need restarts to happen serially set modulo to the exact number of
+ nodes and set a high constant wait time:
+
+ time.wait(modulo_distribution(modulo=10, wait=120))
+ restart()
+
+ @param modulo: int The modulo number creates the group distribution
+ @param wait: int The constant time wait value
+ @return: int Calculated time to wait for unit operation
+ """
+ unit_number = int(local_unit().split('/')[1])
+ return (unit_number % modulo) * wait
diff --git a/hooks/charmhelpers/core/strutils.py b/hooks/charmhelpers/core/strutils.py
index 685dabd..e8df045 100644
--- a/hooks/charmhelpers/core/strutils.py
+++ b/hooks/charmhelpers/core/strutils.py
@@ -61,13 +61,19 @@ def bytes_from_string(value):
if isinstance(value, six.string_types):
value = six.text_type(value)
else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
+ msg = "Unable to interpret non-string value '%s' as bytes" % (value)
raise ValueError(msg)
matches = re.match("([0-9]+)([a-zA-Z]+)", value)
- if not matches:
- msg = "Unable to interpret string value '%s' as bytes" % (value)
- raise ValueError(msg)
- return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+ if matches:
+ size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+ else:
+ # Assume that value passed in is bytes
+ try:
+ size = int(value)
+ except ValueError:
+ msg = "Unable to interpret string value '%s' as bytes" % (value)
+ raise ValueError(msg)
+ return size
class BasicStringComparator(object):
diff --git a/hooks/charmhelpers/core/unitdata.py b/hooks/charmhelpers/core/unitdata.py
index 54ec969..7af875c 100644
--- a/hooks/charmhelpers/core/unitdata.py
+++ b/hooks/charmhelpers/core/unitdata.py
@@ -358,7 +358,7 @@ class Storage(object):
try:
yield self.revision
self.revision = None
- except:
+ except Exception:
self.flush(False)
self.revision = None
raise
diff --git a/hooks/charmhelpers/fetch/snap.py b/hooks/charmhelpers/fetch/snap.py
index 112a54c..395836c 100644
--- a/hooks/charmhelpers/fetch/snap.py
+++ b/hooks/charmhelpers/fetch/snap.py
@@ -41,6 +41,10 @@ class CouldNotAcquireLockException(Exception):
pass
+class InvalidSnapChannel(Exception):
+ pass
+
+
def _snap_exec(commands):
"""
Execute snap commands.
@@ -132,3 +136,15 @@ def snap_refresh(packages, *flags):
log(message, level='INFO')
return _snap_exec(['refresh'] + flags + packages)
+
+
+def valid_snap_channel(channel):
+ """ Validate snap channel exists
+
+ :raises InvalidSnapChannel: When channel does not exist
+ :return: Boolean
+ """
+ if channel.lower() in SNAP_CHANNELS:
+ return True
+ else:
+ raise InvalidSnapChannel("Invalid Snap Channel: {}".format(channel))
diff --git a/hooks/charmhelpers/fetch/ubuntu.py b/hooks/charmhelpers/fetch/ubuntu.py
index 40e1cb5..910e96a 100644
--- a/hooks/charmhelpers/fetch/ubuntu.py
+++ b/hooks/charmhelpers/fetch/ubuntu.py
@@ -572,7 +572,7 @@ def get_upstream_version(package):
cache = apt_cache()
try:
pkg = cache[package]
- except:
+ except Exception:
# the package is unknown to the current apt cache.
return None
diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
index 5c041d2..e37f283 100644
--- a/tests/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -250,7 +250,14 @@ class OpenStackAmuletDeployment(AmuletDeployment):
self.log.debug('Waiting up to {}s for extended status on services: '
'{}'.format(timeout, services))
service_messages = {service: message for service in services}
+
+ # Check for idleness
+ self.d.sentry.wait()
+ # Check for error states and bail early
+ self.d.sentry.wait_for_status(self.d.juju_env, services)
+ # Check for ready messages
self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
+
self.log.info('OK')
def _get_openstack_release(self):
@@ -263,7 +270,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
(self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty,
self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton,
self.yakkety_newton, self.xenial_ocata, self.zesty_ocata,
- self.xenial_pike, self.artful_pike) = range(11)
+ self.xenial_pike, self.artful_pike, self.xenial_queens,
+ self.bionic_queens,) = range(13)
releases = {
('trusty', None): self.trusty_icehouse,
@@ -274,9 +282,11 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('xenial', 'cloud:xenial-newton'): self.xenial_newton,
('xenial', 'cloud:xenial-ocata'): self.xenial_ocata,
('xenial', 'cloud:xenial-pike'): self.xenial_pike,
+ ('xenial', 'cloud:xenial-queens'): self.xenial_queens,
('yakkety', None): self.yakkety_newton,
('zesty', None): self.zesty_ocata,
('artful', None): self.artful_pike,
+ ('bionic', None): self.bionic_queens,
}
return releases[(self.series, self.openstack)]
@@ -291,6 +301,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('yakkety', 'newton'),
('zesty', 'ocata'),
('artful', 'pike'),
+ ('bionic', 'queens'),
])
if self.openstack:
os_origin = self.openstack.split(':')[1]
@@ -303,20 +314,27 @@ class OpenStackAmuletDeployment(AmuletDeployment):
test scenario, based on OpenStack release and whether ceph radosgw
is flagged as present or not."""
- if self._get_openstack_release() >= self.trusty_kilo:
- # Kilo or later
+ if self._get_openstack_release() == self.trusty_icehouse:
+ # Icehouse
pools = [
+ 'data',
+ 'metadata',
'rbd',
- 'cinder',
+ 'cinder-ceph',
'glance'
]
- else:
- # Juno or earlier
+ elif (self.trusty_kilo <= self._get_openstack_release() <=
+ self.zesty_ocata):
+ # Kilo through Ocata
pools = [
- 'data',
- 'metadata',
'rbd',
- 'cinder',
+ 'cinder-ceph',
+ 'glance'
+ ]
+ else:
+ # Pike and later
+ pools = [
+ 'cinder-ceph',
'glance'
]
diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py
index c8edbf6..b71b2b1 100644
--- a/tests/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py
@@ -23,6 +23,7 @@ import urllib
import urlparse
import cinderclient.v1.client as cinder_client
+import cinderclient.v2.client as cinder_clientv2
import glanceclient.v1.client as glance_client
import heatclient.v1.client as heat_client
from keystoneclient.v2_0 import client as keystone_client
@@ -42,7 +43,6 @@ import swiftclient
from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
-from charmhelpers.core.decorators import retry_on_exception
from charmhelpers.core.host import CompareHostReleases
DEBUG = logging.DEBUG
@@ -310,7 +310,6 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Checking if tenant exists ({})...'.format(tenant))
return tenant in [t.name for t in keystone.tenants.list()]
- @retry_on_exception(5, base_delay=10)
def keystone_wait_for_propagation(self, sentry_relation_pairs,
api_version):
"""Iterate over list of sentry and relation tuples and verify that
@@ -326,7 +325,7 @@ class OpenStackAmuletUtils(AmuletUtils):
rel = sentry.relation('identity-service',
relation_name)
self.log.debug('keystone relation data: {}'.format(rel))
- if rel['api_version'] != str(api_version):
+ if rel.get('api_version') != str(api_version):
raise Exception("api_version not propagated through relation"
" data yet ('{}' != '{}')."
"".format(rel['api_version'], api_version))
@@ -348,15 +347,19 @@ class OpenStackAmuletUtils(AmuletUtils):
config = {'preferred-api-version': api_version}
deployment.d.configure('keystone', config)
+ deployment._auto_wait_for_status()
self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
def authenticate_cinder_admin(self, keystone_sentry, username,
- password, tenant):
+ password, tenant, api_version=2):
"""Authenticates admin user with cinder."""
# NOTE(beisner): cinder python client doesn't accept tokens.
keystone_ip = keystone_sentry.info['public-address']
ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
- return cinder_client.Client(username, password, tenant, ept)
+ _clients = {
+ 1: cinder_client.Client,
+ 2: cinder_clientv2.Client}
+ return _clients[api_version](username, password, tenant, ept)
def authenticate_keystone(self, keystone_ip, username, password,
api_version=False, admin_port=False,
@@ -617,13 +620,25 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Keypair ({}) already exists, '
'using it.'.format(keypair_name))
return _keypair
- except:
+ except Exception:
self.log.debug('Keypair ({}) does not exist, '
'creating it.'.format(keypair_name))
_keypair = nova.keypairs.create(name=keypair_name)
return _keypair
+ def _get_cinder_obj_name(self, cinder_object):
+ """Retrieve name of cinder object.
+
+ :param cinder_object: cinder snapshot or volume object
+ :returns: str cinder object name
+ """
+ # v1 objects store name in 'display_name' attr but v2+ use 'name'
+ try:
+ return cinder_object.display_name
+ except AttributeError:
+ return cinder_object.name
+
def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
img_id=None, src_vol_id=None, snap_id=None):
"""Create cinder volume, optionally from a glance image, OR
@@ -674,6 +689,13 @@ class OpenStackAmuletUtils(AmuletUtils):
source_volid=src_vol_id,
snapshot_id=snap_id)
vol_id = vol_new.id
+ except TypeError:
+ vol_new = cinder.volumes.create(name=vol_name,
+ imageRef=img_id,
+ size=vol_size,
+ source_volid=src_vol_id,
+ snapshot_id=snap_id)
+ vol_id = vol_new.id
except Exception as e:
msg = 'Failed to create volume: {}'.format(e)
amulet.raise_status(amulet.FAIL, msg=msg)
@@ -688,7 +710,7 @@ class OpenStackAmuletUtils(AmuletUtils):
# Re-validate new volume
self.log.debug('Validating volume attributes...')
- val_vol_name = cinder.volumes.get(vol_id).display_name
+ val_vol_name = self._get_cinder_obj_name(cinder.volumes.get(vol_id))
val_vol_boot = cinder.volumes.get(vol_id).bootable
val_vol_stat = cinder.volumes.get(vol_id).status
val_vol_size = cinder.volumes.get(vol_id).size
diff --git a/tests/charmhelpers/core/hookenv.py b/tests/charmhelpers/core/hookenv.py
index 12f37b2..5a88f79 100644
--- a/tests/charmhelpers/core/hookenv.py
+++ b/tests/charmhelpers/core/hookenv.py
@@ -22,6 +22,7 @@ from __future__ import print_function
import copy
from distutils.version import LooseVersion
from functools import wraps
+from collections import namedtuple
import glob
import os
import json
@@ -218,6 +219,8 @@ def principal_unit():
for rid in relation_ids(reltype):
for unit in related_units(rid):
md = _metadata_unit(unit)
+ if not md:
+ continue
subordinate = md.pop('subordinate', None)
if not subordinate:
return unit
@@ -511,7 +514,10 @@ def _metadata_unit(unit):
"""
basedir = os.sep.join(charm_dir().split(os.sep)[:-2])
unitdir = 'unit-{}'.format(unit.replace(os.sep, '-'))
- with open(os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')) as md:
+ joineddir = os.path.join(basedir, unitdir, 'charm', 'metadata.yaml')
+ if not os.path.exists(joineddir):
+ return None
+ with open(joineddir) as md:
return yaml.safe_load(md)
@@ -639,18 +645,31 @@ def is_relation_made(relation, keys='private-address'):
return False
+def _port_op(op_name, port, protocol="TCP"):
+ """Open or close a service network port"""
+ _args = [op_name]
+ icmp = protocol.upper() == "ICMP"
+ if icmp:
+ _args.append(protocol)
+ else:
+ _args.append('{}/{}'.format(port, protocol))
+ try:
+ subprocess.check_call(_args)
+ except subprocess.CalledProcessError:
+ # Older Juju pre 2.3 doesn't support ICMP
+ # so treat it as a no-op if it fails.
+ if not icmp:
+ raise
+
+
def open_port(port, protocol="TCP"):
"""Open a service network port"""
- _args = ['open-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
+ _port_op('open-port', port, protocol)
def close_port(port, protocol="TCP"):
"""Close a service network port"""
- _args = ['close-port']
- _args.append('{}/{}'.format(port, protocol))
- subprocess.check_call(_args)
+ _port_op('close-port', port, protocol)
def open_ports(start, end, protocol="TCP"):
@@ -667,6 +686,17 @@ def close_ports(start, end, protocol="TCP"):
subprocess.check_call(_args)
+def opened_ports():
+ """Get the opened ports
+
+ *Note that this will only show ports opened in a previous hook*
+
+ :returns: Opened ports as a list of strings: ``['8080/tcp', '8081-8083/tcp']``
+ """
+ _args = ['opened-ports', '--format=json']
+ return json.loads(subprocess.check_output(_args).decode('UTF-8'))
+
+
@cached
def unit_get(attribute):
"""Get the unit ID for the remote unit"""
@@ -1077,6 +1107,35 @@ def network_get_primary_address(binding):
return subprocess.check_output(cmd).decode('UTF-8').strip()
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def network_get(endpoint, relation_id=None):
+ """
+ Retrieve the network details for a relation endpoint
+
+ :param endpoint: string. The name of a relation endpoint
+ :param relation_id: int. The ID of the relation for the current context.
+ :return: dict. The loaded YAML output of the network-get query.
+ :raise: NotImplementedError if run on Juju < 2.1
+ """
+ cmd = ['network-get', endpoint, '--format', 'yaml']
+ if relation_id:
+ cmd.append('-r')
+ cmd.append(relation_id)
+ try:
+ response = subprocess.check_output(
+ cmd,
+ stderr=subprocess.STDOUT).decode('UTF-8').strip()
+ except CalledProcessError as e:
+ # Early versions of Juju 2.0.x required the --primary-address argument.
+ # We catch that condition here and raise NotImplementedError since
+ # the requested semantics are not available - the caller can then
+ # use the network_get_primary_address() method instead.
+ if '--primary-address is currently required' in e.output.decode('UTF-8'):
+ raise NotImplementedError
+ raise
+ return yaml.safe_load(response)
+
+
def add_metric(*args, **kwargs):
"""Add metric values. Values may be expressed with keyword arguments. For
metric names containing dashes, these may be expressed as one or more
@@ -1106,3 +1165,42 @@ def meter_info():
"""Get the meter status information, if running in the meter-status-changed
hook."""
return os.environ.get('JUJU_METER_INFO')
+
+
+def iter_units_for_relation_name(relation_name):
+ """Iterate through all units in a relation
+
+ Generator that iterates through all the units in a relation and yields
+ a named tuple with rid and unit field names.
+
+ Usage:
+ data = [(u.rid, u.unit)
+ for u in iter_units_for_relation_name(relation_name)]
+
+ :param relation_name: string relation name
+ :yield: Named Tuple with rid and unit field names
+ """
+ RelatedUnit = namedtuple('RelatedUnit', 'rid, unit')
+ for rid in relation_ids(relation_name):
+ for unit in related_units(rid):
+ yield RelatedUnit(rid, unit)
+
+
+def ingress_address(rid=None, unit=None):
+ """
+ Retrieve the ingress-address from a relation when available. Otherwise,
+ return the private-address. This function is to be used on the consuming
+ side of the relation.
+
+ Usage:
+ addresses = [ingress_address(rid=u.rid, unit=u.unit)
+ for u in iter_units_for_relation_name(relation_name)]
+
+ :param rid: string relation id
+ :param unit: string unit name
+ :side effect: calls relation_get
+ :return: string IP address
+ """
+ settings = relation_get(rid=rid, unit=unit)
+ return (settings.get('ingress-address') or
+ settings.get('private-address'))
diff --git a/tests/charmhelpers/core/host.py b/tests/charmhelpers/core/host.py
index 5656e2f..5cc5c86 100644
--- a/tests/charmhelpers/core/host.py
+++ b/tests/charmhelpers/core/host.py
@@ -34,7 +34,7 @@ import six
from contextlib import contextmanager
from collections import OrderedDict
-from .hookenv import log, DEBUG
+from .hookenv import log, DEBUG, local_unit
from .fstab import Fstab
from charmhelpers.osplatform import get_platform
@@ -441,6 +441,49 @@ def add_user_to_group(username, group):
subprocess.check_call(cmd)
+def chage(username, lastday=None, expiredate=None, inactive=None,
+ mindays=None, maxdays=None, root=None, warndays=None):
+ """Change user password expiry information
+
+ :param str username: User to update
+ :param str lastday: Set when password was changed in YYYY-MM-DD format
+ :param str expiredate: Set when user's account will no longer be
+ accessible in YYYY-MM-DD format.
+ -1 will remove an account expiration date.
+ :param str inactive: Set the number of days of inactivity after a password
+ has expired before the account is locked.
+ -1 will remove an account's inactivity.
+ :param str mindays: Set the minimum number of days between password
+ changes to MIN_DAYS.
+ 0 indicates the password can be changed anytime.
+ :param str maxdays: Set the maximum number of days during which a
+ password is valid.
+ -1 as MAX_DAYS will remove checking maxdays
+ :param str root: Apply changes in the CHROOT_DIR directory
+ :param str warndays: Set the number of days of warning before a password
+ change is required
+ :raises subprocess.CalledProcessError: if call to chage fails
+ """
+ cmd = ['chage']
+ if root:
+ cmd.extend(['--root', root])
+ if lastday:
+ cmd.extend(['--lastday', lastday])
+ if expiredate:
+ cmd.extend(['--expiredate', expiredate])
+ if inactive:
+ cmd.extend(['--inactive', inactive])
+ if mindays:
+ cmd.extend(['--mindays', mindays])
+ if maxdays:
+ cmd.extend(['--maxdays', maxdays])
+ if warndays:
+ cmd.extend(['--warndays', warndays])
+ cmd.append(username)
+ subprocess.check_call(cmd)
+
+remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1')
+
def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
"""Replicate the contents of a path"""
options = options or ['--delete', '--executability']
@@ -946,3 +989,31 @@ def updatedb(updatedb_text, new_path):
lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
output = "\n".join(lines)
return output
+
+
+def modulo_distribution(modulo=3, wait=30):
+ """ Modulo distribution
+
+ This helper uses the unit number, a modulo value and a constant wait time
+ to produce a calculated wait time distribution. This is useful in large
+ scale deployments to distribute load during an expensive operation such as
+ service restarts.
+
+ If you have 1000 nodes that need to restart 100 at a time 1 minute at a
+ time:
+
+ time.wait(modulo_distribution(modulo=100, wait=60))
+ restart()
+
+ If you need restarts to happen serially set modulo to the exact number of
+ nodes and set a high constant wait time:
+
+ time.wait(modulo_distribution(modulo=10, wait=120))
+ restart()
+
+ @param modulo: int The modulo number creates the group distribution
+ @param wait: int The constant time wait value
+ @return: int Calculated time to wait for unit operation
+ """
+ unit_number = int(local_unit().split('/')[1])
+ return (unit_number % modulo) * wait
diff --git a/tests/charmhelpers/core/strutils.py b/tests/charmhelpers/core/strutils.py
index 685dabd..e8df045 100644
--- a/tests/charmhelpers/core/strutils.py
+++ b/tests/charmhelpers/core/strutils.py
@@ -61,13 +61,19 @@ def bytes_from_string(value):
if isinstance(value, six.string_types):
value = six.text_type(value)
else:
- msg = "Unable to interpret non-string value '%s' as boolean" % (value)
+ msg = "Unable to interpret non-string value '%s' as bytes" % (value)
raise ValueError(msg)
matches = re.match("([0-9]+)([a-zA-Z]+)", value)
- if not matches:
- msg = "Unable to interpret string value '%s' as bytes" % (value)
- raise ValueError(msg)
- return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+ if matches:
+ size = int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
+ else:
+ # Assume that value passed in is bytes
+ try:
+ size = int(value)
+ except ValueError:
+ msg = "Unable to interpret string value '%s' as bytes" % (value)
+ raise ValueError(msg)
+ return size
class BasicStringComparator(object):
diff --git a/tests/charmhelpers/core/unitdata.py b/tests/charmhelpers/core/unitdata.py
index 54ec969..7af875c 100644
--- a/tests/charmhelpers/core/unitdata.py
+++ b/tests/charmhelpers/core/unitdata.py
@@ -358,7 +358,7 @@ class Storage(object):
try:
yield self.revision
self.revision = None
- except:
+ except Exception:
self.flush(False)
self.revision = None
raise
diff --git a/tests/gate-basic-xenial-pike b/tests/gate-basic-xenial-pike
new file mode 100755
index 0000000..2d8e25c
--- /dev/null
+++ b/tests/gate-basic-xenial-pike
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+#
+# Copyright 2016 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Amulet tests on a basic lxd deployment on xenial-pike."""
+
+from basic_deployment import LXDBasicDeployment
+
+if __name__ == '__main__':
+ deployment = LXDBasicDeployment(series='xenial',
+ openstack='cloud:xenial-pike',
+ source='cloud:xenial-updates/pike')
+ deployment.run_tests()
diff --git a/tox.ini b/tox.ini
index 7c2936e..6d44f4b 100644
--- a/tox.ini
+++ b/tox.ini
@@ -60,7 +60,7 @@ basepython = python2.7
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
commands =
- bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-mitaka --no-destroy
+ bundletester -vl DEBUG -r json -o func-results.json gate-basic-xenial-pike --no-destroy
[testenv:func27-dfs]
# Charm Functional Test

This mirror site includes all the OpenStack-related repositories under: openstack, openstack-dev and openstack-infra.

NOTE: All repositories are updated every hour.

Usage

For Git Clone
 git clone http://git.trystack.cn/openstack/nova.git 
For DevStack

Add the GIT_BASE, NOVNC_REPO and SPICE_REPO variables to the local.conf file.

[[local|localrc]]

# use TryStack git mirror
GIT_BASE=http://git.trystack.cn
NOVNC_REPO=http://git.trystack.cn/kanaka/noVNC.git
SPICE_REPO=http://git.trystack.cn/git/spice/spice-html5.git