author     Ryan Beisner <ryan.beisner@canonical.com>  2018-08-23 10:00:36 -0500
committer  David Ames <david.ames@canonical.com>      2018-09-20 12:28:01 +0200
commit     72ada2bc203b228d827f6b27cd50499608ea9e29 (patch)
tree       b2cff99b4b9992157b2d74fdc2293aa06f3ee8d6
parent     4d7b3e924008af878b0c369c1bd3e92238d51a93 (diff)
download   charm-nova-lxd-72ada2bc203b228d827f6b27cd50499608ea9e29.{zip,tar.gz,tar.bz2}
Sync charm helpers and enable rocky func test
Change-Id: I898b4f28f8b65a7ba3b244ca6d0e27405a4e9ac5
-rw-r--r--  hooks/charmhelpers/contrib/charmsupport/nrpe.py | 15
-rw-r--r--  hooks/charmhelpers/contrib/hahelpers/cluster.py | 5
-rw-r--r--  hooks/charmhelpers/contrib/network/ovs/__init__.py | 57
-rw-r--r--  hooks/charmhelpers/contrib/openstack/amulet/deployment.py | 3
-rw-r--r--  hooks/charmhelpers/contrib/openstack/amulet/utils.py | 14
-rw-r--r--  hooks/charmhelpers/contrib/openstack/cert_utils.py | 227
-rw-r--r--  hooks/charmhelpers/contrib/openstack/context.py | 80
-rw-r--r--  hooks/charmhelpers/contrib/openstack/ip.py | 10
-rw-r--r--  hooks/charmhelpers/contrib/openstack/ssh_migrations.py | 412
-rw-r--r--  hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf | 6
-rw-r--r--  hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf | 91
-rw-r--r--  hooks/charmhelpers/contrib/openstack/utils.py | 52
-rw-r--r--  hooks/charmhelpers/core/hookenv.py | 65
-rw-r--r--  hooks/charmhelpers/core/host.py | 14
-rw-r--r--  hooks/charmhelpers/fetch/ubuntu.py | 10
-rw-r--r--  tests/basic_deployment.py | 17
-rw-r--r--  tests/charmhelpers/contrib/amulet/deployment.py | 6
-rw-r--r--  tests/charmhelpers/contrib/openstack/amulet/deployment.py | 3
-rw-r--r--  tests/charmhelpers/contrib/openstack/amulet/utils.py | 136
-rw-r--r--  tests/charmhelpers/core/hookenv.py | 65
-rw-r--r--  tests/charmhelpers/core/host.py | 14
-rwxr-xr-x  tests/gate-basic-bionic-rocky (renamed from tests/dev-basic-bionic-rocky) | 0
-rw-r--r--  tests/lxd_amulet_utils.py | 133
23 files changed, 1202 insertions, 233 deletions
diff --git a/hooks/charmhelpers/contrib/charmsupport/nrpe.py b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
index 1c55b30..e3d10c1 100644
--- a/hooks/charmhelpers/contrib/charmsupport/nrpe.py
+++ b/hooks/charmhelpers/contrib/charmsupport/nrpe.py
@@ -410,16 +410,21 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
os.chmod(checkpath, 0o644)
-def copy_nrpe_checks():
+def copy_nrpe_checks(nrpe_files_dir=None):
"""
Copy the nrpe checks into place
"""
NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
- nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
- 'charmhelpers', 'contrib', 'openstack',
- 'files')
-
+ default_nrpe_files_dir = os.path.join(
+ os.getenv('CHARM_DIR'),
+ 'hooks',
+ 'charmhelpers',
+ 'contrib',
+ 'openstack',
+ 'files')
+ if not nrpe_files_dir:
+ nrpe_files_dir = default_nrpe_files_dir
if not os.path.exists(NAGIOS_PLUGINS):
os.makedirs(NAGIOS_PLUGINS)
for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
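
The optional nrpe_files_dir argument lets a charm point copy_nrpe_checks() at its own
payload directory instead of the charm-helpers default under $CHARM_DIR. A minimal
usage sketch (the custom directory below is purely illustrative):

    from charmhelpers.contrib.charmsupport.nrpe import copy_nrpe_checks

    # Default behaviour: copy the check_* scripts shipped with charm-helpers.
    copy_nrpe_checks()

    # Or install checks shipped by the charm itself (hypothetical path).
    copy_nrpe_checks(nrpe_files_dir='files/nrpe-checks')
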
diff --git a/hooks/charmhelpers/contrib/hahelpers/cluster.py b/hooks/charmhelpers/contrib/hahelpers/cluster.py
index 47facd9..4a737e2 100644
--- a/hooks/charmhelpers/contrib/hahelpers/cluster.py
+++ b/hooks/charmhelpers/contrib/hahelpers/cluster.py
@@ -223,6 +223,11 @@ def https():
return True
if config_get('ssl_cert') and config_get('ssl_key'):
return True
+ for r_id in relation_ids('certificates'):
+ for unit in relation_list(r_id):
+ ca = relation_get('ca', rid=r_id, unit=unit)
+ if ca:
+ return True
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
# TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
diff --git a/hooks/charmhelpers/contrib/network/ovs/__init__.py b/hooks/charmhelpers/contrib/network/ovs/__init__.py
index 9b3583f..a8856e9 100644
--- a/hooks/charmhelpers/contrib/network/ovs/__init__.py
+++ b/hooks/charmhelpers/contrib/network/ovs/__init__.py
@@ -13,6 +13,7 @@
# limitations under the License.
''' Helpers for interacting with OpenvSwitch '''
+import hashlib
import subprocess
import os
import six
@@ -39,6 +40,8 @@ iface {linuxbridge_port} inet manual
down ip link del {linuxbridge_port}
"""
+MAX_KERNEL_INTERFACE_NAME_LEN = 15
+
def add_bridge(name, datapath_type=None):
''' Add the named bridge to openvswitch '''
@@ -92,16 +95,39 @@ def add_ovsbridge_linuxbridge(name, bridge):
apt_install('python3-netifaces', fatal=True)
import netifaces
+ # NOTE(jamespage):
+ # Older code supported addition of a linuxbridge directly
+ # to an OVS bridge; ensure we don't break uses on upgrade
+ existing_ovs_bridge = port_to_br(bridge)
+ if existing_ovs_bridge is not None:
+ log('Linuxbridge {} is already directly in use'
+ ' by OVS bridge {}'.format(bridge, existing_ovs_bridge),
+ level=INFO)
+ return
+
+ # NOTE(jamespage):
+ # preserve existing naming because interfaces may already exist.
ovsbridge_port = "veth-" + name
linuxbridge_port = "veth-" + bridge
- log('Adding linuxbridge {} to ovsbridge {}'.format(bridge, name),
- level=INFO)
+ if (len(ovsbridge_port) > MAX_KERNEL_INTERFACE_NAME_LEN or
+ len(linuxbridge_port) > MAX_KERNEL_INTERFACE_NAME_LEN):
+ # NOTE(jamespage):
+ # use parts of hashed bridgename (openstack style) when
+ # a bridge name exceeds 15 chars
+ hashed_bridge = hashlib.sha256(bridge.encode('UTF-8')).hexdigest()
+ base = '{}-{}'.format(hashed_bridge[:8], hashed_bridge[-2:])
+ ovsbridge_port = "cvo{}".format(base)
+ linuxbridge_port = "cvb{}".format(base)
+
interfaces = netifaces.interfaces()
for interface in interfaces:
if interface == ovsbridge_port or interface == linuxbridge_port:
log('Interface {} already exists'.format(interface), level=INFO)
return
+ log('Adding linuxbridge {} to ovsbridge {}'.format(bridge, name),
+ level=INFO)
+
check_for_eni_source()
with open('/etc/network/interfaces.d/{}.cfg'.format(
@@ -134,6 +160,20 @@ def set_manager(manager):
'ssl:{}'.format(manager)])
+def set_Open_vSwitch_column_value(column_value):
+ """
+ Calls ovs-vsctl and sets the 'column_value' in the Open_vSwitch table.
+
+ :param column_value:
+ See http://www.openvswitch.org//ovs-vswitchd.conf.db.5.pdf for
+ details of the relevant values.
+    :type column_value: str
+    :raises subprocess.CalledProcessError: possibly ovsdb-server is not running
+ """
+ log('Setting {} in the Open_vSwitch table'.format(column_value))
+ subprocess.check_call(['ovs-vsctl', 'set', 'Open_vSwitch', '.', column_value])
+
+
CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem'
@@ -194,3 +234,16 @@ def disable_ipfix(bridge):
'''
cmd = ['ovs-vsctl', 'clear', 'Bridge', bridge, 'ipfix']
subprocess.check_call(cmd)
+
+
+def port_to_br(port):
+ '''Determine the bridge that contains a port
+ :param port: Name of port to check for
+ :returns str: OVS bridge containing port or None if not found
+ '''
+ try:
+ return subprocess.check_output(
+ ['ovs-vsctl', 'port-to-br', port]
+ ).decode('UTF-8').strip()
+ except subprocess.CalledProcessError:
+ return None
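
port_to_br() and set_Open_vSwitch_column_value() are thin wrappers around ovs-vsctl.
A short sketch, assuming openvswitch is installed and ovsdb-server is running (the
port name and column value are illustrative):

    from charmhelpers.contrib.network import ovs

    # Returns the containing OVS bridge, or None if the port is unattached.
    bridge = ovs.port_to_br('veth-br-ex')
    if bridge:
        print('veth-br-ex already belongs to {}'.format(bridge))

    # Equivalent to: ovs-vsctl set Open_vSwitch . other_config:dpdk-init=true
    ovs.set_Open_vSwitch_column_value('other_config:dpdk-init=true')
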
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
index 66beeda..1c96752 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -291,6 +291,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('zesty', None): self.zesty_ocata,
('artful', None): self.artful_pike,
('bionic', None): self.bionic_queens,
+ ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky,
+ ('cosmic', None): self.cosmic_rocky,
}
return releases[(self.series, self.openstack)]
@@ -306,6 +308,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('zesty', 'ocata'),
('artful', 'pike'),
('bionic', 'queens'),
+ ('cosmic', 'rocky'),
])
if self.openstack:
os_origin = self.openstack.split(':')[1]
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
index 84e87f5..ef4ab54 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
@@ -40,6 +40,7 @@ import novaclient
import pika
import swiftclient
+from charmhelpers.core.decorators import retry_on_exception
from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
@@ -55,7 +56,7 @@ OPENSTACK_RELEASES_PAIRS = [
'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
'xenial_pike', 'artful_pike', 'xenial_queens',
- 'bionic_queens']
+ 'bionic_queens', 'bionic_rocky', 'cosmic_rocky']
class OpenStackAmuletUtils(AmuletUtils):
@@ -423,6 +424,7 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Checking if tenant exists ({})...'.format(tenant))
return tenant in [t.name for t in keystone.tenants.list()]
+ @retry_on_exception(num_retries=5, base_delay=1)
def keystone_wait_for_propagation(self, sentry_relation_pairs,
api_version):
"""Iterate over list of sentry and relation tuples and verify that
@@ -542,7 +544,7 @@ class OpenStackAmuletUtils(AmuletUtils):
return ep
def get_default_keystone_session(self, keystone_sentry,
- openstack_release=None):
+ openstack_release=None, api_version=2):
"""Return a keystone session object and client object assuming standard
default settings
@@ -557,12 +559,12 @@ class OpenStackAmuletUtils(AmuletUtils):
eyc
"""
self.log.debug('Authenticating keystone admin...')
- api_version = 2
- client_class = keystone_client.Client
# 11 => xenial_queens
- if openstack_release and openstack_release >= 11:
- api_version = 3
+ if api_version == 3 or (openstack_release and openstack_release >= 11):
client_class = keystone_client_v3.Client
+ api_version = 3
+ else:
+ client_class = keystone_client.Client
keystone_ip = keystone_sentry.info['public-address']
session, auth = self.get_keystone_session(
keystone_ip,
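
The retry decorator and the new api_version argument target amulet tests that race
Keystone propagation. A hedged sketch of how a test might call the updated helpers;
keystone_sentry and sentry_relation_pairs are placeholders from the test harness, and
the (session, client) unpacking follows the docstring rather than code shown here:

    from charmhelpers.contrib.openstack.amulet.utils import (
        OpenStackAmuletUtils, DEBUG)

    u = OpenStackAmuletUtils(DEBUG)

    # Force Keystone v3 regardless of the release-based default.
    session, keystone = u.get_default_keystone_session(
        keystone_sentry, api_version=3)

    # Now retried up to 5 times (1s base delay) before failing.
    u.keystone_wait_for_propagation(sentry_relation_pairs, 3)
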
diff --git a/hooks/charmhelpers/contrib/openstack/cert_utils.py b/hooks/charmhelpers/contrib/openstack/cert_utils.py
new file mode 100644
index 0000000..de853b5
--- /dev/null
+++ b/hooks/charmhelpers/contrib/openstack/cert_utils.py
@@ -0,0 +1,227 @@
+# Copyright 2014-2018 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Common python helper functions used for OpenStack charm certificates.
+
+import os
+import json
+
+from charmhelpers.contrib.network.ip import (
+ get_hostname,
+ resolve_network_cidr,
+)
+from charmhelpers.core.hookenv import (
+ local_unit,
+ network_get_primary_address,
+ config,
+ relation_get,
+ unit_get,
+ NoNetworkBinding,
+ log,
+ WARNING,
+)
+from charmhelpers.contrib.openstack.ip import (
+ ADMIN,
+ resolve_address,
+ get_vip_in_network,
+ INTERNAL,
+ PUBLIC,
+ ADDRESS_MAP)
+
+from charmhelpers.core.host import (
+ mkdir,
+ write_file,
+)
+
+from charmhelpers.contrib.hahelpers.apache import (
+ install_ca_cert
+)
+
+
+class CertRequest(object):
+
+ """Create a request for certificates to be generated
+ """
+
+ def __init__(self, json_encode=True):
+ self.entries = []
+ self.hostname_entry = None
+ self.json_encode = json_encode
+
+ def add_entry(self, net_type, cn, addresses):
+ """Add a request to the batch
+
+        :param net_type: str network space name the request is for
+ :param cn: str Canonical Name for certificate
+ :param addresses: [] List of addresses to be used as SANs
+ """
+ self.entries.append({
+ 'cn': cn,
+ 'addresses': addresses})
+
+ def add_hostname_cn(self):
+ """Add a request for the hostname of the machine"""
+ ip = unit_get('private-address')
+ addresses = [ip]
+ # If a vip is being used without os-hostname config or
+        # network spaces then we need to ensure the local unit's
+        # cert has the appropriate vip in the SAN list
+ vip = get_vip_in_network(resolve_network_cidr(ip))
+ if vip:
+ addresses.append(vip)
+ self.hostname_entry = {
+ 'cn': get_hostname(ip),
+ 'addresses': addresses}
+
+ def add_hostname_cn_ip(self, addresses):
+ """Add an address to the SAN list for the hostname request
+
+        :param addresses: [] List of addresses to be added
+ """
+ for addr in addresses:
+ if addr not in self.hostname_entry['addresses']:
+ self.hostname_entry['addresses'].append(addr)
+
+ def get_request(self):
+ """Generate request from the batched up entries
+
+ """
+ if self.hostname_entry:
+ self.entries.append(self.hostname_entry)
+ request = {}
+ for entry in self.entries:
+ sans = sorted(list(set(entry['addresses'])))
+ request[entry['cn']] = {'sans': sans}
+ if self.json_encode:
+ return {'cert_requests': json.dumps(request, sort_keys=True)}
+ else:
+ return {'cert_requests': request}
+
+
+def get_certificate_request(json_encode=True):
+ """Generate a certificatee requests based on the network confioguration
+
+ """
+ req = CertRequest(json_encode=json_encode)
+ req.add_hostname_cn()
+ # Add os-hostname entries
+ for net_type in [INTERNAL, ADMIN, PUBLIC]:
+ net_config = config(ADDRESS_MAP[net_type]['override'])
+ try:
+ net_addr = resolve_address(endpoint_type=net_type)
+ ip = network_get_primary_address(
+ ADDRESS_MAP[net_type]['binding'])
+ addresses = [net_addr, ip]
+ vip = get_vip_in_network(resolve_network_cidr(ip))
+ if vip:
+ addresses.append(vip)
+ if net_config:
+ req.add_entry(
+ net_type,
+ net_config,
+ addresses)
+ else:
+                # There is a network address with no corresponding hostname.
+ # Add the ip to the hostname cert to allow for this.
+ req.add_hostname_cn_ip(addresses)
+ except NoNetworkBinding:
+ log("Skipping request for certificate for ip in {} space, no "
+ "local address found".format(net_type), WARNING)
+ return req.get_request()
+
+
+def create_ip_cert_links(ssl_dir, custom_hostname_link=None):
+ """Create symlinks for SAN records
+
+ :param ssl_dir: str Directory to create symlinks in
+ :param custom_hostname_link: str Additional link to be created
+ """
+ hostname = get_hostname(unit_get('private-address'))
+ hostname_cert = os.path.join(
+ ssl_dir,
+ 'cert_{}'.format(hostname))
+ hostname_key = os.path.join(
+ ssl_dir,
+ 'key_{}'.format(hostname))
+ # Add links to hostname cert, used if os-hostname vars not set
+ for net_type in [INTERNAL, ADMIN, PUBLIC]:
+ try:
+ addr = resolve_address(endpoint_type=net_type)
+ cert = os.path.join(ssl_dir, 'cert_{}'.format(addr))
+ key = os.path.join(ssl_dir, 'key_{}'.format(addr))
+ if os.path.isfile(hostname_cert) and not os.path.isfile(cert):
+ os.symlink(hostname_cert, cert)
+ os.symlink(hostname_key, key)
+ except NoNetworkBinding:
+ log("Skipping creating cert symlink for ip in {} space, no "
+ "local address found".format(net_type), WARNING)
+ if custom_hostname_link:
+ custom_cert = os.path.join(
+ ssl_dir,
+ 'cert_{}'.format(custom_hostname_link))
+ custom_key = os.path.join(
+ ssl_dir,
+ 'key_{}'.format(custom_hostname_link))
+ if os.path.isfile(hostname_cert) and not os.path.isfile(custom_cert):
+ os.symlink(hostname_cert, custom_cert)
+ os.symlink(hostname_key, custom_key)
+
+
+def install_certs(ssl_dir, certs, chain=None):
+ """Install the certs passed into the ssl dir and append the chain if
+ provided.
+
+ :param ssl_dir: str Directory to create symlinks in
+ :param certs: {} {'cn': {'cert': 'CERT', 'key': 'KEY'}}
+ :param chain: str Chain to be appended to certs
+ """
+ for cn, bundle in certs.items():
+ cert_filename = 'cert_{}'.format(cn)
+ key_filename = 'key_{}'.format(cn)
+ cert_data = bundle['cert']
+ if chain:
+ # Append chain file so that clients that trust the root CA will
+ # trust certs signed by an intermediate in the chain
+ cert_data = cert_data + chain
+ write_file(
+ path=os.path.join(ssl_dir, cert_filename),
+ content=cert_data, perms=0o640)
+ write_file(
+ path=os.path.join(ssl_dir, key_filename),
+ content=bundle['key'], perms=0o640)
+
+
+def process_certificates(service_name, relation_id, unit,
+ custom_hostname_link=None):
+ """Process the certificates supplied down the relation
+
+    :param service_name: str Name of service the certificates are for.
+ :param relation_id: str Relation id providing the certs
+ :param unit: str Unit providing the certs
+ :param custom_hostname_link: str Name of custom link to create
+ """
+ data = relation_get(rid=relation_id, unit=unit)
+ ssl_dir = os.path.join('/etc/apache2/ssl/', service_name)
+ mkdir(path=ssl_dir)
+ name = local_unit().replace('/', '_')
+ certs = data.get('{}.processed_requests'.format(name))
+ chain = data.get('chain')
+ ca = data.get('ca')
+ if certs:
+ certs = json.loads(certs)
+ install_ca_cert(ca.encode())
+ install_certs(ssl_dir, certs, chain)
+ create_ip_cert_links(
+ ssl_dir,
+ custom_hostname_link=custom_hostname_link)
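
Taken together, the new helpers let a charm drive the 'certificates' relation end to
end. A hedged sketch of hook-side usage (the 'nova-lxd' service name is illustrative):

    from charmhelpers.core.hookenv import (
        related_units, relation_ids, relation_set)
    from charmhelpers.contrib.openstack.cert_utils import (
        get_certificate_request, process_certificates)

    # Publish this unit's certificate request to the CA charm.
    for rid in relation_ids('certificates'):
        relation_set(relation_id=rid,
                     relation_settings=get_certificate_request())

    # When the CA responds, install the certs under /etc/apache2/ssl/<service>
    # and create the per-address symlinks.
    for rid in relation_ids('certificates'):
        for unit in related_units(rid):
            process_certificates('nova-lxd', rid, unit)
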
diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py
index 2d91f0a..ca91396 100644
--- a/hooks/charmhelpers/contrib/openstack/context.py
+++ b/hooks/charmhelpers/contrib/openstack/context.py
@@ -190,8 +190,8 @@ class OSContextGenerator(object):
class SharedDBContext(OSContextGenerator):
interfaces = ['shared-db']
- def __init__(self,
- database=None, user=None, relation_prefix=None, ssl_dir=None):
+ def __init__(self, database=None, user=None, relation_prefix=None,
+ ssl_dir=None, relation_id=None):
"""Allows inspecting relation for settings prefixed with
relation_prefix. This is useful for parsing access for multiple
databases returned via the shared-db interface (eg, nova_password,
@@ -202,6 +202,7 @@ class SharedDBContext(OSContextGenerator):
self.user = user
self.ssl_dir = ssl_dir
self.rel_name = self.interfaces[0]
+ self.relation_id = relation_id
def __call__(self):
self.database = self.database or config('database')
@@ -235,7 +236,12 @@ class SharedDBContext(OSContextGenerator):
if self.relation_prefix:
password_setting = self.relation_prefix + '_password'
- for rid in relation_ids(self.interfaces[0]):
+ if self.relation_id:
+ rids = [self.relation_id]
+ else:
+ rids = relation_ids(self.interfaces[0])
+
+ for rid in rids:
self.related = True
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
@@ -448,11 +454,13 @@ class IdentityCredentialsContext(IdentityServiceContext):
class AMQPContext(OSContextGenerator):
- def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None):
+ def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None,
+ relation_id=None):
self.ssl_dir = ssl_dir
self.rel_name = rel_name
self.relation_prefix = relation_prefix
self.interfaces = [rel_name]
+ self.relation_id = relation_id
def __call__(self):
log('Generating template context for amqp', level=DEBUG)
@@ -473,7 +481,11 @@ class AMQPContext(OSContextGenerator):
raise OSContextError
ctxt = {}
- for rid in relation_ids(self.rel_name):
+ if self.relation_id:
+ rids = [self.relation_id]
+ else:
+ rids = relation_ids(self.rel_name)
+ for rid in rids:
ha_vip_only = False
self.related = True
transport_hosts = None
@@ -789,17 +801,18 @@ class ApacheSSLContext(OSContextGenerator):
ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
mkdir(path=ssl_dir)
cert, key = get_cert(cn)
- if cn:
- cert_filename = 'cert_{}'.format(cn)
- key_filename = 'key_{}'.format(cn)
- else:
- cert_filename = 'cert'
- key_filename = 'key'
+ if cert and key:
+ if cn:
+ cert_filename = 'cert_{}'.format(cn)
+ key_filename = 'key_{}'.format(cn)
+ else:
+ cert_filename = 'cert'
+ key_filename = 'key'
- write_file(path=os.path.join(ssl_dir, cert_filename),
- content=b64decode(cert), perms=0o640)
- write_file(path=os.path.join(ssl_dir, key_filename),
- content=b64decode(key), perms=0o640)
+ write_file(path=os.path.join(ssl_dir, cert_filename),
+ content=b64decode(cert), perms=0o640)
+ write_file(path=os.path.join(ssl_dir, key_filename),
+ content=b64decode(key), perms=0o640)
def configure_ca(self):
ca_cert = get_ca_cert()
@@ -871,23 +884,31 @@ class ApacheSSLContext(OSContextGenerator):
if not self.external_ports or not https():
return {}
- self.configure_ca()
+ use_keystone_ca = True
+ for rid in relation_ids('certificates'):
+ if related_units(rid):
+ use_keystone_ca = False
+
+ if use_keystone_ca:
+ self.configure_ca()
+
self.enable_modules()
ctxt = {'namespace': self.service_namespace,
'endpoints': [],
'ext_ports': []}
- cns = self.canonical_names()
- if cns:
- for cn in cns:
- self.configure_cert(cn)
- else:
- # Expect cert/key provided in config (currently assumed that ca
- # uses ip for cn)
- for net_type in (INTERNAL, ADMIN, PUBLIC):
- cn = resolve_address(endpoint_type=net_type)
- self.configure_cert(cn)
+ if use_keystone_ca:
+ cns = self.canonical_names()
+ if cns:
+ for cn in cns:
+ self.configure_cert(cn)
+ else:
+ # Expect cert/key provided in config (currently assumed that ca
+ # uses ip for cn)
+ for net_type in (INTERNAL, ADMIN, PUBLIC):
+ cn = resolve_address(endpoint_type=net_type)
+ self.configure_cert(cn)
addresses = self.get_network_addresses()
for address, endpoint in addresses:
@@ -1368,11 +1389,12 @@ class WorkerConfigContext(OSContextGenerator):
class WSGIWorkerConfigContext(WorkerConfigContext):
def __init__(self, name=None, script=None, admin_script=None,
- public_script=None, process_weight=1.00,
+ public_script=None, user=None, group=None,
+ process_weight=1.00,
admin_process_weight=0.25, public_process_weight=0.75):
self.service_name = name
- self.user = name
- self.group = name
+ self.user = user or name
+ self.group = group or name
self.script = script
self.admin_script = admin_script
self.public_script = public_script
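
The relation_id arguments let callers render a context for one specific relation
rather than iterating over every shared-db or amqp relation, and
WSGIWorkerConfigContext can now run WSGI daemons as a user/group other than the
service name. A hedged sketch for use inside a hook (ids and names are hypothetical):

    from charmhelpers.contrib.openstack import context

    db_ctxt = context.SharedDBContext(
        database='nova', user='nova', relation_id='shared-db:42')()
    amqp_ctxt = context.AMQPContext(relation_id='amqp:17')()

    # Feeds {{ user }}/{{ group }} in the wsgi-openstack-* templates below.
    wsgi_ctxt = context.WSGIWorkerConfigContext(
        name='nova-api-metadata', script='/usr/bin/nova-api-metadata',
        user='nova', group='nova')()
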
diff --git a/hooks/charmhelpers/contrib/openstack/ip.py b/hooks/charmhelpers/contrib/openstack/ip.py
index d1476b1..73102af 100644
--- a/hooks/charmhelpers/contrib/openstack/ip.py
+++ b/hooks/charmhelpers/contrib/openstack/ip.py
@@ -184,3 +184,13 @@ def resolve_address(endpoint_type=PUBLIC, override=True):
"clustered=%s)" % (net_type, clustered))
return resolved_address
+
+
+def get_vip_in_network(network):
+ matching_vip = None
+ vips = config('vip')
+ if vips:
+ for vip in vips.split():
+ if is_address_in_network(network, vip):
+ matching_vip = vip
+ return matching_vip
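
get_vip_in_network() walks the space-separated 'vip' charm config and returns a VIP
that falls inside the given CIDR, or None. A small sketch (addresses illustrative):

    from charmhelpers.contrib.network.ip import resolve_network_cidr
    from charmhelpers.contrib.openstack.ip import get_vip_in_network

    # With config vip='10.5.0.100 172.16.0.100' and a unit address in
    # 10.5.0.0/24, this yields '10.5.0.100'.
    vip = get_vip_in_network(resolve_network_cidr('10.5.0.20'))
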
diff --git a/hooks/charmhelpers/contrib/openstack/ssh_migrations.py b/hooks/charmhelpers/contrib/openstack/ssh_migrations.py
new file mode 100644
index 0000000..96b9f71
--- /dev/null
+++ b/hooks/charmhelpers/contrib/openstack/ssh_migrations.py
@@ -0,0 +1,412 @@
+# Copyright 2018 Canonical Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import subprocess
+
+from charmhelpers.core.hookenv import (
+ ERROR,
+ log,
+ relation_get,
+)
+from charmhelpers.contrib.network.ip import (
+ is_ipv6,
+ ns_query,
+)
+from charmhelpers.contrib.openstack.utils import (
+ get_hostname,
+ get_host_ip,
+ is_ip,
+)
+
+NOVA_SSH_DIR = '/etc/nova/compute_ssh/'
+
+
+def ssh_directory_for_unit(application_name, user=None):
+ """Return the directory used to store ssh assets for the application.
+
+ :param application_name: Name of application eg nova-compute-something
+ :type application_name: str
+    :param user: The user that the ssh assets are for.
+ :type user: str
+ :returns: Fully qualified directory path.
+ :rtype: str
+ """
+ if user:
+ application_name = "{}_{}".format(application_name, user)
+ _dir = os.path.join(NOVA_SSH_DIR, application_name)
+ for d in [NOVA_SSH_DIR, _dir]:
+ if not os.path.isdir(d):
+ os.mkdir(d)
+ for f in ['authorized_keys', 'known_hosts']:
+ f = os.path.join(_dir, f)
+ if not os.path.isfile(f):
+ open(f, 'w').close()
+ return _dir
+
+
+def known_hosts(application_name, user=None):
+ """Return the known hosts file for the application.
+
+ :param application_name: Name of application eg nova-compute-something
+ :type application_name: str
+    :param user: The user that the ssh assets are for.
+ :type user: str
+ :returns: Fully qualified path to file.
+ :rtype: str
+ """
+ return os.path.join(
+ ssh_directory_for_unit(application_name, user),
+ 'known_hosts')
+
+
+def authorized_keys(application_name, user=None):
+ """Return the authorized keys file for the application.
+
+ :param application_name: Name of application eg nova-compute-something
+ :type application_name: str
+    :param user: The user that the ssh assets are for.
+ :type user: str
+ :returns: Fully qualified path to file.
+ :rtype: str
+ """
+ return os.path.join(
+ ssh_directory_for_unit(application_name, user),
+ 'authorized_keys')
+
+
+def ssh_known_host_key(host, application_name, user=None):
+ """Return the first entry in known_hosts for host.
+
+ :param host: hostname to lookup in file.
+ :type host: str
+ :param application_name: Name of application eg nova-compute-something
+ :type application_name: str
+    :param user: The user that the ssh assets are for.
+ :type user: str
+ :returns: Host key
+ :rtype: str or None
+ """
+ cmd = [
+ 'ssh-keygen',
+ '-f', known_hosts(application_name, user),
+ '-H',
+ '-F',
+ host]
+ try:
+ # The first line of output is like '# Host xx found: line 1 type RSA',
+ # which should be excluded.
+ output = subprocess.check_output(cmd)
+ except subprocess.CalledProcessError as e:
+ # RC of 1 seems to be legitimate for most ssh-keygen -F calls.
+ if e.returncode == 1:
+ output = e.output
+ else:
+ raise
+ output = output.strip()
+
+ if output:
+ # Bug #1500589 cmd has 0 rc on precise if entry not present
+ lines = output.split('\n')
+ if len(lines) >= 1:
+ return lines[0]
+
+ return None
+
+
+def remove_known_host(host, application_name, user=None):
+ """Remove the entry in known_hosts for host.
+
+ :param host: hostname to lookup in file.
+ :type host: str
+ :param application_name: Name of application eg nova-compute-something
+ :type application_name: str
+    :param user: The user that the ssh assets are for.
+ :type user: str
+ """
+ log('Removing SSH known host entry for compute host at %s' % host)
+ cmd = ['ssh-keygen', '-f', known_hosts(application_name, user), '-R', host]
+ subprocess.check_call(cmd)
+
+
+def is_same_key(key_1, key_2):
+ """Extract the key from two host entries and compare them.
+
+ :param key_1: Host key
+ :type key_1: str
+ :param key_2: Host key
+ :type key_2: str
+ """
+    # The key format we get will be like '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|Cp'
+ # 'EL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB', we only need to compare
+ # the part start with 'ssh-rsa' followed with '= ', because the hash
+ # value in the beginning will change each time.
+ k_1 = key_1.split('= ')[1]
+ k_2 = key_2.split('= ')[1]
+ return k_1 == k_2
+
+
+def add_known_host(host, application_name, user=None):
+ """Add the given host key to the known hosts file.
+
+ :param host: host name
+ :type host: str
+ :param application_name: Name of application eg nova-compute-something
+ :type application_name: str
+    :param user: The user that the ssh assets are for.
+ :type user: str
+ """
+ cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host]
+ try:
+ remote_key = subprocess.check_output(cmd).strip()
+ except Exception as e:
+ log('Could not obtain SSH host key from %s' % host, level=ERROR)
+ raise e
+
+ current_key = ssh_known_host_key(host, application_name, user)
+ if current_key and remote_key:
+ if is_same_key(remote_key, current_key):
+ log('Known host key for compute host %s up to date.' % host)
+ return
+ else:
+ remove_known_host(host, application_name, user)
+
+ log('Adding SSH host key to known hosts for compute node at %s.' % host)
+ with open(known_hosts(application_name, user), 'a') as out:
+ out.write("{}\n".format(remote_key))
+
+
+def ssh_authorized_key_exists(public_key, application_name, user=None):
+ """Check if given key is in the authorized_key file.
+
+ :param public_key: Public key.
+ :type public_key: str
+ :param application_name: Name of application eg nova-compute-something
+ :type application_name: str
+    :param user: The user that the ssh assets are for.
+ :type user: str
+ :returns: Whether given key is in the authorized_key file.
+ :rtype: boolean
+ """
+ with open(authorized_keys(application_name, user)) as keys:
+ return ('%s' % public_key) in keys.read()
+
+
+def add_authorized_key(public_key, application_name, user=None):
+ """Add given key to the authorized_key file.
+
+ :param public_key: Public key.
+ :type public_key: str
+ :param application_name: Name of application eg nova-compute-something
+ :type application_name: str
+    :param user: The user that the ssh assets are for.
+ :type user: str
+ """
+ with open(authorized_keys(application_name, user), 'a') as keys:
+ keys.write("{}\n".format(public_key))
+
+
+def ssh_compute_add_host_and_key(public_key, hostname, private_address,
+ application_name, user=None):
+ """Add a compute nodes ssh details to local cache.
+
+ Collect various hostname variations and add the corresponding host keys to
+ the local known hosts file. Finally, add the supplied public key to the
+ authorized_key file.
+
+ :param public_key: Public key.
+ :type public_key: str
+ :param hostname: Hostname to collect host keys from.
+ :type hostname: str
+    :param private_address: Corresponding private address for hostname
+ :type private_address: str
+ :param application_name: Name of application eg nova-compute-something
+ :type application_name: str
+    :param user: The user that the ssh assets are for.
+ :type user: str
+ """
+ # If remote compute node hands us a hostname, ensure we have a
+ # known hosts entry for its IP, hostname and FQDN.
+ hosts = [private_address]
+
+ if not is_ipv6(private_address):
+ if hostname:
+ hosts.append(hostname)
+
+ if is_ip(private_address):
+ hn = get_hostname(private_address)
+ if hn:
+ hosts.append(hn)
+ short = hn.split('.')[0]
+ if ns_query(short):
+ hosts.append(short)
+ else:
+ hosts.append(get_host_ip(private_address))
+ short = private_address.split('.')[0]
+ if ns_query(short):
+ hosts.append(short)
+
+ for host in list(set(hosts)):
+ add_known_host(host, application_name, user)
+
+ if not ssh_authorized_key_exists(public_key, application_name, user):
+ log('Saving SSH authorized key for compute host at %s.' %
+ private_address)
+ add_authorized_key(public_key, application_name, user)
+
+
+def ssh_compute_add(public_key, application_name, rid=None, unit=None,
+ user=None):
+ """Add a compute nodes ssh details to local cache.
+
+ Collect various hostname variations and add the corresponding host keys to
+ the local known hosts file. Finally, add the supplied public key to the
+ authorized_key file.
+
+ :param public_key: Public key.
+ :type public_key: str
+ :param application_name: Name of application eg nova-compute-something
+ :type application_name: str
+    :param rid: Relation id of the relation between this charm and the app. If
+                none is supplied it is assumed to be the relation relating to
+                the current hook context.
+ :type rid: str
+    :param unit: Unit to add ssh assets for. If none is supplied it is assumed
+                 to be the unit relating to the current hook context.
+ :type unit: str
+    :param user: The user that the ssh assets are for.
+ :type user: str
+ """
+ relation_data = relation_get(rid=rid, unit=unit)
+ ssh_compute_add_host_and_key(
+ public_key,
+ relation_data.get('hostname'),
+ relation_data.get('private-address'),
+ application_name,
+ user=user)
+
+
+def ssh_known_hosts_lines(application_name, user=None):
+ """Return contents of known_hosts file for given application.
+
+ :param application_name: Name of application eg nova-compute-something
+ :type application_name: str
+    :param user: The user that the ssh assets are for.
+ :type user: str
+ """
+ known_hosts_list = []
+ with open(known_hosts(application_name, user)) as hosts:
+ for hosts_line in hosts:
+ if hosts_line.rstrip():
+ known_hosts_list.append(hosts_line.rstrip())
+ return(known_hosts_list)
+
+
+def ssh_authorized_keys_lines(application_name, user=None):
+ """Return contents of authorized_keys file for given application.
+
+ :param application_name: Name of application eg nova-compute-something
+ :type application_name: str
+    :param user: The user that the ssh assets are for.
+ :type user: str
+ """
+ authorized_keys_list = []
+
+ with open(authorized_keys(application_name, user)) as keys:
+ for authkey_line in keys:
+ if authkey_line.rstrip():
+ authorized_keys_list.append(authkey_line.rstrip())
+ return(authorized_keys_list)
+
+
+def ssh_compute_remove(public_key, application_name, user=None):
+ """Remove given public key from authorized_keys file.
+
+ :param public_key: Public key.
+ :type public_key: str
+ :param application_name: Name of application eg nova-compute-something
+ :type application_name: str
+    :param user: The user that the ssh assets are for.
+ :type user: str
+ """
+ if not (os.path.isfile(authorized_keys(application_name, user)) or
+ os.path.isfile(known_hosts(application_name, user))):
+ return
+
+ keys = ssh_authorized_keys_lines(application_name, user=None)
+ keys = [k.strip() for k in keys]
+
+ if public_key not in keys:
+ return
+
+ [keys.remove(key) for key in keys if key == public_key]
+
+ with open(authorized_keys(application_name, user), 'w') as _keys:
+ keys = '\n'.join(keys)
+ if not keys.endswith('\n'):
+ keys += '\n'
+ _keys.write(keys)
+
+
+def get_ssh_settings(application_name, user=None):
+ """Retrieve the known host entries and public keys for application
+
+    Retrieve the known host entries and public keys collected from all units
+    of the given application that are related to this application, for the
+    app + user combination.
+
+ :param application_name: Name of application eg nova-compute-something
+ :type application_name: str
+    :param user: The user that the ssh assets are for.
+ :type user: str
+ :returns: Public keys + host keys for all units for app + user combination.
+ :rtype: dict
+ """
+ settings = {}
+ keys = {}
+ prefix = ''
+ if user:
+ prefix = '{}_'.format(user)
+
+ for i, line in enumerate(ssh_known_hosts_lines(
+ application_name=application_name, user=user)):
+ settings['{}known_hosts_{}'.format(prefix, i)] = line
+ if settings:
+ settings['{}known_hosts_max_index'.format(prefix)] = len(
+ settings.keys())
+
+ for i, line in enumerate(ssh_authorized_keys_lines(
+ application_name=application_name, user=user)):
+ keys['{}authorized_keys_{}'.format(prefix, i)] = line
+ if keys:
+ keys['{}authorized_keys_max_index'.format(prefix)] = len(keys.keys())
+ settings.update(keys)
+ return settings
+
+
+def get_all_user_ssh_settings(application_name):
+ """Retrieve the known host entries and public keys for application
+
+    Retrieve the known host entries and public keys collected from all units
+    of the given application that are related to this application, for both
+    the root user and the nova user.
+
+ :param application_name: Name of application eg nova-compute-something
+ :type application_name: str
+ :returns: Public keys + host keys for all units for app + user combination.
+ :rtype: dict
+ """
+ settings = get_ssh_settings(application_name)
+ settings.update(get_ssh_settings(application_name, user='nova'))
+ return settings
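
The ssh_migrations module centralises the known_hosts/authorized_keys handling that
principal charms previously carried locally. A hedged sketch of use in a
cloud-compute relation hook (the relation id, unit name and key are placeholders):

    from charmhelpers.core.hookenv import relation_set
    from charmhelpers.contrib.openstack import ssh_migrations

    public_key = 'ssh-rsa AAAA... nova@compute-host'  # placeholder key

    # Record the remote unit's host keys and authorize its public key.
    ssh_migrations.ssh_compute_add(
        public_key, 'nova-compute-lxd',
        rid='cloud-compute:3', unit='nova-compute-lxd/0')

    # Return the accumulated entries for the root and nova users.
    relation_set(
        relation_id='cloud-compute:3',
        relation_settings=ssh_migrations.get_all_user_ssh_settings(
            'nova-compute-lxd'))
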
diff --git a/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf b/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
index e2e73b2..23b62a3 100644
--- a/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
+++ b/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-api.conf
@@ -14,7 +14,7 @@ Listen {{ public_port }}
{% if port -%}
<VirtualHost *:{{ port }}>
- WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
+ WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}
WSGIScriptAlias / {{ script }}
@@ -40,7 +40,7 @@ Listen {{ public_port }}
{% if admin_port -%}
<VirtualHost *:{{ admin_port }}>
- WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
+ WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}-admin
WSGIScriptAlias / {{ admin_script }}
@@ -66,7 +66,7 @@ Listen {{ public_port }}
{% if public_port -%}
<VirtualHost *:{{ public_port }}>
- WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
+ WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
display-name=%{GROUP}
WSGIProcessGroup {{ service_name }}-public
WSGIScriptAlias / {{ public_script }}
diff --git a/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf b/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf
new file mode 100644
index 0000000..23b62a3
--- /dev/null
+++ b/hooks/charmhelpers/contrib/openstack/templates/wsgi-openstack-metadata.conf
@@ -0,0 +1,91 @@
+# Configuration file maintained by Juju. Local changes may be overwritten.
+
+{% if port -%}
+Listen {{ port }}
+{% endif -%}
+
+{% if admin_port -%}
+Listen {{ admin_port }}
+{% endif -%}
+
+{% if public_port -%}
+Listen {{ public_port }}
+{% endif -%}
+
+{% if port -%}
+<VirtualHost *:{{ port }}>
+ WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ user }} group={{ group }} \
+ display-name=%{GROUP}
+ WSGIProcessGroup {{ service_name }}
+ WSGIScriptAlias / {{ script }}
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+ <IfVersion >= 2.4>
+ ErrorLogFormat "%{cu}t %M"
+ </IfVersion>
+ ErrorLog /var/log/apache2/{{ service_name }}_error.log
+ CustomLog /var/log/apache2/{{ service_name }}_access.log combined
+
+ <Directory /usr/bin>
+ <IfVersion >= 2.4>
+ Require all granted
+ </IfVersion>
+ <IfVersion < 2.4>
+ Order allow,deny
+ Allow from all
+ </IfVersion>
+ </Directory>
+</VirtualHost>
+{% endif -%}
+
+{% if admin_port -%}
+<VirtualHost *:{{ admin_port }}>
+ WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
+ display-name=%{GROUP}
+ WSGIProcessGroup {{ service_name }}-admin
+ WSGIScriptAlias / {{ admin_script }}
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+ <IfVersion >= 2.4>
+ ErrorLogFormat "%{cu}t %M"
+ </IfVersion>
+ ErrorLog /var/log/apache2/{{ service_name }}_error.log
+ CustomLog /var/log/apache2/{{ service_name }}_access.log combined
+
+ <Directory /usr/bin>
+ <IfVersion >= 2.4>
+ Require all granted
+ </IfVersion>
+ <IfVersion < 2.4>
+ Order allow,deny
+ Allow from all
+ </IfVersion>
+ </Directory>
+</VirtualHost>
+{% endif -%}
+
+{% if public_port -%}
+<VirtualHost *:{{ public_port }}>
+ WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ user }} group={{ group }} \
+ display-name=%{GROUP}
+ WSGIProcessGroup {{ service_name }}-public
+ WSGIScriptAlias / {{ public_script }}
+ WSGIApplicationGroup %{GLOBAL}
+ WSGIPassAuthorization On
+ <IfVersion >= 2.4>
+ ErrorLogFormat "%{cu}t %M"
+ </IfVersion>
+ ErrorLog /var/log/apache2/{{ service_name }}_error.log
+ CustomLog /var/log/apache2/{{ service_name }}_access.log combined
+
+ <Directory /usr/bin>
+ <IfVersion >= 2.4>
+ Require all granted
+ </IfVersion>
+ <IfVersion < 2.4>
+ Order allow,deny
+ Allow from all
+ </IfVersion>
+ </Directory>
+</VirtualHost>
+{% endif -%}
diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py
index 6184abd..24f5b80 100644
--- a/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/utils.py
@@ -133,6 +133,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('zesty', 'ocata'),
('artful', 'pike'),
('bionic', 'queens'),
+ ('cosmic', 'rocky'),
])
@@ -151,6 +152,7 @@ OPENSTACK_CODENAMES = OrderedDict([
('2017.1', 'ocata'),
('2017.2', 'pike'),
('2018.1', 'queens'),
+ ('2018.2', 'rocky'),
])
# The ugly duckling - must list releases oldest to newest
@@ -183,6 +185,8 @@ SWIFT_CODENAMES = OrderedDict([
['2.13.0', '2.15.0']),
('queens',
['2.16.0', '2.17.0']),
+ ('rocky',
+ ['2.18.0']),
])
# >= Liberty version->codename mapping
@@ -827,12 +831,25 @@ def _ows_check_if_paused(services=None, ports=None):
"""Check if the unit is supposed to be paused, and if so check that the
services/ports (if passed) are actually stopped/not being listened to.
- if the unit isn't supposed to be paused, just return None, None
+ If the unit isn't supposed to be paused, just return None, None
+
+ If the unit is performing a series upgrade, return a message indicating
+ this.
@param services: OPTIONAL services spec or list of service names.
@param ports: OPTIONAL list of port numbers.
@returns state, message or None, None
"""
+ if is_unit_upgrading_set():
+ state, message = check_actually_paused(services=services,
+ ports=ports)
+ if state is None:
+ # we're paused okay, so set maintenance and return
+ state = "blocked"
+ message = ("Ready for do-release-upgrade and reboot. "
+ "Set complete when finished.")
+ return state, message
+
if is_unit_paused_set():
state, message = check_actually_paused(services=services,
ports=ports)
@@ -1335,7 +1352,7 @@ def pause_unit(assess_status_func, services=None, ports=None,
message = assess_status_func()
if message:
messages.append(message)
- if messages:
+ if messages and not is_unit_upgrading_set():
raise Exception("Couldn't pause: {}".format("; ".join(messages)))
@@ -1685,3 +1702,34 @@ def install_os_snaps(snaps, refresh=False):
snap_install(snap,
_ensure_flag(snaps[snap]['channel']),
_ensure_flag(snaps[snap]['mode']))
+
+
+def set_unit_upgrading():
+ """Set the unit to a upgrading state in the local kv() store.
+ """
+ with unitdata.HookData()() as t:
+ kv = t[0]
+ kv.set('unit-upgrading', True)
+
+
+def clear_unit_upgrading():
+ """Clear the unit from a upgrading state in the local kv() store
+ """
+ with unitdata.HookData()() as t:
+ kv = t[0]
+ kv.set('unit-upgrading', False)
+
+
+def is_unit_upgrading_set():
+ """Return the state of the kv().get('unit-upgrading').
+
+    To help with units that don't have HookData() (e.g. in testing),
+    return False if an exception is raised.
+ """
+ try:
+ with unitdata.HookData()() as t:
+ kv = t[0]
+ # transform something truth-y into a Boolean.
+ return not(not(kv.get('unit-upgrading')))
+ except Exception:
+ return False
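
The unit-upgrading flag feeds the blocked/paused workload-status logic during a
series upgrade. A hedged sketch of pre/post series-upgrade handlers built on the new
helpers (the surrounding action plumbing is assumed and not part of this sync):

    from charmhelpers.contrib.openstack.utils import (
        set_unit_upgrading, clear_unit_upgrading, is_unit_upgrading_set)

    def prepare_series_upgrade():
        # With the flag set, _ows_check_if_paused() reports:
        # "Ready for do-release-upgrade and reboot. Set complete when finished."
        set_unit_upgrading()

    def complete_series_upgrade():
        clear_unit_upgrading()
        assert not is_unit_upgrading_set()
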
diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py
index 627d8f7..6880007 100644
--- a/hooks/charmhelpers/core/hookenv.py
+++ b/hooks/charmhelpers/core/hookenv.py
@@ -201,11 +201,35 @@ def remote_unit():
return os.environ.get('JUJU_REMOTE_UNIT', None)
-def service_name():
- """The name service group this unit belongs to"""
+def application_name():
+ """
+ The name of the deployed application this unit belongs to.
+ """
return local_unit().split('/')[0]
+def service_name():
+ """
+ .. deprecated:: 0.19.1
+ Alias for :func:`application_name`.
+ """
+ return application_name()
+
+
+def model_name():
+ """
+ Name of the model that this unit is deployed in.
+ """
+ return os.environ['JUJU_MODEL_NAME']
+
+
+def model_uuid():
+ """
+ UUID of the model that this unit is deployed in.
+ """
+ return os.environ['JUJU_MODEL_UUID']
+
+
def principal_unit():
"""Returns the principal unit of this unit, otherwise None"""
# Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT
@@ -973,6 +997,13 @@ def application_version_set(version):
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def goal_state():
+ """Juju goal state values"""
+ cmd = ['goal-state', '--format=json']
+ return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def is_leader():
"""Does the current unit hold the juju leadership
@@ -1290,3 +1321,33 @@ def egress_subnets(rid=None, unit=None):
if 'private-address' in settings:
return [_to_range(settings['private-address'])]
return [] # Should never happen
+
+
+def unit_doomed(unit=None):
+ """Determines if the unit is being removed from the model
+
+ Requires Juju 2.4.1.
+
+ :param unit: string unit name, defaults to local_unit
+ :side effect: calls goal_state
+ :side effect: calls local_unit
+ :side effect: calls has_juju_version
+ :return: True if the unit is being removed, already gone, or never existed
+ """
+ if not has_juju_version("2.4.1"):
+ # We cannot risk blindly returning False for 'we don't know',
+ # because that could cause data loss; if call sites don't
+ # need an accurate answer, they likely don't need this helper
+ # at all.
+ # goal-state existed in 2.4.0, but did not handle removals
+ # correctly until 2.4.1.
+ raise NotImplementedError("is_doomed")
+ if unit is None:
+ unit = local_unit()
+ gs = goal_state()
+ units = gs.get('units', {})
+ if unit not in units:
+ return True
+ # I don't think 'dead' units ever show up in the goal-state, but
+ # check anyway in addition to 'dying'.
+ return units[unit]['status'] in ('dying', 'dead')
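
goal_state() and unit_doomed() expose Juju 2.4+ goal-state data, while
application_name()/model_name()/model_uuid() read the standard hook environment. A
short sketch (the unit name is illustrative):

    from charmhelpers.core import hookenv

    gs = hookenv.goal_state()          # parsed `goal-state --format=json`
    app = hookenv.application_name()   # e.g. 'nova-lxd'
    model = hookenv.model_name()       # from JUJU_MODEL_NAME

    # Requires Juju >= 2.4.1; raises NotImplementedError on older versions.
    if hookenv.unit_doomed('nova-lxd/0'):
        pass  # skip teardown work for a unit that is already going away
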
diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py
index 322ab2a..e9fd38a 100644
--- a/hooks/charmhelpers/core/host.py
+++ b/hooks/charmhelpers/core/host.py
@@ -972,6 +972,20 @@ def is_container():
def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
+ """Adds the specified path to the mlocate's udpatedb.conf PRUNEPATH list.
+
+ This method has no effect if the path specified by updatedb_path does not
+ exist or is not a file.
+
+ @param path: string the path to add to the updatedb.conf PRUNEPATHS value
+    @param updatedb_path: the path to the updatedb.conf file
+ """
+ if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path):
+ # If the updatedb.conf file doesn't exist then don't attempt to update
+ # the file as the package providing mlocate may not be installed on
+ # the local system
+ return
+
with open(updatedb_path, 'r+') as f_id:
updatedb_text = f_id.read()
output = updatedb(updatedb_text, path)
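
add_to_updatedb_prunepath() now degrades gracefully when mlocate is not installed.
For example:

    from charmhelpers.core.host import add_to_updatedb_prunepath

    # Keep updatedb from indexing instance storage; this is a no-op when the
    # default updatedb.conf is absent (mlocate not installed).
    add_to_updatedb_prunepath('/var/lib/lxd')
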
diff --git a/hooks/charmhelpers/fetch/ubuntu.py b/hooks/charmhelpers/fetch/ubuntu.py
index 653d58f..19aa6ba 100644
--- a/hooks/charmhelpers/fetch/ubuntu.py
+++ b/hooks/charmhelpers/fetch/ubuntu.py
@@ -158,6 +158,14 @@ CLOUD_ARCHIVE_POCKETS = {
'queens/proposed': 'xenial-proposed/queens',
'xenial-queens/proposed': 'xenial-proposed/queens',
'xenial-proposed/queens': 'xenial-proposed/queens',
+ # Rocky
+ 'rocky': 'bionic-updates/rocky',
+ 'bionic-rocky': 'bionic-updates/rocky',
+ 'bionic-rocky/updates': 'bionic-updates/rocky',
+ 'bionic-updates/rocky': 'bionic-updates/rocky',
+ 'rocky/proposed': 'bionic-proposed/rocky',
+ 'bionic-rocky/proposed': 'bionic-proposed/rocky',
+ 'bionic-proposed/rocky': 'bionic-proposed/rocky',
}
@@ -307,7 +315,7 @@ def import_key(key):
cmd = ['apt-key', 'adv', '--keyserver',
'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
try:
- subprocess.check_call(cmd)
+ _run_with_retries(cmd)
except subprocess.CalledProcessError:
error = "Error importing PGP key '{}'".format(key)
log(error)
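
The new pocket entries are what let a 'cloud:bionic-rocky' origin resolve, and
import_key() now retries transient keyserver failures. A minimal sketch:

    from charmhelpers.fetch import add_source, apt_update

    # Resolves to the bionic-updates/rocky Ubuntu Cloud Archive pocket.
    add_source('cloud:bionic-rocky')
    apt_update(fatal=True)
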
diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py
index 33d5c3c..eebb359 100644
--- a/tests/basic_deployment.py
+++ b/tests/basic_deployment.py
@@ -20,27 +20,20 @@ import time
import keystoneclient
from keystoneclient.v3 import client as keystone_client_v3
-import glanceclient
from novaclient import client as nova_client
from charmhelpers.contrib.openstack.amulet.deployment import (
OpenStackAmuletDeployment
)
-# NOTE(beisner):
-# LXDAmuletUtils inherits and extends OpenStackAmuletUtils, with
-# the intention of ultimately moving the relevant helpers into
-# OpenStackAmuletUtils.
-#
-# from charmhelpers.contrib.openstack.amulet.utils import (
-# OpenStackAmuletUtils,
-from lxd_amulet_utils import (
- LXDAmuletUtils,
+from charmhelpers.contrib.openstack.amulet.utils import (
+ OpenStackAmuletUtils,
DEBUG,
)
+# Use DEBUG to turn on debug logging
+u = OpenStackAmuletUtils(DEBUG)
-u = LXDAmuletUtils(DEBUG)
LXD_IMAGE_URL = 'http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-root.tar.xz' # noqa
LXD_IMAGE_NAME = 'trusty-server-cloudimg-amd64-root.tar.xz'
@@ -206,7 +199,7 @@ class LXDBasicDeployment(OpenStackAmuletDeployment):
)
# Authenticate admin with glance endpoint
- self.glance = glanceclient.Client('1', session=self.keystone_session)
+ self.glance = u.authenticate_glance_admin(self.keystone)
self.nova_admin = nova_client.Client(2, session=self.keystone_session)
diff --git a/tests/charmhelpers/contrib/amulet/deployment.py b/tests/charmhelpers/contrib/amulet/deployment.py
index 9c65518..d21d01d 100644
--- a/tests/charmhelpers/contrib/amulet/deployment.py
+++ b/tests/charmhelpers/contrib/amulet/deployment.py
@@ -50,7 +50,8 @@ class AmuletDeployment(object):
this_service['units'] = 1
self.d.add(this_service['name'], units=this_service['units'],
- constraints=this_service.get('constraints'))
+ constraints=this_service.get('constraints'),
+ storage=this_service.get('storage'))
for svc in other_services:
if 'location' in svc:
@@ -64,7 +65,8 @@ class AmuletDeployment(object):
svc['units'] = 1
self.d.add(svc['name'], charm=branch_location, units=svc['units'],
- constraints=svc.get('constraints'))
+ constraints=svc.get('constraints'),
+ storage=svc.get('storage'))
def _add_relations(self, relations):
"""Add all of the relations for the services."""
diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
index 66beeda..1c96752 100644
--- a/tests/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -291,6 +291,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('zesty', None): self.zesty_ocata,
('artful', None): self.artful_pike,
('bionic', None): self.bionic_queens,
+ ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky,
+ ('cosmic', None): self.cosmic_rocky,
}
return releases[(self.series, self.openstack)]
@@ -306,6 +308,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('zesty', 'ocata'),
('artful', 'pike'),
('bionic', 'queens'),
+ ('cosmic', 'rocky'),
])
if self.openstack:
os_origin = self.openstack.split(':')[1]
diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py
index 84e87f5..10dbe59 100644
--- a/tests/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py
@@ -24,7 +24,8 @@ import urlparse
import cinderclient.v1.client as cinder_client
import cinderclient.v2.client as cinder_clientv2
-import glanceclient.v1.client as glance_client
+import glanceclient.v1 as glance_client
+import glanceclient.v2 as glance_clientv2
import heatclient.v1.client as heat_client
from keystoneclient.v2_0 import client as keystone_client
from keystoneauth1.identity import (
@@ -40,6 +41,7 @@ import novaclient
import pika
import swiftclient
+from charmhelpers.core.decorators import retry_on_exception
from charmhelpers.contrib.amulet.utils import (
AmuletUtils
)
@@ -55,7 +57,7 @@ OPENSTACK_RELEASES_PAIRS = [
'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
'xenial_pike', 'artful_pike', 'xenial_queens',
- 'bionic_queens']
+ 'bionic_queens', 'bionic_rocky', 'cosmic_rocky']
class OpenStackAmuletUtils(AmuletUtils):
@@ -423,6 +425,7 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Checking if tenant exists ({})...'.format(tenant))
return tenant in [t.name for t in keystone.tenants.list()]
+ @retry_on_exception(num_retries=5, base_delay=1)
def keystone_wait_for_propagation(self, sentry_relation_pairs,
api_version):
"""Iterate over list of sentry and relation tuples and verify that
@@ -542,7 +545,7 @@ class OpenStackAmuletUtils(AmuletUtils):
return ep
def get_default_keystone_session(self, keystone_sentry,
- openstack_release=None):
+ openstack_release=None, api_version=2):
"""Return a keystone session object and client object assuming standard
default settings
@@ -557,12 +560,12 @@ class OpenStackAmuletUtils(AmuletUtils):
eyc
"""
self.log.debug('Authenticating keystone admin...')
- api_version = 2
- client_class = keystone_client.Client
# 11 => xenial_queens
- if openstack_release and openstack_release >= 11:
- api_version = 3
+ if api_version == 3 or (openstack_release and openstack_release >= 11):
client_class = keystone_client_v3.Client
+ api_version = 3
+ else:
+ client_class = keystone_client.Client
keystone_ip = keystone_sentry.info['public-address']
session, auth = self.get_keystone_session(
keystone_ip,
@@ -621,7 +624,7 @@ class OpenStackAmuletUtils(AmuletUtils):
ep = keystone.service_catalog.url_for(service_type='image',
interface='adminURL')
if keystone.session:
- return glance_client.Client(ep, session=keystone.session)
+ return glance_clientv2.Client("2", session=keystone.session)
else:
return glance_client.Client(ep, token=keystone.auth_token)
@@ -677,18 +680,30 @@ class OpenStackAmuletUtils(AmuletUtils):
nova.flavors.create(name, ram, vcpus, disk, flavorid,
ephemeral, swap, rxtx_factor, is_public)
- def create_cirros_image(self, glance, image_name):
- """Download the latest cirros image and upload it to glance,
- validate and return a resource pointer.
-
- :param glance: pointer to authenticated glance connection
+ def glance_create_image(self, glance, image_name, image_url,
+ download_dir='tests',
+ hypervisor_type='qemu',
+ disk_format='qcow2',
+ architecture='x86_64',
+ container_format='bare'):
+ """Download an image and upload it to glance, validate its status
+ and return an image object pointer. KVM defaults, can override for
+ LXD.
+
+ :param glance: pointer to authenticated glance api connection
:param image_name: display name for new image
+ :param image_url: url to retrieve
+ :param download_dir: directory to store downloaded image file
+ :param hypervisor_type: glance image hypervisor property
+ :param disk_format: glance image disk format
+ :param architecture: glance image architecture property
+ :param container_format: glance image container format
:returns: glance image pointer
"""
- self.log.debug('Creating glance cirros image '
- '({})...'.format(image_name))
+ self.log.debug('Creating glance image ({}) from '
+ '{}...'.format(image_name, image_url))
- # Download cirros image
+ # Download image
http_proxy = os.getenv('AMULET_HTTP_PROXY')
self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
if http_proxy:
@@ -697,22 +712,33 @@ class OpenStackAmuletUtils(AmuletUtils):
else:
opener = urllib.FancyURLopener()
- f = opener.open('http://download.cirros-cloud.net/version/released')
- version = f.read().strip()
- cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
- local_path = os.path.join('tests', cirros_img)
-
- if not os.path.exists(local_path):
- cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
- version, cirros_img)
- opener.retrieve(cirros_url, local_path)
- f.close()
+ abs_file_name = os.path.join(download_dir, image_name)
+ if not os.path.exists(abs_file_name):
+ opener.retrieve(image_url, abs_file_name)
# Create glance image
- with open(local_path) as f:
- image = glance.images.create(name=image_name, is_public=True,
- disk_format='qcow2',
- container_format='bare', data=f)
+ glance_properties = {
+ 'architecture': architecture,
+ 'hypervisor_type': hypervisor_type
+ }
+ # Create glance image
+ if float(glance.version) < 2.0:
+ with open(abs_file_name) as f:
+ image = glance.images.create(
+ name=image_name,
+ is_public=True,
+ disk_format=disk_format,
+ container_format=container_format,
+ properties=glance_properties,
+ data=f)
+ else:
+ image = glance.images.create(
+ name=image_name,
+ visibility="public",
+ disk_format=disk_format,
+ container_format=container_format)
+ glance.images.upload(image.id, open(abs_file_name, 'rb'))
+ glance.images.update(image.id, **glance_properties)
# Wait for image to reach active status
img_id = image.id
@@ -727,24 +753,63 @@ class OpenStackAmuletUtils(AmuletUtils):
self.log.debug('Validating image attributes...')
val_img_name = glance.images.get(img_id).name
val_img_stat = glance.images.get(img_id).status
- val_img_pub = glance.images.get(img_id).is_public
val_img_cfmt = glance.images.get(img_id).container_format
val_img_dfmt = glance.images.get(img_id).disk_format
+
+ if float(glance.version) < 2.0:
+ val_img_pub = glance.images.get(img_id).is_public
+ else:
+ val_img_pub = glance.images.get(img_id).visibility == "public"
+
msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
'container fmt:{} disk fmt:{}'.format(
val_img_name, val_img_pub, img_id,
val_img_stat, val_img_cfmt, val_img_dfmt))
if val_img_name == image_name and val_img_stat == 'active' \
- and val_img_pub is True and val_img_cfmt == 'bare' \
- and val_img_dfmt == 'qcow2':
+ and val_img_pub is True and val_img_cfmt == container_format \
+ and val_img_dfmt == disk_format:
self.log.debug(msg_attr)
else:
- msg = ('Volume validation failed, {}'.format(msg_attr))
+ msg = ('Image validation failed, {}'.format(msg_attr))
amulet.raise_status(amulet.FAIL, msg=msg)
return image
+ def create_cirros_image(self, glance, image_name):
+ """Download the latest cirros image and upload it to glance,
+ validate and return a resource pointer.
+
+ :param glance: pointer to authenticated glance connection
+ :param image_name: display name for new image
+ :returns: glance image pointer
+ """
+ # /!\ DEPRECATION WARNING
+ self.log.warn('/!\\ DEPRECATION WARNING: use '
+ 'glance_create_image instead of '
+ 'create_cirros_image.')
+
+ self.log.debug('Creating glance cirros image '
+ '({})...'.format(image_name))
+
+ # Get cirros image URL
+ http_proxy = os.getenv('AMULET_HTTP_PROXY')
+ self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
+ if http_proxy:
+ proxies = {'http': http_proxy}
+ opener = urllib.FancyURLopener(proxies)
+ else:
+ opener = urllib.FancyURLopener()
+
+ f = opener.open('http://download.cirros-cloud.net/version/released')
+ version = f.read().strip()
+ cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
+ cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
+ version, cirros_img)
+ f.close()
+
+ return self.glance_create_image(glance, image_name, cirros_url)
+
def delete_image(self, glance, image):
"""Delete the specified image."""
@@ -996,6 +1061,9 @@ class OpenStackAmuletUtils(AmuletUtils):
cmd, code, output))
amulet.raise_status(amulet.FAIL, msg=msg)
+ # Normalize Ceph Mimic's ceph osd lspools output, which is newline-separated
+ output = output.replace("\n", ",")
+
# Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
for pool in str(output).split(','):
pool_id_name = pool.split(' ')
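
The newline-to-comma normalisation above exists because Ceph Mimic prints one pool per line where earlier releases printed a single comma-separated line. A small stand-alone sketch of the resulting parse, with the sample output strings assumed:

    def parse_pools(output):
        """Parse `ceph osd lspools` output into {pool_name: pool_id}."""
        # Mimic prints one "<id> <name>" pair per line; older releases print
        # a single comma-separated line, so fold newlines into commas first.
        output = output.replace("\n", ",")
        pools = {}
        for pool in str(output).split(','):
            pool_id_name = pool.split(' ')
            if len(pool_id_name) == 2:
                pool_id, pool_name = pool_id_name
                pools[pool_name] = int(pool_id)
        return pools

    # Sample strings (formats are assumptions):
    luminous = '0 data,1 metadata,2 rbd,3 cinder,4 glance,'
    mimic = '0 data\n1 metadata\n2 rbd\n3 cinder\n4 glance\n'
    assert parse_pools(luminous) == parse_pools(mimic)
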
diff --git a/tests/charmhelpers/core/hookenv.py b/tests/charmhelpers/core/hookenv.py
index 627d8f7..6880007 100644
--- a/tests/charmhelpers/core/hookenv.py
+++ b/tests/charmhelpers/core/hookenv.py
@@ -201,11 +201,35 @@ def remote_unit():
return os.environ.get('JUJU_REMOTE_UNIT', None)
-def service_name():
- """The name service group this unit belongs to"""
+def application_name():
+ """
+ The name of the deployed application this unit belongs to.
+ """
return local_unit().split('/')[0]
+def service_name():
+ """
+ .. deprecated:: 0.19.1
+ Alias for :func:`application_name`.
+ """
+ return application_name()
+
+
+def model_name():
+ """
+ Name of the model that this unit is deployed in.
+ """
+ return os.environ['JUJU_MODEL_NAME']
+
+
+def model_uuid():
+ """
+ UUID of the model that this unit is deployed in.
+ """
+ return os.environ['JUJU_MODEL_UUID']
+
+
def principal_unit():
"""Returns the principal unit of this unit, otherwise None"""
# Juju 2.2 and above provides JUJU_PRINCIPAL_UNIT
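
A brief, hypothetical example of the new identity helpers from charm code; the application and model names in the comments are illustrative only:

    from charmhelpers.core import hookenv

    app = hookenv.application_name()   # e.g. 'nova-lxd' for unit 'nova-lxd/0'
    model = hookenv.model_name()       # read from JUJU_MODEL_NAME
    uuid = hookenv.model_uuid()        # read from JUJU_MODEL_UUID
    hookenv.log('{} is deployed in model {} ({})'.format(app, model, uuid))
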
@@ -973,6 +997,13 @@ def application_version_set(version):
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
+def goal_state():
+ """Juju goal state values"""
+ cmd = ['goal-state', '--format=json']
+ return json.loads(subprocess.check_output(cmd).decode('UTF-8'))
+
+
+@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def is_leader():
"""Does the current unit hold the juju leadership
@@ -1290,3 +1321,33 @@ def egress_subnets(rid=None, unit=None):
if 'private-address' in settings:
return [_to_range(settings['private-address'])]
return [] # Should never happen
+
+
+def unit_doomed(unit=None):
+ """Determines if the unit is being removed from the model
+
+ Requires Juju 2.4.1.
+
+ :param unit: string unit name, defaults to local_unit
+ :side effect: calls goal_state
+ :side effect: calls local_unit
+ :side effect: calls has_juju_version
+ :return: True if the unit is being removed, already gone, or never existed
+ """
+ if not has_juju_version("2.4.1"):
+ # We cannot risk blindly returning False for 'we don't know',
+ # because that could cause data loss; if call sites don't
+ # need an accurate answer, they likely don't need this helper
+ # at all.
+ # goal-state existed in 2.4.0, but did not handle removals
+ # correctly until 2.4.1.
+ raise NotImplementedError("unit_doomed")
+ if unit is None:
+ unit = local_unit()
+ gs = goal_state()
+ units = gs.get('units', {})
+ if unit not in units:
+ return True
+ # I don't think 'dead' units ever show up in the goal-state, but
+ # check anyway in addition to 'dying'.
+ return units[unit]['status'] in ('dying', 'dead')
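
A hypothetical hook-side guard built on unit_doomed(); the cleanup step shown is a placeholder, not something provided by charm-helpers:

    from charmhelpers.core import hookenv

    def cleanup_if_departing():
        try:
            doomed = hookenv.unit_doomed()
        except NotImplementedError:
            # Juju < 2.4.1 cannot answer reliably; keep the data rather
            # than guess.
            return
        if doomed:
            # Do charm-specific cleanup here, e.g. deregister from a
            # shared backend (placeholder):
            hookenv.log('unit is going away; releasing shared resources')
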
diff --git a/tests/charmhelpers/core/host.py b/tests/charmhelpers/core/host.py
index 322ab2a..e9fd38a 100644
--- a/tests/charmhelpers/core/host.py
+++ b/tests/charmhelpers/core/host.py
@@ -972,6 +972,20 @@ def is_container():
def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
+ """Adds the specified path to the mlocate's udpatedb.conf PRUNEPATH list.
+
+ This method has no effect if the path specified by updatedb_path does not
+ exist or is not a file.
+
+ @param path: string the path to add to the updatedb.conf PRUNEPATHS value
+ @param updatedb_path: the path to the updatedb.conf file
+ """
+ if not os.path.exists(updatedb_path) or os.path.isdir(updatedb_path):
+ # If the updatedb.conf file doesn't exist then don't attempt to update
+ # the file as the package providing mlocate may not be installed on
+ # the local system
+ return
+
with open(updatedb_path, 'r+') as f_id:
updatedb_text = f_id.read()
output = updatedb(updatedb_text, path)
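
A minimal usage sketch for the hardened helper; the path is an example, and updatedb_path falls back to the module's UPDATEDB_PATH default when not given:

    from charmhelpers.core.host import add_to_updatedb_prunepath

    # Keep mlocate from indexing a large data directory.  With this change
    # the call is a no-op when updatedb.conf is absent (mlocate not
    # installed), so the charm no longer needs to guard it.
    add_to_updatedb_prunepath('/var/lib/nova/instances')
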
diff --git a/tests/dev-basic-bionic-rocky b/tests/gate-basic-bionic-rocky
index e7079f3..e7079f3 100755
--- a/tests/dev-basic-bionic-rocky
+++ b/tests/gate-basic-bionic-rocky
diff --git a/tests/lxd_amulet_utils.py b/tests/lxd_amulet_utils.py
deleted file mode 100644
index fe88b62..0000000
--- a/tests/lxd_amulet_utils.py
+++ /dev/null
@@ -1,133 +0,0 @@
-# Copyright 2016 Canonical Ltd
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Temporary Local Helpers - Extends OpenStackAmuletUtils
-# ============================================================================
-# NOTE:
-# Move to charmhelpers/contrib/openstack/amulet/utils.py once
-# validated and settled.
-#
-# These helpers are and should be written in a way that they
-# are not LXD-specific. They should default to KVM/x86_64
-# with enough parameters plumbed to allow LXD.
-#
-
-import amulet
-import logging
-import os
-import urllib
-
-from charmhelpers.contrib.openstack.amulet.utils import (
- OpenStackAmuletUtils
-)
-
-DEBUG = logging.DEBUG
-ERROR = logging.ERROR
-
-# LXD_IMAGE_URL = 'http://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-root.tar.xz' # noqa
-
-
-class LXDAmuletUtils(OpenStackAmuletUtils):
- """LXD amulet utilities.
-
- This class inherits from AmuletUtils and has additional support
- that is specifically for use by OpenStack charm tests.
- """
-
- def __init__(self, log_level=ERROR):
- """Initialize the deployment environment."""
- super(LXDAmuletUtils, self).__init__(log_level)
-
- # NOTE(beisner): to eventually replace the existing amulet openstack
- # glance image creation helper method. Plopped here to fine-tune and
- # make more flexible.
- def glance_create_image(self, glance, image_name, image_url,
- download_dir='tests',
- hypervisor_type='qemu',
- disk_format='qcow2',
- architecture='x86_64',
- container_format='bare'):
- """Download an image and upload it to glance, validate its status
- and return an image object pointer. KVM defaults, can override for
- LXD.
-
- :param glance: pointer to authenticated glance api connection
- :param image_name: display name for new image
- :param image_url: url to retrieve
- :param download_dir: directory to store downloaded image file
- :param hypervisor_type: glance image hypervisor property
- :param disk_format: glance image disk format
- :param architecture: glance image architecture property
- :param container_format: glance image container format
- :returns: glance image pointer
- """
- self.log.debug('Creating glance image ({}) from '
- '{}...'.format(image_name, image_url))
-
- # Download image
- http_proxy = os.getenv('AMULET_HTTP_PROXY')
- self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
- if http_proxy:
- proxies = {'http': http_proxy}
- opener = urllib.FancyURLopener(proxies)
- else:
- opener = urllib.FancyURLopener()
-
- abs_file_name = os.path.join(download_dir, image_name)
- if not os.path.exists(abs_file_name):
- opener.retrieve(image_url, abs_file_name)
-
- # Create glance image
- glance_properties = {
- 'architecture': architecture,
- 'hypervisor_type': hypervisor_type
- }
- with open(abs_file_name) as f:
- image = glance.images.create(name=image_name,
- is_public=True,
- disk_format=disk_format,
- container_format=container_format,
- properties=glance_properties,
- data=f)
-
- # Wait for image to reach active status
- img_id = image.id
- ret = self.resource_reaches_status(glance.images, img_id,
- expected_stat='active',
- msg='Image status wait')
- if not ret:
- msg = 'Glance image failed to reach expected state.'
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- # Re-validate new image
- self.log.debug('Validating image attributes...')
- val_img_name = glance.images.get(img_id).name
- val_img_stat = glance.images.get(img_id).status
- val_img_pub = glance.images.get(img_id).is_public
- val_img_cfmt = glance.images.get(img_id).container_format
- val_img_dfmt = glance.images.get(img_id).disk_format
- msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
- 'container fmt:{} disk fmt:{}'.format(
- val_img_name, val_img_pub, img_id,
- val_img_stat, val_img_cfmt, val_img_dfmt))
-
- if val_img_name == image_name and val_img_stat == 'active' \
- and val_img_pub is True and val_img_cfmt == container_format \
- and val_img_dfmt == disk_format:
- self.log.debug(msg_attr)
- else:
- msg = ('Image validation failed, {}'.format(msg_attr))
- amulet.raise_status(amulet.FAIL, msg=msg)
-
- return image
