Name: nova-bin Namespace: openstack Labels: app.kubernetes.io/managed-by=Helm Annotations: meta.helm.sh/release-name: nova meta.helm.sh/release-namespace: openstack Data ==== ks-user.sh: ---- #!/bin/bash # Copyright 2017 Pete Birley # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. set -ex shopt -s nocasematch if [[ "${SERVICE_OS_PROJECT_DOMAIN_NAME}" == "Default" ]] then PROJECT_DOMAIN_ID="default" else # Manage project domain PROJECT_DOMAIN_ID=$(openstack domain create --or-show --enable -f value -c id \ --description="Domain for ${SERVICE_OS_REGION_NAME}/${SERVICE_OS_PROJECT_DOMAIN_NAME}" \ "${SERVICE_OS_PROJECT_DOMAIN_NAME}") fi if [[ "${SERVICE_OS_USER_DOMAIN_NAME}" == "Default" ]] then USER_DOMAIN_ID="default" else # Manage user domain USER_DOMAIN_ID=$(openstack domain create --or-show --enable -f value -c id \ --description="Domain for ${SERVICE_OS_REGION_NAME}/${SERVICE_OS_USER_DOMAIN_NAME}" \ "${SERVICE_OS_USER_DOMAIN_NAME}") fi shopt -u nocasematch # Manage user project USER_PROJECT_DESC="Service Project for ${SERVICE_OS_REGION_NAME}/${SERVICE_OS_PROJECT_DOMAIN_NAME}" USER_PROJECT_ID=$(openstack project create --or-show --enable -f value -c id \ --domain="${PROJECT_DOMAIN_ID}" \ --description="${USER_PROJECT_DESC}" \ "${SERVICE_OS_PROJECT_NAME}"); # Manage user USER_DESC="Service User for ${SERVICE_OS_REGION_NAME}/${SERVICE_OS_USER_DOMAIN_NAME}/${SERVICE_OS_SERVICE_NAME}" USER_ID=$(openstack user create --or-show --enable -f value -c id \ --domain="${USER_DOMAIN_ID}" \ 
--project-domain="${PROJECT_DOMAIN_ID}" \ --project="${USER_PROJECT_ID}" \ --description="${USER_DESC}" \ "${SERVICE_OS_USERNAME}"); # Manage user password (we do this in a seperate step to ensure the password is updated if required) set +x echo "Setting user password via: openstack user set --password=xxxxxxx ${USER_ID}" openstack user set --password="${SERVICE_OS_PASSWORD}" "${USER_ID}" set -x function ks_assign_user_role () { if [[ "$SERVICE_OS_ROLE" == "admin" ]] then USER_ROLE_ID="$SERVICE_OS_ROLE" else USER_ROLE_ID=$(openstack role create --or-show -f value -c id "${SERVICE_OS_ROLE}"); fi # Manage user role assignment openstack role add \ --user="${USER_ID}" \ --user-domain="${USER_DOMAIN_ID}" \ --project-domain="${PROJECT_DOMAIN_ID}" \ --project="${USER_PROJECT_ID}" \ "${USER_ROLE_ID}" } # Manage user service role IFS=',' for SERVICE_OS_ROLE in ${SERVICE_OS_ROLES}; do ks_assign_user_role done # Manage user member role : ${MEMBER_OS_ROLE:="member"} export USER_ROLE_ID=$(openstack role create --or-show -f value -c id \ "${MEMBER_OS_ROLE}"); ks_assign_user_role nova-api-metadata.sh: ---- #!/bin/bash set -ex COMMAND="${@:-start}" function start () { exec uwsgi --ini /etc/nova/nova-metadata-uwsgi.ini } function stop () { kill -TERM 1 } $COMMAND nova-service-cleaner.sh: ---- #!/bin/bash set -xe # If any non-compute service is down, then sleep for 2 times the report_interval # to confirm service is still down. DISABLED_SVC="$(openstack compute service list -f value | grep -v 'nova-compute' | grep 'down' || true)" if [ ! 
-z "${DISABLED_SVC}" ]; then sleep 60 fi NOVA_SERVICES_TO_CLEAN="$(openstack compute service list -f value -c Binary | sort | uniq | grep -v '^nova-compute$')" for NOVA_SERVICE in ${NOVA_SERVICES_TO_CLEAN}; do DEAD_SERVICE_IDS=$(openstack compute service list --service ${NOVA_SERVICE} -f json | jq -r '.[] | select(.State == "down") | .ID') for SERVICE_ID in ${DEAD_SERVICE_IDS}; do openstack compute service delete "${SERVICE_ID}" done done cell-setup.sh: ---- #!/bin/bash set -ex NOVA_VERSION=$(nova-manage --version 2>&1 | grep -Eo '[0-9]+[.][0-9]+[.][0-9]+') # NOTE(portdirect): check if nova fully supports cells v2, and manage # accordingly. Support was complete in ocata (V14.x.x). if [ "${NOVA_VERSION%%.*}" -gt "14" ]; then nova-manage cell_v2 discover_hosts --verbose fi db-sync.sh: ---- #!/bin/bash set -ex NOVA_VERSION=$(nova-manage --version 2>&1 | grep -Eo '[0-9]+[.][0-9]+[.][0-9]+') function manage_cells () { # NOTE(portdirect): check if nova fully supports cells v2, and manage # accordingly. Support was complete in ocata (V14.x.x). 
if [ "${NOVA_VERSION%%.*}" -gt "14" ]; then nova-manage cell_v2 map_cell0 nova-manage cell_v2 list_cells | grep -q " cell1 " || \ nova-manage cell_v2 create_cell --name=cell1 --verbose CELL0_ID=$(nova-manage cell_v2 list_cells | awk -F '|' '/ cell0 / { print $3 }' | tr -d ' ') CELL1_ID=$(nova-manage cell_v2 list_cells | awk -F '|' '/ cell1 / { print $3 }' | tr -d ' ') set +x CELL0_TRANSPORT=$(nova-manage cell_v2 list_cells | awk -F '|' '/ cell0 / { print $4 }' | tr -d ' ') if [ -z "${DB_CONNECTION_CELL0}" ]; then echo "ERROR: missing DB_CONNECTION_CELL0" exit 1 fi nova-manage cell_v2 update_cell \ --cell_uuid="${CELL0_ID}" \ --name="cell0" \ --transport-url="${CELL0_TRANSPORT}" \ --database_connection="${DB_CONNECTION_CELL0}" for VAR in TRANSPORT_URL DB_CONNECTION; do if [ -z "${!VAR}" ]; then echo "ERROR: missing $VAR variable" exit 1 fi done nova-manage cell_v2 update_cell \ --cell_uuid="${CELL1_ID}" \ --name="cell1" \ --transport-url="${TRANSPORT_URL}" \ --database_connection="${DB_CONNECTION}" set -x fi } # NOTE(aostapenko) Starting Wallaby nova-manage api_db version returns init version for empty database # greater than 0 # https://opendev.org/openstack/nova/src/branch/stable/wallaby/nova/db/sqlalchemy/migration.py#L32 # thus logic prior to this commit does not work. We need to either remove or justify and alter previous logic. nova-manage api_db sync manage_cells nova-manage db sync nova-manage db online_data_migrations echo 'Finished DB migrations' bootstrap.sh: ---- #!/bin/bash set -ex export HOME=/tmp echo 'Wait for Computes script not enabled' echo 'No other bootstrap customizations found.' 
ceph-admin-keyring.sh: ----
#!/bin/bash
set -ex
export HOME=/tmp

# Materialize the Ceph admin keyring from the mounted secret.
cat > /etc/ceph/ceph.client.admin.keyring << EOF
[client.admin]
key = $(cat /tmp/client-keyring)
EOF

exit 0

rally-test.sh: ----
#!/bin/bash
set -ex

: "${RALLY_ENV_NAME:="openstack-helm"}"
: "${OS_INTERFACE:="public"}"
: "${RALLY_CLEANUP:="true"}"

if [ "x$RALLY_CLEANUP" == "xtrue" ]; then
  # Remove the rally service user and any leftover rally flavors,
  # servers and images on exit.
  function rally_cleanup {
    openstack user delete \
      --domain="${SERVICE_OS_USER_DOMAIN_NAME}" \
      "${SERVICE_OS_USERNAME}"
    FLAVORS=$(openstack flavor list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }')
    if [ -n "$FLAVORS" ]; then
      echo $FLAVORS | xargs openstack flavor delete
    fi
    SERVERS=$(openstack server list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }')
    if [ -n "$SERVERS" ]; then
      echo $SERVERS | xargs openstack server delete
    fi
    IMAGES=$(openstack image list -f value | awk '$2 ~ /^c_rally_/ { print $1 }')
    if [ -n "$IMAGES" ]; then
      echo $IMAGES | xargs openstack image delete
    fi
  }
  trap rally_cleanup EXIT
fi

function create_or_update_db () {
  revisionResults=$(rally db revision)
  # FIX: quote the expansion -- an empty or multi-word revision string
  # would otherwise make the [ ... ] test malformed.
  if [ "$revisionResults" = "None" ]; then
    rally db create
  else
    rally db upgrade
  fi
}
create_or_update_db

cat > /tmp/rally-config.json << EOF
{
  "openstack": {
    "auth_url": "${OS_AUTH_URL}",
    "region_name": "${OS_REGION_NAME}",
    "endpoint_type": "${OS_INTERFACE}",
    "admin": {
      "username": "${OS_USERNAME}",
      "password": "${OS_PASSWORD}",
      "user_domain_name": "${OS_USER_DOMAIN_NAME}",
      "project_name": "${OS_PROJECT_NAME}",
      "project_domain_name": "${OS_PROJECT_DOMAIN_NAME}"
    },
    "users": [
      {
        "username": "${SERVICE_OS_USERNAME}",
        "password": "${SERVICE_OS_PASSWORD}",
        "project_name": "${SERVICE_OS_PROJECT_NAME}",
        "user_domain_name": "${SERVICE_OS_USER_DOMAIN_NAME}",
        "project_domain_name": "${SERVICE_OS_PROJECT_DOMAIN_NAME}"
      }
    ],
    "https_insecure": false,
    "https_cacert": "${OS_CACERT}"
  }
}
EOF
rally deployment create --file /tmp/rally-config.json --name "${RALLY_ENV_NAME}"
rm -f /tmp/rally-config.json
rally deployment use "${RALLY_ENV_NAME}"
rally deployment
check
rally task validate /etc/rally/rally_tests.yaml
rally task start /etc/rally/rally_tests.yaml
rally task sla-check
rally env cleanup
rally deployment destroy --deployment "${RALLY_ENV_NAME}"

storage-init.sh: ----
#!/bin/bash
set -x
if [ "x$STORAGE_BACKEND" == "xrbd" ]; then
  SECRET=$(mktemp --suffix .yaml)
  KEYRING=$(mktemp --suffix .keyring)
  function cleanup {
    rm -f ${SECRET} ${KEYRING}
  }
  trap cleanup EXIT
fi

set -ex
if [ "x$STORAGE_BACKEND" == "xrbd" ]; then
  ceph -s
  # Create the pool if absent and enforce replication/crush settings,
  # temporarily lifting nosizechange protection.
  function ensure_pool () {
    ceph osd pool stats $1 || ceph osd pool create $1 $2
    if [[ $(ceph mgr versions | awk '/version/{print $3}' | cut -d. -f1) -ge 12 ]]; then
      ceph osd pool application enable $1 $3
    fi
    size_protection=$(ceph osd pool get $1 nosizechange | cut -f2 -d: | tr -d '[:space:]')
    ceph osd pool set $1 nosizechange 0
    ceph osd pool set $1 size ${RBD_POOL_REPLICATION}
    ceph osd pool set $1 nosizechange ${size_protection}
    ceph osd pool set $1 crush_rule "${RBD_POOL_CRUSH_RULE}"
  }
  ensure_pool ${RBD_POOL_NAME} ${RBD_POOL_CHUNK_SIZE} ${RBD_POOL_APP_NAME}
fi

nova-api-metadata-init.sh: ----
#!/bin/bash
set -ex

metadata_ip=""
if [ -z "${metadata_ip}" ] ; then
  metadata_ip=$(getent hosts metadata | awk '{print $1}')
fi
# NOTE(review): the heredoc redirection was mangled in this dump
# ("cat </tmp/pod-shared/..."); reconstructed as a write to the pod-shared
# ini file -- confirm against the chart template.
cat <<EOF > /tmp/pod-shared/nova-api-metadata.ini
[DEFAULT]
metadata_host=$metadata_ip
EOF

nova-compute.sh: ----
#!/bin/bash
set -ex
exec nova-compute \
  --config-file /etc/nova/nova.conf \
  --config-file /tmp/pod-shared/nova-console.conf \
  --config-file /tmp/pod-shared/nova-libvirt.conf \
  --config-file /tmp/pod-shared/nova-compute-fqdn.conf \
  --config-file /tmp/pod-shared/nova-hypervisor.conf

nova-conductor.sh: ----
#!/bin/bash
set -x
exec nova-conductor \
  --config-file /etc/nova/nova.conf

nova-scheduler.sh: ----
#!/bin/bash
set -xe
exec nova-scheduler \
  --config-file /etc/nova/nova.conf

ssh-init.sh: ----
#!/bin/bash
set -ex

export NOVA_USERNAME=$(id -u ${NOVA_USER_UID} -n)
export NOVA_USER_HOME=$(eval echo ~${NOVA_USERNAME})

mkdir -p ${NOVA_USER_HOME}/.ssh
cat > ${NOVA_USER_HOME}/.ssh/config
< 1: # Third time in, kill the previous process os.kill(int(data['pid']), signal.SIGTERM) else: data['exit_count'] = data.get('exit_count', 0) + 1 with open(pidfile, 'w') as f: json.dump(data, f) sys.exit(0) data['pid'] = os.getpid() data['exit_count'] = 0 with open(pidfile, 'w') as f: json.dump(data, f) test_rpc_liveness() sys.exit(0) # return success

ks-service.sh: ----
#!/bin/bash

# Copyright 2017 Pete Birley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -ex

# Service boilerplate description
OS_SERVICE_DESC="${OS_REGION_NAME}: ${OS_SERVICE_NAME} (${OS_SERVICE_TYPE}) service"

# Get Service ID if it exists
unset OS_SERVICE_ID
# FIXME - There seems to be an issue once in a while where the
# openstack service list fails and encounters an error message such as:
# Unable to establish connection to
# https://keystone-api.openstack.svc.cluster.local:5000/v3/auth/tokens:
# ('Connection aborted.', OSError("(104, 'ECONNRESET')",))
# During an upgrade scenario, this would cause the OS_SERVICE_ID to be blank
# and it would attempt to create a new service when it was not needed.
# This duplicate service would sometimes be used by other services such as
# Horizon and would give an 'Invalid Service Catalog' error.
# This loop allows for a 'retry' of the openstack service list in an
# attempt to get the service list as expected if it does encounter an error.
# This loop and recheck can be reverted once the underlying issue is addressed.
# If OS_SERVICE_ID is blank then wait a few seconds to give it # additional time and try again for i in $(seq 3) do OS_SERVICE_ID=$( openstack service list -f csv --quote none | \ grep ",${OS_SERVICE_NAME},${OS_SERVICE_TYPE}$" | \ sed -e "s/,${OS_SERVICE_NAME},${OS_SERVICE_TYPE}//g" ) # If the service was found, go ahead and exit successfully. if [[ -n "${OS_SERVICE_ID}" ]]; then exit 0 fi sleep 2 done # If we've reached this point and a Service ID was not found, # then create the service OS_SERVICE_ID=$(openstack service create -f value -c id \ --name="${OS_SERVICE_NAME}" \ --description "${OS_SERVICE_DESC}" \ --enable \ "${OS_SERVICE_TYPE}") nova-console-compute-init.sh: ---- #!/bin/bash set -ex console_kind="novnc" if [ "${console_kind}" == "novnc" ] ; then client_address="" client_interface="" client_network_cidr="0/0" listen_ip="0.0.0.0" elif [ "${console_kind}" == "spice" ] ; then client_address="" client_interface="" client_network_cidr="0/0" listen_ip="0.0.0.0" fi if [ -z "${client_address}" ] ; then if [ -z "${client_interface}" ] ; then if [ -z "${client_network_cidr}" ] ; then client_network_cidr="0/0" fi client_interface=$(ip -4 route list ${client_network_cidr} | awk -F 'dev' '{ print $2; exit }' | awk '{ print $1 }') || exit 1 fi # determine client ip dynamically based on interface provided client_address=$(ip a s $client_interface | grep 'inet ' | awk '{print $2}' | awk -F "/" '{print $1}' | head -1) fi if [ -z "${listen_ip}" ] ; then # The server component listens on all IP addresses and the proxy component # only listens on the management interface IP address of the compute node. 
listen_ip=0.0.0.0 fi touch /tmp/pod-shared/nova-console.conf if [ "${console_kind}" == "novnc" ] ; then cat > /tmp/pod-shared/nova-console.conf < /tmp/pod-shared/nova-console.conf < /tmp/rmq_definitions.json rabbitmqadmin_cli import /tmp/rmq_definitions.json fi cell-setup-init.sh: ---- #!/bin/bash set -ex until openstack compute service list --service nova-compute -f value -c State | grep -q "^up$" ;do echo "Waiting for Nova Compute processes to register" sleep 10 done fake-iptables.sh: ---- #!/bin/bash exit 0 ks-endpoints.sh: ---- #!/bin/bash # Copyright 2017 Pete Birley # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
set -ex # Get Service ID OS_SERVICE_ID=$( openstack service list -f csv --quote none | \ grep ",${OS_SERVICE_NAME},${OS_SERVICE_TYPE}$" | \ sed -e "s/,${OS_SERVICE_NAME},${OS_SERVICE_TYPE}//g" ) # Get Endpoint ID if it exists OS_ENDPOINT_ID=$( openstack endpoint list -f csv --quote none | \ grep "^[a-z0-9]*,${OS_REGION_NAME},${OS_SERVICE_NAME},${OS_SERVICE_TYPE},True,${OS_SVC_ENDPOINT}," | \ awk -F ',' '{ print $1 }' ) # Making sure only a single endpoint exists for a service within a region if [ "$(echo $OS_ENDPOINT_ID | wc -w)" -gt "1" ]; then echo "More than one endpoint found, cleaning up" for ENDPOINT_ID in $OS_ENDPOINT_ID; do openstack endpoint delete ${ENDPOINT_ID} done unset OS_ENDPOINT_ID fi # Determine if Endpoint needs updated if [[ ${OS_ENDPOINT_ID} ]]; then OS_ENDPOINT_URL_CURRENT=$(openstack endpoint show ${OS_ENDPOINT_ID} -f value -c url) if [ "${OS_ENDPOINT_URL_CURRENT}" == "${OS_SERVICE_ENDPOINT}" ]; then echo "Endpoints Match: no action required" OS_ENDPOINT_UPDATE="False" else echo "Endpoints Dont Match: removing existing entries" openstack endpoint delete ${OS_ENDPOINT_ID} OS_ENDPOINT_UPDATE="True" fi else OS_ENDPOINT_UPDATE="True" fi # Update Endpoint if required if [[ "${OS_ENDPOINT_UPDATE}" == "True" ]]; then OS_ENDPOINT_ID=$( openstack endpoint create -f value -c id \ --region="${OS_REGION_NAME}" \ "${OS_SERVICE_ID}" \ ${OS_SVC_ENDPOINT} \ "${OS_SERVICE_ENDPOINT}" ) fi # Display the Endpoint openstack endpoint show ${OS_ENDPOINT_ID} nova-api.sh: ---- #!/bin/bash set -ex COMMAND="${@:-start}" function start () { exec uwsgi --ini /etc/nova/nova-api-uwsgi.ini } function stop () { kill -TERM 1 } $COMMAND ssh-start.sh: ---- #!/bin/bash set -ex IFS=',' for KEY_TYPE in $KEY_TYPES; do KEY_PATH=/etc/ssh/ssh_host_${KEY_TYPE}_key if [[ ! 
-f "${KEY_PATH}" ]]; then ssh-keygen -q -t ${KEY_TYPE} -f ${KEY_PATH} -N "" fi done IFS='' subnet_address="0.0.0.0/0" if [ -z "${subnet_address}" ] ; then subnet_address="0.0.0.0/0" fi listen_interface=$(ip -4 route list ${subnet_address} | awk -F 'dev' '{ print $2; exit }' | awk '{ print $1 }') || exit 1 listen_address=$(ip a s $listen_interface | grep 'inet ' | awk '{print $2}' | awk -F "/" '{print $1}' | head -1) cat > /tmp/sshd_config_extend <> /etc/ssh/sshd_config rm /tmp/sshd_config_extend mkdir -p /run/sshd exec /usr/sbin/sshd -D -e -o Port=$SSH_PORT db-drop.py: ---- #!/usr/bin/env python # Drops db and user for an OpenStack Service: # Set ROOT_DB_CONNECTION and DB_CONNECTION environment variables to contain # SQLAlchemy strings for the root connection to the database and the one you # wish the service to use. Alternatively, you can use an ini formatted config # at the location specified by OPENSTACK_CONFIG_FILE, and extract the string # from the key OPENSTACK_CONFIG_DB_KEY, in the section specified by # OPENSTACK_CONFIG_DB_SECTION. 
import os import sys try: import ConfigParser PARSER_OPTS = {} except ImportError: import configparser as ConfigParser PARSER_OPTS = {"strict": False} import logging from sqlalchemy import create_engine from sqlalchemy import text # Create logger, console handler and formatter logger = logging.getLogger('OpenStack-Helm DB Drop') logger.setLevel(logging.DEBUG) ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s') # Set the formatter and add the handler ch.setFormatter(formatter) logger.addHandler(ch) # Get the connection string for the service db root user if "ROOT_DB_CONNECTION" in os.environ: db_connection = os.environ['ROOT_DB_CONNECTION'] logger.info('Got DB root connection') else: logger.critical('environment variable ROOT_DB_CONNECTION not set') sys.exit(1) mysql_x509 = os.getenv('MARIADB_X509', "") ssl_args = {} if mysql_x509: ssl_args = {'ssl': {'ca': '/etc/mysql/certs/ca.crt', 'key': '/etc/mysql/certs/tls.key', 'cert': '/etc/mysql/certs/tls.crt'}} # Get the connection string for the service db if "OPENSTACK_CONFIG_FILE" in os.environ: os_conf = os.environ['OPENSTACK_CONFIG_FILE'] if "OPENSTACK_CONFIG_DB_SECTION" in os.environ: os_conf_section = os.environ['OPENSTACK_CONFIG_DB_SECTION'] else: logger.critical('environment variable OPENSTACK_CONFIG_DB_SECTION not set') sys.exit(1) if "OPENSTACK_CONFIG_DB_KEY" in os.environ: os_conf_key = os.environ['OPENSTACK_CONFIG_DB_KEY'] else: logger.critical('environment variable OPENSTACK_CONFIG_DB_KEY not set') sys.exit(1) try: config = ConfigParser.RawConfigParser(**PARSER_OPTS) logger.info("Using {0} as db config source".format(os_conf)) config.read(os_conf) logger.info("Trying to load db config from {0}:{1}".format( os_conf_section, os_conf_key)) user_db_conn = config.get(os_conf_section, os_conf_key) logger.info("Got config from {0}".format(os_conf)) except: logger.critical("Tried to load config from {0} but 
failed.".format(os_conf)) raise elif "DB_CONNECTION" in os.environ: user_db_conn = os.environ['DB_CONNECTION'] logger.info('Got config from DB_CONNECTION env var') else: logger.critical('Could not get db config, either from config file or env var') sys.exit(1) # Root DB engine try: root_engine_full = create_engine(db_connection) root_user = root_engine_full.url.username root_password = root_engine_full.url.password drivername = root_engine_full.url.drivername host = root_engine_full.url.host port = root_engine_full.url.port root_engine_url = ''.join([drivername, '://', root_user, ':', root_password, '@', host, ':', str (port)]) root_engine = create_engine(root_engine_url, connect_args=ssl_args) connection = root_engine.connect() connection.close() logger.info("Tested connection to DB @ {0}:{1} as {2}".format( host, port, root_user)) except: logger.critical('Could not connect to database as root user') raise # User DB engine try: user_engine = create_engine(user_db_conn, connect_args=ssl_args) # Get our user data out of the user_engine database = user_engine.url.database user = user_engine.url.username password = user_engine.url.password logger.info('Got user db config') except: logger.critical('Could not get user database config') raise # Delete DB try: with root_engine.connect() as connection: connection.execute(text("DROP DATABASE IF EXISTS {0}".format(database))) try: connection.commit() except AttributeError: pass logger.info("Deleted database {0}".format(database)) except: logger.critical("Could not drop database {0}".format(database)) raise # Delete DB User try: with root_engine.connect() as connection: connection.execute(text("DROP USER IF EXISTS {0}".format(user))) try: connection.commit() except AttributeError: pass logger.info("Deleted user {0}".format(user)) except: logger.critical("Could not delete user {0}".format(user)) raise logger.info('Finished DB Management') nova-compute-init.sh: ---- #!/bin/bash set -ex # Make the Nova Instances Dir as this is 
not autocreated. mkdir -p /var/lib/nova/instances # Set Ownership of nova dirs to the nova user chown ${NOVA_USER_UID} /var/lib/nova /var/lib/nova/instances migration_interface="" if [[ -z $migration_interface ]]; then # search for interface with default routing # If there is not default gateway, exit migration_network_cidr="0/0" if [ -z "${migration_network_cidr}" ] ; then migration_network_cidr="0/0" fi migration_interface=$(ip -4 route list ${migration_network_cidr} | awk -F 'dev' '{ print $2; exit }' | awk '{ print $1 }') || exit 1 fi migration_address=$(ip a s $migration_interface | grep 'inet ' | awk '{print $2}' | awk -F "/" '{print $1}' | head -1) if [ -z "${migration_address}" ] ; then echo "Var live_migration_interface is empty" exit 1 fi tee > /tmp/pod-shared/nova-libvirt.conf << EOF [libvirt] live_migration_inbound_addr = $migration_address EOF hypervisor_interface="" if [[ -z $hypervisor_interface ]]; then # search for interface with default routing # If there is not default gateway, exit hypervisor_network_cidr="0/0" if [ -z "${hypervisor_network_cidr}" ] ; then hypervisor_network_cidr="0/0" fi hypervisor_interface=$(ip -4 route list ${hypervisor_network_cidr} | awk -F 'dev' '{ print $2; exit }' | awk '{ print $1 }') || exit 1 fi hypervisor_address=$(ip a s $hypervisor_interface | grep 'inet ' | awk '{print $2}' | awk -F "/" '{print $1}' | head -1) if [ -z "${hypervisor_address}" ] ; then echo "Var my_ip is empty" exit 1 fi tee > /tmp/pod-shared/nova-hypervisor.conf << EOF [DEFAULT] my_ip = $hypervisor_address EOF tee > /tmp/pod-shared/nova-compute-fqdn.conf << EOF [DEFAULT] host = $(hostname --fqdn) EOF nova-compute-ironic.sh: ---- #!/bin/bash set -ex exec nova-compute \ --config-file /etc/nova/nova.conf \ --config-file /etc/nova/nova-ironic.conf nova-console-proxy-init.sh: ---- #!/bin/bash set -ex console_kind="novnc" if [ "${console_kind}" == "novnc" ] ; then client_address="" client_interface="" client_network_cidr="0/0" listen_ip="0.0.0.0" elif [ 
"${console_kind}" == "spice" ] ; then
  client_address=""
  client_interface=""
  client_network_cidr="0/0"
  listen_ip="0.0.0.0"
fi

if [ -z "${client_address}" ] ; then
  if [ -z "${client_interface}" ] ; then
    if [ -z "${client_network_cidr}" ] ; then
      client_network_cidr="0/0"
    fi
    client_interface=$(ip -4 route list ${client_network_cidr} | awk -F 'dev' '{ print $2; exit }' | awk '{ print $1 }') || exit 1
  fi
  # determine client ip dynamically based on interface provided
  client_address=$(ip a s $client_interface | grep 'inet ' | awk '{print $2}' | awk -F "/" '{print $1}' | head -1)
fi

if [ -z "${listen_ip}" ] ; then
  listen_ip=$client_address
fi

# NOTE(review): the heredoc redirections below were mangled in this dump
# ("cat </tmp/pod-shared/..."); reconstructed as writes to the pod-shared
# ini files -- confirm against the chart template.
if [ "${console_kind}" == "novnc" ] ; then
  cat <<EOF >/tmp/pod-shared/nova-vnc.ini
[vnc]
server_proxyclient_address = $client_address
server_listen = $listen_ip
novncproxy_host = $listen_ip
EOF
elif [ "${console_kind}" == "spice" ] ; then
  cat <<EOF >/tmp/pod-shared/nova-spice.ini
[spice]
server_proxyclient_address = $client_address
server_listen = $listen_ip
EOF
fi

wait-for-computes-init.sh: ----
#!/bin/bash
set -ex

# This runs in a bootstrap init container. It counts the number of compute nodes.
COMPUTE_NODES=$(kubectl get nodes -o custom-columns=NAME:.metadata.name -l openstack-compute-node=enabled --no-headers | sort)
# Unquoted expansion is intentional here: it flattens the node list onto a
# single space-separated line.
/bin/echo $COMPUTE_NODES > /tmp/compute_nodes.txt

BinaryData ==== Events: