# COMPUTED VALUES
bootstrap:
  enabled: true
  ks_user: admin
  script: null
  structured:
    flavors:
      enabled: false
      options:
        m1_large:
          disk: 80
          name: m1.large
          ram: 8192
          vcpus: 4
        m1_medium:
          disk: 40
          name: m1.medium
          ram: 4096
          vcpus: 2
        m1_small:
          disk: 20
          name: m1.small
          ram: 2048
          vcpus: 1
        m1_tiny:
          disk: 1
          name: m1.tiny
          ram: 512
          vcpus: 1
        m1_xlarge:
          disk: 160
          name: m1.xlarge
          ram: 16384
          vcpus: 8
  wait_for_computes:
    enabled: false
    remaining_wait: 300
    scripts:
      init_script: |
        # This runs in a bootstrap init container. It counts the number of compute nodes.
        COMPUTE_NODES=$(kubectl get nodes -o custom-columns=NAME:.metadata.name -l openstack-compute-node=enabled --no-headers | sort)
        /bin/echo $COMPUTE_NODES > /tmp/compute_nodes.txt
      wait_script: |
        # This script runs in the main bootstrap container just before the
        # bootstrap.script is called.
        COMPUTE_HOSTS=$(wc -w < /tmp/compute_nodes.txt)
        if [[ $COMPUTE_HOSTS -eq 0 ]]; then
          echo "There are no compute hosts found!"
          exit 1
        fi

        # Wait for all hypervisors to come up before moving on with the deployment
        HYPERVISOR_WAIT=true
        WAIT_AFTER_READY=0
        SLEEP=5
        while [[ $HYPERVISOR_WAIT == true ]]; do
          # It's possible that the openstack command may fail due to not being
          # able to reach the compute service
          set +e
          HYPERVISORS=$(openstack hypervisor list -f value -c 'Hypervisor Hostname' | wc -w)
          set -e

          PERCENT_READY=$(( $HYPERVISORS * 100 / $COMPUTE_HOSTS ))
          if [[ $PERCENT_READY -ge $WAIT_PERCENTAGE ]]; then
            echo "Hypervisor ready percentage is $PERCENT_READY"
            if [[ $PERCENT_READY == 100 ]]; then
              HYPERVISOR_WAIT=false
              echo "All hypervisors are ready."
            elif [[ $WAIT_AFTER_READY -ge $REMAINING_WAIT ]]; then
              HYPERVISOR_WAIT=false
              echo "Waited the configured time -- $HYPERVISORS out of $COMPUTE_HOSTS hypervisor(s) ready -- proceeding with the bootstrap."
            else
              sleep $SLEEP
              WAIT_AFTER_READY=$(( $WAIT_AFTER_READY + $SLEEP ))
            fi
          else
            echo "Waiting $SLEEP seconds for enough hypervisors to be discovered..."
            sleep $SLEEP
          fi
        done
    wait_percentage: 70
ceph_client:
  configmap: ceph-etc
  user_secret_name: pvc-ceph-client-key
conf:
  api_audit_map:
    DEFAULT:
      target_endpoint_type: None
    custom_actions:
      delete: delete
      disable: disable
      enable: enable
      os-migrations/get: read
      os-server-password/post: update
      reboot: start/reboot
      shutdown: stop/shutdown
      startup: start/startup
    path_keywords:
      action: None
      add: None
      configure-project: None
      defaults: None
      delete: None
      detail: None
      diagnostics: None
      disable: None
      enable: None
      entries: entry
      extensions: alias
      flavors: flavor
      images: image
      ips: label
      limits: None
      metadata: key
      os-agents: os-agent
      os-aggregates: os-aggregate
      os-availability-zone: None
      os-certificates: None
      os-cloudpipe: None
      os-extra_specs: key
      os-fixed-ips: ip
      os-flavor-access: None
      os-floating-ip-dns: domain
      os-floating-ip-pools: None
      os-floating-ips: floating-ip
      os-floating-ips-bulk: host
      os-hosts: host
      os-hypervisors: hypervisor
      os-instance-actions: instance-action
      os-keypairs: keypair
      os-migrations: None
      os-networks: network
      os-quota-sets: tenant
      os-security-group-rules: rule
      os-security-groups: security_group
      os-server-password: None
      os-services: None
      os-simple-tenant-usage: tenant
      os-snapshots: snapshot
      os-virtual-interfaces: None
      os-volume-types: volume-type
      os-volume_attachments: attachment
      os-volumes: volume
      os-volumes_boot: None
      reboot: None
      servers: server
      shutdown: None
      startup: None
      statistics: None
    service_endpoints:
      compute: service/compute
  archive_deleted_rows:
    all_cells: false
    before:
      date: nil
      enabled: false
    max_rows:
      enabled: false
      rows: 1000
    purge_deleted_rows: false
    until_completion: true
  ceph:
    admin_keyring: null
    cinder:
      keyring: null
      secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
      user: cinder
    enabled: true
  enable_iscsi: false
  hypervisor:
    address_search_enabled: true
    host_interface: null
    host_network_cidr: 0/0
  libvirt:
    address_search_enabled: true
    live_migration_interface: null
    live_migration_network_cidr: 0/0
  logging:
    formatter_context:
      class: oslo_log.formatters.ContextFormatter
      datefmt: '%Y-%m-%d %H:%M:%S'
    formatter_default:
      datefmt: '%Y-%m-%d %H:%M:%S'
      format: '%(message)s'
    formatters:
      keys:
      - context
      - default
    handler_null:
      args: ()
      class: logging.NullHandler
      formatter: default
    handler_stderr:
      args: (sys.stderr,)
      class: StreamHandler
      formatter: context
    handler_stdout:
      args: (sys.stdout,)
      class: StreamHandler
      formatter: context
    handlers:
      keys:
      - stdout
      - stderr
      - "null"
    logger_amqp:
      handlers: stderr
      level: WARNING
      qualname: amqp
    logger_amqplib:
      handlers: stderr
      level: WARNING
      qualname: amqplib
    logger_boto:
      handlers: stderr
      level: WARNING
      qualname: boto
    logger_eventletwsgi:
      handlers: stderr
      level: WARNING
      qualname: eventlet.wsgi.server
    logger_nova:
      handlers:
      - stdout
      level: INFO
      qualname: nova
    logger_os.brick:
      handlers:
      - stdout
      level: INFO
      qualname: os.brick
    logger_root:
      handlers: "null"
      level: WARNING
    logger_sqlalchemy:
      handlers: stderr
      level: WARNING
      qualname: sqlalchemy
    loggers:
      keys:
      - root
      - nova
      - os.brick
  nova:
    DEFAULT:
      allow_resize_to_same_host: true
      compute_driver: libvirt.LibvirtDriver
      cpu_allocation_ratio: 4.5
      default_ephemeral_format: ext4
      disk_allocation_ratio: 3
      instance_usage_audit: true
      instance_usage_audit_period: hour
      metadata_workers: 2
      my_ip: 0.0.0.0
      osapi_compute_listen: 0.0.0.0
      osapi_compute_listen_port: null
      osapi_compute_workers: 2
      ram_allocation_ratio: 0.9
      resume_guests_state_on_host_boot: true
      state_path: /var/lib/nova
    api:
      list_records_by_skipping_down_cells: false
    api_database:
      connection: mysql+pymysql://nova:ocDjohhCvGks3YHGVgd1JWNFjwNQXJqc@percona-xtradb-haproxy.openstack.svc.cluster.local:3306/nova_api
      max_retries: -1
    barbican:
      barbican_endpoint_type: internal
    cache:
      backend: oslo_cache.memcache_pool
      enabled: true
    cell0_database:
      connection: mysql+pymysql://nova:ocDjohhCvGks3YHGVgd1JWNFjwNQXJqc@percona-xtradb-haproxy.openstack.svc.cluster.local:3306/nova_cell0
      max_retries: -1
    cinder:
      auth_type: password
      auth_url: http://keystone-api.openstack.svc.cluster.local:5000/
      catalog_info: volumev3::internalURL
      os_region_name: RegionOne
      password: e9N4XQDXDfgHebIUovcwgaeoXBNiBqss
      project_domain_name: service
      project_name: service
      user_domain_name: service
      username: cinder-RegionOne
    compute:
      consecutive_build_service_disable_threshold: 0
    conductor:
      workers: 2
    cors:
      allow_headers: X-Auth-Token,X-OpenStack-Nova-API-Version
      allowed_origin: '*'
    database:
      connection_recycle_time: 600
      max_overflow: 50
      max_pool_size: 5
      max_retries: -1
      pool_timeout: 30
    filter_scheduler:
      available_filters:
        type: multistring
        values:
        - nova.scheduler.filters.all_filters
        - nova_scheduler_filters.failure_domain_filter.FailureDomainFilter
      enabled_filters: ComputeFilter, AggregateTypeAffinityFilter, ComputeCapabilitiesFilter,
        PciPassthroughFilter, ImagePropertiesFilter, ServerGroupAntiAffinityFilter,
        ServerGroupAffinityFilter, FailureDomainFilter
      image_properties_default_architecture: x86_64
      max_instances_per_host: 200
    glance:
      enable_rbd_download: true
      num_retries: 3
    ironic:
      api_endpoint: http://ironic-api.openstack.svc.cluster.local:6385
      auth_type: password
      auth_url: http://keystone-api.openstack.svc.cluster.local:5000/
      auth_version: v3
      memcache_secret_key: 3bpkMbUoAhnhTuJzonC5LqOYSiA2qbcO
      memcache_servers: memcached.openstack.svc.cluster.local:11211
      password: fvluy860dcGjKzEdgoucJD9iezynsplX
      project_domain_name: service
      project_name: service
      region_name: RegionOne
      user_domain_name: service
      username: ironic-RegionOne
    keystone_authtoken:
      auth_type: password
      auth_uri: http://keystone-api.openstack.svc.cluster.local:5000/
      auth_url: http://keystone-api.openstack.svc.cluster.local:5000/
      auth_version: v3
      memcache_secret_key: 3bpkMbUoAhnhTuJzonC5LqOYSiA2qbcO
      memcache_security_strategy: ENCRYPT
      memcached_servers: memcached.openstack.svc.cluster.local:11211
      password: dUAFCIXJYDC7MuX0kvxVketfzVypQGYy
      project_domain_name: service
      project_name: service
      region_name: RegionOne
      service_token_roles: service
      service_token_roles_required: true
      service_type: compute
      user_domain_name: service
      username: nova-RegionOne
    libvirt:
      connection_uri: qemu+unix:///system?socket=/run/libvirt/libvirt-sock
      disk_cachemodes: network=writeback
      hw_disk_discard: unmap
      images_rbd_ceph_conf: /etc/ceph/ceph.conf
      images_rbd_pool: vms
      images_type: qcow2
      live_migration_scheme: tls
      rbd_secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
      rbd_user: cinder
      swtpm_enabled: true
      swtpm_group: swtpm
      swtpm_user: swtpm
    neutron:
      auth_type: password
      auth_version: v3
      metadata_proxy_shared_secret: YSxSVakrlqSoULOyx5JwKcf5VS4O1l2Q
      service_metadata_proxy: true
    notifications:
      notify_on_state_change: vm_and_task_state
    os_vif_ovs:
      ovsdb_connection: unix:/run/openvswitch/db.sock
    oslo_concurrency:
      lock_path: /var/lib/nova/tmp
    oslo_messaging_notifications:
      driver: noop
    oslo_messaging_rabbit:
      rabbit_ha_queues: true
    oslo_middleware:
      enable_proxy_headers_parsing: true
    oslo_policy:
      policy_file: /etc/nova/policy.yaml
    placement:
      auth_type: password
      auth_url: http://keystone-api.openstack.svc.cluster.local:5000/
      auth_version: v3
      password: dq138xC2A6Emn3AibiD89rlurXh6VBpp
      project_domain_name: service
      project_name: service
      region_name: RegionOne
      user_domain_name: service
      username: placement-RegionOne
    privsep_osbrick:
      helper_command: sudo nova-rootwrap /etc/nova/rootwrap.conf privsep-helper --config-file
        /etc/nova/nova.conf
    scheduler:
      discover_hosts_in_cells_interval: 30
      max_attempts: 3
      workers: 2
    service_user:
      auth_type: password
      auth_url: http://keystone-api.openstack.svc.cluster.local:5000/
      password: dUAFCIXJYDC7MuX0kvxVketfzVypQGYy
      project_domain_name: service
      project_name: service
      region_name: RegionOne
      send_service_user_token: true
      user_domain_name: service
      username: nova-RegionOne
    spice:
      html5proxy_host: 0.0.0.0
      server_listen: 0.0.0.0
    upgrade_levels:
      compute: auto
    vnc:
      auth_schemes: vencrypt,none
      novncproxy_host: 0.0.0.0
      server_listen: 0.0.0.0
    workarounds:
      skip_cpu_compare_on_dest: true
    wsgi:
      api_paste_config: /etc/nova/api-paste.ini
  nova_api_uwsgi:
    uwsgi:
      add-header: 'Connection: close'
      buffer-size: 65535
      chunked-input-limit: "4096000"
      die-on-term: true
      enable-threads: true
      exit-on-reload: false
      hook-master-start: unix_signal:15 gracefully_kill_them_all
      http-auto-chunked: true
      http-raw-body: true
      lazy-apps: true
      log-x-forwarded-for: true
      master: true
      module: nova.wsgi.osapi_compute:application
      need-app: true
      procname-prefix-spaced: 'nova-api:'
      route-user-agent: '^kube-probe.* donotlog:'
      socket-timeout: 10
      thunder-lock: true
      worker-reload-mercy: 80
  nova_compute_redactions:
  - database
  - api_database
  - cell0_database
  nova_ironic:
    DEFAULT:
      compute_driver: ironic.IronicDriver
      cpu_allocation_ratio: 1
      force_config_drive: true
      ram_allocation_ratio: 1
      reserved_host_memory_mb: 0
      scheduler_host_manager: ironic_host_manager
  nova_metadata_uwsgi:
    uwsgi:
      add-header: 'Connection: close'
      buffer-size: 65535
      chunked-input-limit: "4096000"
      die-on-term: true
      enable-threads: true
      exit-on-reload: false
      hook-master-start: unix_signal:15 gracefully_kill_them_all
      http-auto-chunked: true
      http-raw-body: true
      lazy-apps: true
      log-x-forwarded-for: true
      master: true
      module: nova.wsgi.metadata:application
      need-app: true
      procname-prefix-spaced: 'nova-metadata:'
      route-user-agent: '^kube-probe.* donotlog:'
      socket-timeout: 10
      thunder-lock: true
      worker-reload-mercy: 80
  nova_sudoers: |
    # This sudoers file supports rootwrap for both Kolla and LOCI Images.
    Defaults !requiretty
    Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin"
    nova ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/nova-rootwrap /etc/nova/rootwrap.conf *, /var/lib/openstack/bin/nova-rootwrap /etc/nova/rootwrap.conf *
  paste:
    app:metaapp:
      paste.app_factory: nova.api.metadata.handler:MetadataRequestHandler.factory
    app:osapi_compute_app_v21:
      paste.app_factory: nova.api.openstack.compute:APIRouterV21.factory
    app:oscomputeversionapp:
      paste.app_factory: nova.api.openstack.compute.versions:Versions.factory
    composite:metadata:
      /: meta
      use: egg:Paste#urlmap
    composite:openstack_compute_api_v21:
      keystone: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken
        audit keystonecontext osapi_compute_app_v21
      noauth2: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2
        osapi_compute_app_v21
      use: call:nova.api.auth:pipeline_factory_v21
    composite:openstack_compute_api_v21_legacy_v2_compatible:
      keystone: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken
        audit keystonecontext legacy_v2_compatible osapi_compute_app_v21
      noauth2: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2
        legacy_v2_compatible osapi_compute_app_v21
      use: call:nova.api.auth:pipeline_factory_v21
    composite:osapi_compute:
      /: oscomputeversions
      /v2: openstack_compute_api_v21_legacy_v2_compatible
      /v2.1: openstack_compute_api_v21
      use: call:nova.api.openstack.urlmap:urlmap_factory
    filter:audit:
      audit_map_file: /etc/nova/api_audit_map.conf
      paste.filter_factory: keystonemiddleware.audit:filter_factory
    filter:authtoken:
      paste.filter_factory: keystonemiddleware.auth_token:filter_factory
    filter:compute_req_id:
      paste.filter_factory: nova.api.compute_req_id:ComputeReqIdMiddleware.factory
    filter:cors:
      oslo_config_project: nova
      paste.filter_factory: oslo_middleware.cors:filter_factory
    filter:faultwrap:
      paste.filter_factory: nova.api.openstack:FaultWrapper.factory
    filter:http_proxy_to_wsgi:
      paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
    filter:keystonecontext:
      paste.filter_factory: nova.api.auth:NovaKeystoneContext.factory
    filter:legacy_v2_compatible:
      paste.filter_factory: nova.api.openstack:LegacyV2CompatibleWrapper.factory
    filter:noauth2:
      paste.filter_factory: nova.api.openstack.auth:NoAuthMiddleware.factory
    filter:request_id:
      paste.filter_factory: oslo_middleware:RequestId.factory
    filter:sizelimit:
      paste.filter_factory: oslo_middleware:RequestBodySizeLimiter.factory
    pipeline:meta:
      pipeline: cors metaapp
    pipeline:oscomputeversions:
      pipeline: faultwrap http_proxy_to_wsgi oscomputeversionapp
  policy: {}
  rabbitmq:
    policies:
    - apply-to: all
      definition:
        message-ttl: 70000
      name: ha_ttl_nova
      pattern: '^(?!(amq\.|reply_)).*'
      priority: 0
      vhost: nova
  rally_tests:
    clean_up: |
      FLAVORS=$(openstack flavor list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }')
      if [ -n "$FLAVORS" ]; then
        echo $FLAVORS | xargs openstack flavor delete
      fi
      SERVERS=$(openstack server list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }')
      if [ -n "$SERVERS" ]; then
        echo $SERVERS | xargs openstack server delete
      fi
      IMAGES=$(openstack image list -f value | awk '$2 ~ /^c_rally_/ { print $1 }')
      if [ -n "$IMAGES" ]; then
        echo $IMAGES | xargs openstack image delete
      fi
    run_tempest: false
    tests:
      NovaAggregates.create_and_get_aggregate_details:
      - args:
          availability_zone: nova
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaAggregates.create_and_update_aggregate:
      - args:
          availability_zone: nova
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaAggregates.list_aggregates:
      - runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaAvailabilityZones.list_availability_zones:
      - args:
          detailed: true
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaFlavors.create_and_delete_flavor:
      - args:
          disk: 1
          ram: 500
          vcpus: 1
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaFlavors.create_and_list_flavor_access:
      - args:
          disk: 1
          ram: 500
          vcpus: 1
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaFlavors.create_flavor:
      - args:
          disk: 1
          ram: 500
          vcpus: 1
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaFlavors.create_flavor_and_add_tenant_access:
      - args:
          disk: 1
          ram: 500
          vcpus: 1
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaFlavors.create_flavor_and_set_keys:
      - args:
          disk: 1
          extra_specs:
            quota:disk_read_bytes_sec: 10240
          ram: 500
          vcpus: 1
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaFlavors.list_flavors:
      - args:
          detailed: true
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaHypervisors.list_and_get_hypervisors:
      - args:
          detailed: true
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaHypervisors.list_and_get_uptime_hypervisors:
      - args:
          detailed: true
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaHypervisors.list_and_search_hypervisors:
      - args:
          detailed: true
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaHypervisors.list_hypervisors:
      - args:
          detailed: true
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaHypervisors.statistics_hypervisors:
      - args: {}
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaKeypair.create_and_delete_keypair:
      - runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaKeypair.create_and_list_keypairs:
      - runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaServerGroups.create_and_list_server_groups:
      - args:
          all_projects: false
          kwargs:
            policies:
            - affinity
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaServices.list_services:
      - runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
  rootwrap: |
    # Configuration for nova-rootwrap
    # This file should be owned by (and only-writeable by) the root user

    [DEFAULT]
    # List of directories to load filter definitions from (separated by ',').
    # These directories MUST all be only writeable by root !
    filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap

    # List of directories to search executables in, in case filters do not
    # explicitly specify a full path (separated by ',')
    # If not specified, defaults to system PATH environment variable.
    # These directories MUST all be only writeable by root !
    exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin

    # Enable logging to syslog
    # Default value is False
    use_syslog=False

    # Which syslog facility to use.
    # Valid values include auth, authpriv, syslog, local0, local1...
    # Default value is 'syslog'
    syslog_log_facility=syslog

    # Which messages to log.
    # INFO means log all usage
    # ERROR means only log unsuccessful attempts
    syslog_log_level=ERROR
  rootwrap_filters:
    api_metadata:
      content: |
        # nova-rootwrap command filters for api-metadata nodes
        # This is needed on nova-api hosts running with "metadata" in enabled_apis
        # or when running nova-api-metadata
        # This file should be owned by (and only-writeable by) the root user

        [Filters]
        # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
        iptables-save: CommandFilter, iptables-save, root
        ip6tables-save: CommandFilter, ip6tables-save, root

        # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
        iptables-restore: CommandFilter, iptables-restore, root
        ip6tables-restore: CommandFilter, ip6tables-restore, root
      pods:
      - metadata
    compute:
      content: |
        # nova-rootwrap command filters for compute nodes
        # This file should be owned by (and only-writeable by) the root user

        [Filters]
        # nova/virt/disk/mount/api.py: 'kpartx', '-a', device
        # nova/virt/disk/mount/api.py: 'kpartx', '-d', device
        kpartx: CommandFilter, kpartx, root

        # nova/virt/xenapi/vm_utils.py: tune2fs, -O ^has_journal, part_path
        # nova/virt/xenapi/vm_utils.py: tune2fs, -j, partition_path
        tune2fs: CommandFilter, tune2fs, root

        # nova/virt/disk/mount/api.py: 'mount', mapped_device
        # nova/virt/disk/api.py: 'mount', '-o', 'bind', src, target
        # nova/virt/xenapi/vm_utils.py: 'mount', '-t', 'ext2,ext3,ext4,reiserfs'..
        # nova/virt/configdrive.py: 'mount', device, mountdir
        # nova/virt/libvirt/volume.py: 'mount', '-t', 'sofs' ...
        mount: CommandFilter, mount, root

        # nova/virt/disk/mount/api.py: 'umount', mapped_device
        # nova/virt/disk/api.py: 'umount' target
        # nova/virt/xenapi/vm_utils.py: 'umount', dev_path
        # nova/virt/configdrive.py: 'umount', mountdir
        umount: CommandFilter, umount, root

        # nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-c', device, image
        # nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-d', device
        qemu-nbd: CommandFilter, qemu-nbd, root

        # nova/virt/disk/mount/loop.py: 'losetup', '--find', '--show', image
        # nova/virt/disk/mount/loop.py: 'losetup', '--detach', device
        losetup: CommandFilter, losetup, root

        # nova/virt/disk/vfs/localfs.py: 'blkid', '-o', 'value', '-s', 'TYPE', device
        blkid: CommandFilter, blkid, root

        # nova/virt/libvirt/utils.py: 'blockdev', '--getsize64', path
        # nova/virt/disk/mount/nbd.py: 'blockdev', '--flushbufs', device
        blockdev: RegExpFilter, blockdev, root, blockdev, (--getsize64|--flushbufs), /dev/.*

        # nova/virt/disk/vfs/localfs.py: 'tee', canonpath
        tee: CommandFilter, tee, root

        # nova/virt/disk/vfs/localfs.py: 'mkdir', canonpath
        mkdir: CommandFilter, mkdir, root

        # nova/virt/disk/vfs/localfs.py: 'chown'
        # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
        # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
        # nova/virt/libvirt/connection.py: 'chown', 'root', basepath('disk')
        chown: CommandFilter, chown, root

        # nova/virt/disk/vfs/localfs.py: 'chmod'
        chmod: CommandFilter, chmod, root

        # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap'
        # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up'
        # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev
        # nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i..
        # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'..
        # nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',..
        # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',..
        # nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev)
        # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1]
        # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge
        # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', ..
        # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',..
        # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ...
        # nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,..
        # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up'
        # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up'
        # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..
        # nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, ..
        # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'
        # nova/network/linux_net.py: 'ip', 'route', 'add', ..
        # nova/network/linux_net.py: 'ip', 'route', 'del', .
        # nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev
        ip: CommandFilter, ip, root

        # nova/virt/libvirt/vif.py: 'tunctl', '-b', '-t', dev
        # nova/network/linux_net.py: 'tunctl', '-b', '-t', dev
        tunctl: CommandFilter, tunctl, root

        # nova/virt/libvirt/vif.py: 'ovs-vsctl', ...
        # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...
        # nova/network/linux_net.py: 'ovs-vsctl', ....
        ovs-vsctl: CommandFilter, ovs-vsctl, root

        # nova/virt/libvirt/vif.py: 'vrouter-port-control', ...
        vrouter-port-control: CommandFilter, vrouter-port-control, root

        # nova/virt/libvirt/vif.py: 'ebrctl', ...
        ebrctl: CommandFilter, ebrctl, root

        # nova/virt/libvirt/vif.py: 'mm-ctl', ...
        mm-ctl: CommandFilter, mm-ctl, root

        # nova/network/linux_net.py: 'ovs-ofctl', ....
        ovs-ofctl: CommandFilter, ovs-ofctl, root

        # nova/virt/libvirt/connection.py: 'dd', if=%s % virsh_output, ...
        dd: CommandFilter, dd, root

        # nova/virt/xenapi/volume_utils.py: 'iscsiadm', '-m', ...
        iscsiadm: CommandFilter, iscsiadm, root

        # nova/virt/libvirt/volume/aoe.py: 'aoe-revalidate', aoedev
        # nova/virt/libvirt/volume/aoe.py: 'aoe-discover'
        aoe-revalidate: CommandFilter, aoe-revalidate, root
        aoe-discover: CommandFilter, aoe-discover, root

        # nova/virt/xenapi/vm_utils.py: parted, --script, ...
        # nova/virt/xenapi/vm_utils.py: 'parted', '--script', dev_path, ..*.
        parted: CommandFilter, parted, root

        # nova/virt/xenapi/vm_utils.py: 'pygrub', '-qn', dev_path
        pygrub: CommandFilter, pygrub, root

        # nova/virt/xenapi/vm_utils.py: fdisk %(dev_path)s
        fdisk: CommandFilter, fdisk, root

        # nova/virt/xenapi/vm_utils.py: e2fsck, -f, -p, partition_path
        # nova/virt/disk/api.py: e2fsck, -f, -p, image
        e2fsck: CommandFilter, e2fsck, root

        # nova/virt/xenapi/vm_utils.py: resize2fs, partition_path
        # nova/virt/disk/api.py: resize2fs, image
        resize2fs: CommandFilter, resize2fs, root

        # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
        iptables-save: CommandFilter, iptables-save, root
        ip6tables-save: CommandFilter, ip6tables-save, root

        # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
        iptables-restore: CommandFilter, iptables-restore, root
        ip6tables-restore: CommandFilter, ip6tables-restore, root

        # nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
        # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
        arping: CommandFilter, arping, root

        # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
        dhcp_release: CommandFilter, dhcp_release, root

        # nova/network/linux_net.py: 'kill', '-9', pid
        # nova/network/linux_net.py: 'kill', '-HUP', pid
        kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP

        # nova/network/linux_net.py: 'kill', pid
        kill_radvd: KillFilter, root, /usr/sbin/radvd

        # nova/network/linux_net.py: dnsmasq call
        dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq

        # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'..
        radvd: CommandFilter, radvd, root

        # nova/network/linux_net.py: 'brctl', 'addbr', bridge
        # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
        # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
        # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
        brctl: CommandFilter, brctl, root

        # nova/virt/libvirt/utils.py: 'mkswap'
        # nova/virt/xenapi/vm_utils.py: 'mkswap'
        mkswap: CommandFilter, mkswap, root

        # nova/virt/libvirt/utils.py: 'nova-idmapshift'
        nova-idmapshift: CommandFilter, nova-idmapshift, root

        # nova/virt/xenapi/vm_utils.py: 'mkfs'
        # nova/utils.py: 'mkfs', fs, path, label
        mkfs: CommandFilter, mkfs, root

        # nova/virt/libvirt/utils.py: 'qemu-img'
        qemu-img: CommandFilter, qemu-img, root

        # nova/virt/disk/vfs/localfs.py: 'readlink', '-e'
        readlink: CommandFilter, readlink, root

        # nova/virt/disk/api.py:
        mkfs.ext3: CommandFilter, mkfs.ext3, root
        mkfs.ext4: CommandFilter, mkfs.ext4, root
        mkfs.ntfs: CommandFilter, mkfs.ntfs, root

        # nova/virt/libvirt/connection.py:
        lvremove: CommandFilter, lvremove, root

        # nova/virt/libvirt/utils.py:
        lvcreate: CommandFilter, lvcreate, root

        # nova/virt/libvirt/utils.py:
        lvs: CommandFilter, lvs, root

        # nova/virt/libvirt/utils.py:
        vgs: CommandFilter, vgs, root

        # nova/utils.py:read_file_as_root: 'cat', file_path
        # (called from nova/virt/disk/vfs/localfs.py:VFSLocalFS.read_file)
        read_passwd: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/passwd
        read_shadow: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/shadow

        # os-brick needed commands
        read_initiator: ReadFileFilter, /etc/iscsi/initiatorname.iscsi
        multipath: CommandFilter, multipath, root
        # multipathd show status
        multipathd: CommandFilter, multipathd, root
        systool: CommandFilter, systool, root
        vgc-cluster: CommandFilter, vgc-cluster, root
        # os_brick/initiator/connector.py
        drv_cfg: CommandFilter, /opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio/sdc/bin/drv_cfg, --query_guid

        # TODO(smcginnis) Temporary fix.
        # Need to pull in os-brick os-brick.filters file instead and clean
        # out stale brick values from this file.
        scsi_id: CommandFilter, /lib/udev/scsi_id, root
        # os_brick.privileged.default oslo.privsep context
        # This line ties the superuser privs with the config files, context name,
        # and (implicitly) the actual python code invoked.
        privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.*

        # nova/storage/linuxscsi.py: sg_scan device
        sg_scan: CommandFilter, sg_scan, root

        # nova/volume/encryptors/cryptsetup.py:
        # nova/volume/encryptors/luks.py:
        ln: RegExpFilter, ln, root, ln, --symbolic, --force, /dev/mapper/crypt-.+, .+

        # nova/volume/encryptors.py:
        # nova/virt/libvirt/dmcrypt.py:
        cryptsetup: CommandFilter, cryptsetup, root

        # nova/virt/xenapi/vm_utils.py:
        xenstore-read: CommandFilter, xenstore-read, root

        # nova/virt/libvirt/utils.py:
        rbd: CommandFilter, rbd, root

        # nova/virt/libvirt/utils.py: 'shred', '-n3', '-s%d' % volume_size, path
        shred: CommandFilter, shred, root

        # nova/virt/libvirt/volume.py: 'cp', '/dev/stdin', delete_control..
        cp: CommandFilter, cp, root

        # nova/virt/xenapi/vm_utils.py:
        sync: CommandFilter, sync, root

        # nova/virt/libvirt/imagebackend.py:
        ploop: RegExpFilter, ploop, root, ploop, restore-descriptor, .*
        prl_disk_tool: RegExpFilter, prl_disk_tool, root, prl_disk_tool, resize, --size, .*M$, --resize_partition, --hdd, .*

        # nova/virt/libvirt/utils.py: 'xend', 'status'
        xend: CommandFilter, xend, root

        # nova/virt/libvirt/utils.py:
        touch: CommandFilter, touch, root

        # nova/virt/libvirt/volume/vzstorage.py
        pstorage-mount: CommandFilter, pstorage-mount, root
      pods:
      - compute
    network:
      content: |
        # nova-rootwrap command filters for network nodes
        # This file should be owned by (and only-writeable by) the root user

        [Filters]
        # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap'
        # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up'
        # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev
        # nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i..
        # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'..
        # nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',..
        # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',..
        # nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev)
        # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1]
        # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge
        # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', ..
        # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',..
        # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ...
        # nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,..
        # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up'
        # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up'
        # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..
        # nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, ..
        # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'
        # nova/network/linux_net.py: 'ip', 'route', 'add', ..
        # nova/network/linux_net.py: 'ip', 'route', 'del', .
        # nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev
        ip: CommandFilter, ip, root

        # nova/virt/libvirt/vif.py: 'ovs-vsctl', ...
        # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...
        # nova/network/linux_net.py: 'ovs-vsctl', ....
        ovs-vsctl: CommandFilter, ovs-vsctl, root

        # nova/network/linux_net.py: 'ovs-ofctl', ....
        ovs-ofctl: CommandFilter, ovs-ofctl, root

        # nova/virt/libvirt/vif.py: 'ivs-ctl', ...
        # nova/virt/libvirt/vif.py: 'ivs-ctl', 'del-port', ...
        # nova/network/linux_net.py: 'ivs-ctl', ....
        ivs-ctl: CommandFilter, ivs-ctl, root

        # nova/virt/libvirt/vif.py: 'ifc_ctl', ...
        ifc_ctl: CommandFilter, /opt/pg/bin/ifc_ctl, root

        # nova/network/linux_net.py: 'ebtables', '-D' ...
        # nova/network/linux_net.py: 'ebtables', '-I' ...
        ebtables: CommandFilter, ebtables, root
        ebtables_usr: CommandFilter, ebtables, root

        # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
        iptables-save: CommandFilter, iptables-save, root
        ip6tables-save: CommandFilter, ip6tables-save, root

        # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
        iptables-restore: CommandFilter, iptables-restore, root
        ip6tables-restore: CommandFilter, ip6tables-restore, root

        # nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
        # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
        arping: CommandFilter, arping, root

        # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
        dhcp_release: CommandFilter, dhcp_release, root

        # nova/network/linux_net.py: 'kill', '-9', pid
        # nova/network/linux_net.py: 'kill', '-HUP', pid
        kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP

        # nova/network/linux_net.py: 'kill', pid
        kill_radvd: KillFilter, root, /usr/sbin/radvd

        # nova/network/linux_net.py: dnsmasq call
        dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq

        # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'..
        radvd: CommandFilter, radvd, root

        # nova/network/linux_net.py: 'brctl', 'addbr', bridge
        # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
        # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
        # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
        brctl: CommandFilter, brctl, root

        # nova/network/linux_net.py: 'sysctl', ....
        sysctl: CommandFilter, sysctl, root

        # nova/network/linux_net.py: 'conntrack'
        conntrack: CommandFilter, conntrack, root

        # nova/network/linux_net.py: 'fp-vdev'
        fp-vdev: CommandFilter, fp-vdev, root
      pods:
      - compute
  security: |
    #
    # Disable access to the entire file system except for the directories that
    # are explicitly allowed later.
    #
    # This currently breaks the configurations that come with some web application
    # Debian packages.
    #
    #<Directory />
    #   AllowOverride None
    #   Require all denied
    #</Directory>

    # Changing the following options will not really affect the security of the
    # server, but might make attacks slightly more difficult in some cases.

    #
    # ServerTokens
    # This directive configures what you return as the Server HTTP response
    # Header. The default is 'Full' which sends information about the OS-Type
    # and compiled in modules.
    # Set to one of:  Full | OS | Minimal | Minor | Major | Prod
    # where Full conveys the most information, and Prod the least.
    ServerTokens Prod

    #
    # Optionally add a line containing the server version and virtual host
    # name to server-generated pages (internal error documents, FTP directory
    # listings, mod_status and mod_info output etc., but not CGI generated
    # documents or custom error documents).
    # Set to "EMail" to also include a mailto: link to the ServerAdmin.
    # Set to one of:  On | Off | EMail
    ServerSignature Off

    #
    # Allow TRACE method
    #
    # Set to "extended" to also reflect the request body (only for testing and
    # diagnostic purposes).
    #
    # Set to one of:  On | Off | extended
    TraceEnable Off

    #
    # Forbid access to version control directories
    #
    # If you use version control systems in your document root, you should
    # probably deny access to their directories. For example, for subversion:
    #
    #<DirectoryMatch "/\.svn">
    #   Require all denied
    #</DirectoryMatch>

    #
    # Setting this header will prevent MSIE from interpreting files as something
    # else than declared by the content type in the HTTP headers.
    # Requires mod_headers to be enabled.
    #
    #Header set X-Content-Type-Options: "nosniff"

    #
    # Setting this header will prevent other sites from embedding pages from this
    # site as frames. This defends against clickjacking attacks.
    # Requires mod_headers to be enabled.
    #
    #Header set X-Frame-Options: "sameorigin"
  software:
    apache2:
      a2dismod: null
      a2enmod: null
      binary: apache2
      conf_dir: /etc/apache2/conf-enabled
      mods_dir: /etc/apache2/mods-available
      site_dir: /etc/apache2/sites-enabled
      start_parameters: -DFOREGROUND
console:
  address_search_enabled: true
  console_kind: novnc
  novnc:
    compute:
      vncserver_proxyclient_interface: null
      vncserver_proxyclient_network_cidr: 0/0
    vncproxy:
      vncserver_proxyclient_interface: null
      vncserver_proxyclient_network_cidr: 0/0
  serial: null
  spice:
    compute:
      server_proxyclient_interface: null
      server_proxyclient_network_cidr: 0/0
    proxy:
      server_proxyclient_interface: null
      server_proxyclient_network_cidr: 0/0
dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
        - nova-image-repo-sync
        services:
        - endpoint: node
          service: local_image_registry
    targeted:
      linuxbridge:
        compute:
          pod:
          - labels:
              application: neutron
              component: neutron-lb-agent
            requireSameNode: true
      openvswitch:
        compute:
          pod:
          - labels:
              application: neutron
              component: neutron-ovs-agent
            requireSameNode: true
      ovn:
        compute:
          pod:
          - labels:
              application: ovn
              component: ovn-controller
            requireSameNode: true
      sriov:
        compute:
          pod:
          - labels:
              application: neutron
              component: neutron-sriov-agent
            requireSameNode: true
  static:
    api:
      jobs:
      - nova-db-sync
      - nova-ks-user
      - nova-ks-endpoints
      - nova-rabbit-init
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: oslo_db
      - endpoint: internal
        service: identity
    api_metadata:
      jobs:
      - nova-db-sync
      - nova-ks-user
      - nova-ks-endpoints
      - nova-rabbit-init
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: oslo_db
      - endpoint: internal
        service: identity
    archive_deleted_rows:
      jobs:
      - nova-db-init
      - nova-db-sync
    bootstrap:
      services:
      - endpoint: internal
        service: identity
      - endpoint: internal
        service: compute
    cell_setup:
      jobs:
      - nova-db-sync
      - nova-rabbit-init
      pod:
      - labels:
          application: nova
          component: compute
        requireSameNode: false
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: oslo_db
      - endpoint: internal
        service: identity
      - endpoint: internal
        service: compute
    compute:
      jobs:
      - nova-db-sync
      - nova-rabbit-init
      pod:
      - labels:
          application: libvirt
          component: libvirt
        requireSameNode: true
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: image
      - endpoint: internal
        service: compute
      - endpoint: internal
        service: network
      - endpoint: internal
        service: compute_metadata
    compute_ironic:
      jobs:
      - nova-db-sync
      - nova-rabbit-init
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: image
      - endpoint: internal
        service: compute
      - endpoint: internal
        service: network
      - endpoint: internal
        service: baremetal
    conductor:
      jobs:
      - nova-db-sync
      - nova-rabbit-init
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: oslo_db
      - endpoint: internal
        service: identity
      - endpoint: internal
        service: compute
    db_drop:
      services:
      - endpoint: internal
        service: oslo_db
    db_init:
      services:
      - endpoint: internal
        service: oslo_db
    db_sync:
      jobs:
      - nova-db-init
      services:
      - endpoint: internal
        service: oslo_db
    image_repo_sync:
      services:
      - endpoint: internal
        service: local_image_registry
    ks_endpoints:
      jobs:
      - nova-ks-service
      services:
      - endpoint: internal
        service: identity
    ks_service:
      services:
      - endpoint: internal
        service: identity
    ks_user:
      services:
      - endpoint: internal
        service: identity
    novncproxy:
      jobs:
      - nova-db-sync
      services:
      - endpoint: internal
        service: oslo_db
    rabbit_init:
      services:
      - endpoint: internal
        service: oslo_messaging
    scheduler:
      jobs:
      - nova-db-sync
      - nova-rabbit-init
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: oslo_db
      - endpoint: internal
        service: identity
      - endpoint: internal
        service: compute
    service_cleaner:
      jobs:
      - nova-db-sync
      - nova-rabbit-init
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: oslo_db
      - endpoint: internal
        service: identity
      - endpoint: internal
        service: compute
    spiceproxy:
      jobs:
      - nova-db-sync
      services:
      - endpoint: internal
        service: oslo_db
    tests:
      services:
      - endpoint: internal
        service: image
      - endpoint: internal
        service: compute
      - endpoint: internal
        service: network
      - endpoint: internal
        service: compute_metadata
endpoints:
  baremetal:
    host_fqdn_override:
      default: null
      public:
        host: baremetal.199-204-45-205.nip.io
    hosts:
      default: ironic-api
      public: ironic
    name: ironic
    path:
      default: null
    port:
      api:
        default: 6385
        public: 443
    scheme:
      default: http
      public: https
  cluster_domain_suffix: cluster.local
  compute:
    host_fqdn_override:
      default: null
      public:
        host: compute.199-204-45-205.nip.io
    hosts:
      default: nova-api
      public: nova
    name: nova
    path:
      default: /v2.1
    port:
      api:
        default: 8774
        public: 443
        service: 8774
      novncproxy:
        default: 6080
    scheme:
      default: http
      public: https
      service: http
  compute_metadata:
    host_fqdn_override:
      default: null
    hosts:
      default: nova-metadata
      public: nova-metadata
    ip:
      ingress: null
    name: nova
    path:
      default: /
    port:
      metadata:
        default: 8775
        public: 8775
    scheme:
      default: http
    secret: YSxSVakrlqSoULOyx5JwKcf5VS4O1l2Q
  compute_novnc_proxy:
    host_fqdn_override:
      default: null
      public:
        host: vnc.199-204-45-205.nip.io
    hosts:
      default: nova-novncproxy
      public: novncproxy
    name: nova
    path:
      default: /vnc_lite.html
    port:
      novnc_proxy:
        default: 6080
        public: 443
    scheme:
      default: http
      public: https
  compute_novnc_vencrypt:
    host_fqdn_override:
      default:
        commonName: nova-novncproxy
        tls:
          commonName: nova-novncproxy
          issuerRef:
            kind: Issuer
            name: libvirt-vnc
          secretName: nova-novncproxy-vencrypt
          usages:
          - client auth
        usages:
        - client auth
    hosts:
      default: nova-novncproxy
  compute_spice_proxy:
    host_fqdn_override:
      default: null
    hosts:
      default: nova-spiceproxy
      public: spiceproxy
    name: nova
    path:
      default: /spice_auto.html
    port:
      spice_proxy:
        default: 6082
        public: 80
    scheme:
      default: http
  fluentd:
    host_fqdn_override:
      default: null
    hosts:
      default: fluentd-logging
    name: fluentd
    namespace: null
    path:
      default: null
    port:
      metrics:
        default: 24220
      service:
        default: 24224
    scheme: http
  identity:
    auth:
      admin:
        password: wSrEtxwmgaKmfT5D7XK23gPbMsBdmiRe
        project_domain_name: default
        project_name: admin
        region_name: RegionOne
        user_domain_name: default
        username: admin-RegionOne
      cinder:
        password: e9N4XQDXDfgHebIUovcwgaeoXBNiBqss
        project_domain_name: service
        project_name: service
        region_name: RegionOne
        role: admin,service
        user_domain_name: service
        username: cinder-RegionOne
      glance:
        password: YbeXqZQx93gfRepWrufZdZN0KxHJ4zLJ
        region_name: RegionOne
        username: glance-RegionOne
      ironic:
        auth_type: password
        auth_version: v3
        password: fvluy860dcGjKzEdgoucJD9iezynsplX
        project_domain_name: service
        project_name: service
        region_name: RegionOne
        user_domain_name: service
        username: ironic-RegionOne
      neutron:
        password: 0HjgZaRGHOxsdcXZ4VdybvRsakC2dUvb
        project_domain_name: service
        project_name: service
        region_name: RegionOne
        user_domain_name: service
        username: neutron-RegionOne
      nova:
        password: dUAFCIXJYDC7MuX0kvxVketfzVypQGYy
        project_domain_name: service
        project_name: service
        region_name: RegionOne
        role: admin,service
        user_domain_name: service
        username: nova-RegionOne
      placement:
        password: dq138xC2A6Emn3AibiD89rlurXh6VBpp
        project_domain_name: service
        project_name: service
        region_name: RegionOne
        role: admin
        user_domain_name: service
        username: placement-RegionOne
      test:
        password: password
        project_domain_name: service
        project_name: test
        region_name: RegionOne
        role: admin
        user_domain_name: service
        username: nova-test
    host_fqdn_override:
      default: null
      public:
        host: identity.199-204-45-205.nip.io
    hosts:
      default: keystone-api
      internal: keystone-api
    name: keystone
    path:
      default: /
    port:
      api:
        default: 5000
        internal: 5000
        public: 443
    scheme:
      default: http
      public: https
  image:
    host_fqdn_override:
      default: null
      public:
        host: image.199-204-45-205.nip.io
    hosts:
      default: glance-api
      public: glance
    name: glance
    path:
      default: null
    port:
      api:
        default: 9292
        public: 443
    scheme:
      default: http
      public: https
  ingress:
    hosts:
      default: ingress
    name: ingress
    namespace: null
    port:
      ingress:
        default: 80
  kube_dns:
    host_fqdn_override:
      default: null
    hosts:
      default: kube-dns
    name: kubernetes-dns
    namespace: kube-system
    path:
      default: null
    port:
      dns:
        default: 53
        protocol: UDP
    scheme: http
  local_image_registry:
    host_fqdn_override:
      default: null
    hosts:
      default: localhost
      internal: docker-registry
      node: localhost
    name: docker-registry
    namespace: docker-registry
    port:
      registry:
        node: 5000
  network:
    host_fqdn_override:
      default: null
      public:
        host: network.199-204-45-205.nip.io
    hosts:
      default: neutron-server
      public: neutron
    name: neutron
    path:
      default: null
    port:
      api:
        default: 9696
        public: 443
    scheme:
      default: http
      public: https
  oci_image_registry:
    auth:
      enabled: false
      nova:
        password: password
        username: nova
    host_fqdn_override:
      default: null
    hosts:
      default: localhost
    name: oci-image-registry
    namespace: oci-image-registry
    port:
      registry:
        default: null
  oslo_cache:
    auth:
      memcache_secret_key: 3bpkMbUoAhnhTuJzonC5LqOYSiA2qbcO
    host_fqdn_override:
      default: null
    hosts:
      default: memcached
    port:
      memcache:
        default: 11211
  oslo_db:
    auth:
      admin:
        password: IaWdI5GZKXNwYoRVRSfSXuMWAjBbuh4r
        secret:
          tls:
            internal: mariadb-tls-direct
        username: root
      cinder:
        password: VSChIp1UsOf9dHsc02fmQY4zxLHUx1uR
      glance:
        password: GfRgdU73eqqrb4HIy98QKWBBNJZLxpOa
      ironic:
        password: MbUfefcQs8xbI4RrGXLgLkkDl01ZykoD
      keystone:
        password: PyrjTyhH9OlE59GAnVZPPsXEfNJkjQBC
      neutron:
        password: 3TVPzYMXG8T8nJYGKDdR1YglGDpxQJmD
      nova:
        password: ocDjohhCvGks3YHGVgd1JWNFjwNQXJqc
        username: nova
      placement:
        password: T1F2rp5vfggXQzaszSliekJyBgR8PUrR
    host_fqdn_override:
      default: null
    hosts:
      default: percona-xtradb-haproxy
    path: /nova
    port:
      mysql:
        default: 3306
    scheme: mysql+pymysql
  oslo_db_api:
    auth:
      admin:
        password: IaWdI5GZKXNwYoRVRSfSXuMWAjBbuh4r
        username: root
      nova:
        password: ocDjohhCvGks3YHGVgd1JWNFjwNQXJqc
        username: nova
    host_fqdn_override:
      default: null
    hosts:
      default: percona-xtradb-haproxy
    path: /nova_api
    port:
      mysql:
        default: 3306
    scheme: mysql+pymysql
  oslo_db_cell0:
    auth:
      admin:
        password: IaWdI5GZKXNwYoRVRSfSXuMWAjBbuh4r
        username: root
      nova:
        password: ocDjohhCvGks3YHGVgd1JWNFjwNQXJqc
        username: nova
    host_fqdn_override:
      default: null
    hosts:
      default: percona-xtradb-haproxy
    path: /nova_cell0
    port:
      mysql:
        default: 3306
    scheme: mysql+pymysql
  oslo_messaging:
    auth:
      admin:
        password: 5IPJ04_YT6TKLzT37S2K2AHo3vCbg3Mv
        secret:
          tls:
            internal: rabbitmq-tls-direct
        username: default_user_6DvnjsZiSfczb9VTtu_
      cinder:
        password: OUrnNeD6xpGuGivPxSTnMyrnFRttACe0
      glance:
        password: Ob33AB6anda9WnO2WJBFdVqFy9yT41YR
      ironic:
        password: 1uaNCLM7niEAFu4PKvBCJwcJzFkmASps
      keystone:
        password: PbNGvGvFIPEtG06GhhN3qNxalGaHc3Ak
      neutron:
        password: Jlj7uFCXLta1bqHdVIbr0tV8pEfqnWgo
      nova:
        password: LBBecAQMJXDWZE68D3R7eVDCtWnZM4sV
        username: nova
      user:
        password: 5IPJ04_YT6TKLzT37S2K2AHo3vCbg3Mv
        username: default_user_6DvnjsZiSfczb9VTtu_
    host_fqdn_override:
      default: null
    hosts:
      default: rabbitmq-nova
    path: /nova
    port:
      amqp:
        default: 5672
      http:
        default: 15672
    scheme: rabbit
  placement:
    host_fqdn_override:
      default: null
      public:
        host: placement.199-204-45-205.nip.io
    hosts:
      default: placement-api
      public: placement
    name: placement
    path:
      default: /
    port:
      api:
        default: 8778
        public: 443
        service: 8778
    scheme:
      default: http
      public: https
      service: http
  volumev3:
    host_fqdn_override:
      default: null
      public:
        host: volume.199-204-45-205.nip.io
    hosts:
      default: cinder-api
      public: cinder
    name: cinderv3
    path:
      default: /v3/%(tenant_id)s
      healthcheck: /healthcheck
    port:
      api:
        default: 8776
        public: 443
    scheme:
      default: http
      public: https
health_probe:
  logging:
    level: ERROR
helm-toolkit:
  global: {}
helm3_hook: true
images:
  local_registry:
    active: false
    exclude:
    - dep_check
    - image_repo_sync
  pull_policy: IfNotPresent
  tags:
    bootstrap: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:main@sha256:3be2b3d1ab07714491f915307416d288783e484669a4b58a8fe3b7412b97044c
    db_drop: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:main@sha256:3be2b3d1ab07714491f915307416d288783e484669a4b58a8fe3b7412b97044c
    db_init: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:main@sha256:3be2b3d1ab07714491f915307416d288783e484669a4b58a8fe3b7412b97044c
    dep_check: harbor.atmosphere.dev/ghcr.io/vexxhost/kubernetes-entrypoint:edge
    image_repo_sync: docker.io/docker:17.07.0
    ks_endpoints: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:main@sha256:3be2b3d1ab07714491f915307416d288783e484669a4b58a8fe3b7412b97044c
    ks_service: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:main@sha256:3be2b3d1ab07714491f915307416d288783e484669a4b58a8fe3b7412b97044c
    ks_user: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:main@sha256:3be2b3d1ab07714491f915307416d288783e484669a4b58a8fe3b7412b97044c
    nova_api: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:4d20ff43a98cfa92485d805f8724266e75c27ed46a4c955681c11dfb94a9995e
    nova_archive_deleted_rows: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:4d20ff43a98cfa92485d805f8724266e75c27ed46a4c955681c11dfb94a9995e
    nova_cell_setup: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:4d20ff43a98cfa92485d805f8724266e75c27ed46a4c955681c11dfb94a9995e
    nova_cell_setup_init: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:main@sha256:3be2b3d1ab07714491f915307416d288783e484669a4b58a8fe3b7412b97044c
    nova_compute: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:4d20ff43a98cfa92485d805f8724266e75c27ed46a4c955681c11dfb94a9995e
    nova_compute_ironic: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:4d20ff43a98cfa92485d805f8724266e75c27ed46a4c955681c11dfb94a9995e
    nova_compute_ssh: harbor.atmosphere.dev/ghcr.io/vexxhost/nova-ssh:main@sha256:b5063b69ce4c8cbccc92c911c10849fe9481c8d0bdb6db0c2372a149144e534c
    nova_conductor: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:4d20ff43a98cfa92485d805f8724266e75c27ed46a4c955681c11dfb94a9995e
    nova_db_sync: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:4d20ff43a98cfa92485d805f8724266e75c27ed46a4c955681c11dfb94a9995e
    nova_novncproxy: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:4d20ff43a98cfa92485d805f8724266e75c27ed46a4c955681c11dfb94a9995e
    nova_novncproxy_assets: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:4d20ff43a98cfa92485d805f8724266e75c27ed46a4c955681c11dfb94a9995e
    nova_scheduler: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:4d20ff43a98cfa92485d805f8724266e75c27ed46a4c955681c11dfb94a9995e
    nova_service_cleaner: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:main@sha256:3be2b3d1ab07714491f915307416d288783e484669a4b58a8fe3b7412b97044c
    nova_spiceproxy: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:4d20ff43a98cfa92485d805f8724266e75c27ed46a4c955681c11dfb94a9995e
    nova_spiceproxy_assets: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:4d20ff43a98cfa92485d805f8724266e75c27ed46a4c955681c11dfb94a9995e
    nova_storage_init: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:main@sha256:3be2b3d1ab07714491f915307416d288783e484669a4b58a8fe3b7412b97044c
    nova_wait_for_computes_init: gcr.io/google_containers/hyperkube-amd64:v1.11.6
    rabbit_init: harbor.atmosphere.dev/docker.io/library/rabbitmq:4.1.4-management
    test: docker.io/xrally/xrally-openstack:2.0.0
jobs:
  archive_deleted_rows:
    cron: "0 */1 * * *"
    history:
      failed: 1
      success: 3
    starting_deadline: 600
  cell_setup:
    cron: "0 */1 * * *"
    extended_wait:
      duration: 5
      enabled: false
      iteration: 3
    extra_command: null
    history:
      failed: 1
      success: 3
    starting_deadline: 600
  service_cleaner:
    cron: "0 */1 * * *"
    extra_command: null
    history:
      failed: 1
      success: 3
    sleep_time: 60
    starting_deadline: 600
labels:
  agent:
    compute:
      node_selector_key: openstack-compute-node
      node_selector_value: enabled
    compute_ironic:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
  api_metadata:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  conductor:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  novncproxy:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  osapi:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  scheduler:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  spiceproxy:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  test:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
# Render toggles: true renders the corresponding chart manifest, false skips it.
manifests:
  certificates: false
  configmap_bin: true
  configmap_etc: true
  cron_job_archive_deleted_rows: false
  cron_job_cell_setup: true
  cron_job_service_cleaner: true
  daemonset_compute: true
  deployment_api_metadata: true
  deployment_api_osapi: true
  deployment_conductor: true
  # consoleauth was removed from nova upstream — presumably why this stays off;
  # confirm before enabling.
  deployment_consoleauth: false
  # All placement-related manifests (deployment, ingress, db/ks jobs, secret,
  # service) are disabled — presumably placement is deployed separately;
  # confirm before flipping any of them on.
  deployment_placement: false
  deployment_scheduler: true
  deployment_spiceproxy: true
  ingress_metadata: false
  ingress_novncproxy: false
  ingress_osapi: false
  ingress_placement: false
  ingress_spiceproxy: false
  job_bootstrap: true
  job_cell_setup: true
  job_db_drop: false
  job_db_init: true
  job_db_init_placement: false
  job_db_sync: true
  job_image_repo_sync: true
  job_ks_endpoints: true
  job_ks_placement_endpoints: false
  job_ks_placement_service: false
  job_ks_placement_user: false
  job_ks_service: true
  job_ks_user: true
  job_rabbit_init: true
  job_storage_init: false
  network_policy: false
  pdb_metadata: true
  pdb_osapi: true
  pod_rally_test: true
  secret_db: true
  secret_db_api: true
  secret_db_cell0: true
  secret_ingress_tls: true
  secret_keystone: true
  secret_keystone_placement: false
  secret_rabbitmq: true
  secret_registry: true
  service_ingress_metadata: false
  service_ingress_novncproxy: false
  service_ingress_osapi: false
  service_ingress_placement: false
  service_ingress_spiceproxy: false
  service_metadata: true
  service_novncproxy: true
  service_osapi: true
  service_placement: false
  service_spiceproxy: true
  statefulset_compute_ironic: false
network:
  # Network backend list; a single entry (ovn) is configured here.
  backend:
  - ovn
  metadata:
    external_policy_local: false
    ingress:
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
      classes:
        cluster: nginx-cluster
        namespace: nginx
      public: true
    node_port:
      # Optional NodePort exposure; disabled, so only in-cluster
      # service/ingress paths are used.
      enabled: false
      port: 30775
    port: 8775
  novncproxy:
    ingress:
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
      classes:
        cluster: nginx-cluster
        namespace: nginx
      public: true
    node_port:
      enabled: false
      port: 30680
  osapi:
    external_policy_local: false
    ingress:
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
      classes:
        cluster: nginx-cluster
        namespace: nginx
      public: true
    node_port:
      enabled: false
      port: 30774
    port: 8774
  spiceproxy:
    ingress:
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
      classes:
        cluster: nginx-cluster
        namespace: nginx
      public: true
    node_port:
      enabled: false
      port: 30682
  # SSH between nova hosts on port 8022 — presumably for migration traffic;
  # confirm against the chart templates.
  ssh:
    enabled: true
    # NOTE(review): 0.0.0.0/0 permits SSH from any source subnet — tighten if
    # the deployment does not require it.
    from_subnet: 0.0.0.0/0
    key_types:
    - rsa
    # NOTE(review): ssh-dss (dsa) is refused by modern OpenSSH defaults —
    # confirm this key type is still needed.
    - dsa
    - ecdsa
    - ed25519
    port: 8022
    # NOTE(review): this is a plaintext private key committed in the values
    # file — it should be overridden per-deployment and sourced from a secret
    # store, not kept in VCS. The |+ block scalar deliberately preserves the
    # trailing blank line as part of the value.
    private_key: |+
      -----BEGIN OPENSSH PRIVATE KEY-----
      b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAACFwAAAAdzc2gtcn
      NhAAAAAwEAAQAAAgEA6y25jZCpZZA4yx4GEQqCvZoRynk4fsquu03SZvm0sc86UUXIZKAA
      SzqFuTIoiYRmROVXYgWGjDJoWT41G2+/0FSSVsXW8Sib/Kqb0uwOJXfijGPjTNQQrcyC7u
      NOy89vmY7BEvQqZaYS5iTLk9Bdr26xyLZ5KKXmMOxJ4MQkuSWlN/UgOS4emC2klVuRLKiU
      9ng5WFU+LU8CMiKS4QZKGvyEPaabCrvAkQ8EhRuzg+xZlRioFkwaAzF3nVVMaRjRV5vZkk
      g3jRBWfF24wS1VfVhEuy/LUWFu5ED9K7ChMFNDwO9ZOyAJnJKlk3UnzS/slnVHwVPOseNa
      B+AtEBW25druQUY7ZbJmUn34kRyeu/OX2HLI+lq1AzlS8Rl6NDWMRaPeqCEipRla/kEi1j
      kR7rcl1Z+IgFRpe935JSWlgS4wLCH03TCcZoTbZLNs134sFTCuZBUUCnN7kv0zqWdBsRGb
      nu1S2f1IIvM9l9cAkU2EJSJF/AHQ8k/oEGymR3AcG6+Gcr6HqOnm0XX8u7PQYueDxMOJpb
      EDkOuIXV/OEk+5e8o8Nvp7q0lTDr9XtYzD8xGpubZYkYLm58nsPoCE/Uct0kNlDlfpD5ZP
      FT/BJLNw/YtGKQOSl7vNNbX7ajGqhLZFXIDAu2Yqw2WE4+G3ZJMmLd3rIVVp/7r9Y/Lqy9
      8AAAc4gikEQIIpBEAAAAAHc3NoLXJzYQAAAgEA6y25jZCpZZA4yx4GEQqCvZoRynk4fsqu
      u03SZvm0sc86UUXIZKAASzqFuTIoiYRmROVXYgWGjDJoWT41G2+/0FSSVsXW8Sib/Kqb0u
      wOJXfijGPjTNQQrcyC7uNOy89vmY7BEvQqZaYS5iTLk9Bdr26xyLZ5KKXmMOxJ4MQkuSWl
      N/UgOS4emC2klVuRLKiU9ng5WFU+LU8CMiKS4QZKGvyEPaabCrvAkQ8EhRuzg+xZlRioFk
      waAzF3nVVMaRjRV5vZkkg3jRBWfF24wS1VfVhEuy/LUWFu5ED9K7ChMFNDwO9ZOyAJnJKl
      k3UnzS/slnVHwVPOseNaB+AtEBW25druQUY7ZbJmUn34kRyeu/OX2HLI+lq1AzlS8Rl6ND
      WMRaPeqCEipRla/kEi1jkR7rcl1Z+IgFRpe935JSWlgS4wLCH03TCcZoTbZLNs134sFTCu
      ZBUUCnN7kv0zqWdBsRGbnu1S2f1IIvM9l9cAkU2EJSJF/AHQ8k/oEGymR3AcG6+Gcr6HqO
      nm0XX8u7PQYueDxMOJpbEDkOuIXV/OEk+5e8o8Nvp7q0lTDr9XtYzD8xGpubZYkYLm58ns
      PoCE/Uct0kNlDlfpD5ZPFT/BJLNw/YtGKQOSl7vNNbX7ajGqhLZFXIDAu2Yqw2WE4+G3ZJ
      MmLd3rIVVp/7r9Y/Lqy98AAAADAQABAAACAFiO9tTqmQdQODqwWxt2qEXKaxGh9Ra1rveE
      Ngsl05ezT6QEEZJmt8WcS3ex/hbOnw4piG62YvSPFMEqTig/O+0ws4xuYmKcZqTN//B8iF
      UuoEY6ZQDsyD0SCmn3eZgAzGb4HRdMj1yInYxBZdGLAIRLvDG3GMeoNRbOG4HCiouXs+/u
      48v0ZHvskBHYEcJN1w46tmlfbBL2sf58C3AmBKjF1uw6PR7dFYXqMEsJY991FESzLLR8wr
      zaF09NLAFVYZtKajMN87EwakjZp/VCY3NYVkVgmYDTHkVgfZVCpWgDqrtPxfugNE5b4HK3
      Bp5bAg2kKzmPHAHbeW8GySFJ8/1AWPKNietv1Hmv5lg5RUR/tamP1SsafzHFueR0ZSvC/w
      Nxdh5aejQL604ChN1eEueIRngbyS8Xlu0lmR8ck/l/2QJEmOtDonK3aHusvdW4bJzLyeB5
      X6XsMZc/amIh0v6NeJaFS/+bQLj84kFkyO1EAehl+Qs3cOdTwJf0bbBwtWg15K3P9QR9pH
      gXZx+gmXmBlM4jGs46kIsa5iMJKZPo2ENB9h2WK+p1IokEQi+gcjVypiM2FPoB8zxT/jgS
      pM0Zo3iovwD13+/mvewikI9c2ym25sRboJ0FWx1f8s+Rx/vi4fPkiYz4UU0rJsJFsbOXS8
      JjhtmAXlveeHbh4x6pAAABAQCLiKoLxlIqpjGPmxLXyznQAZKhsZX56s8qVkadc7Phd+ZJ
      VA2sbOwtBKDCeCO2dJBJPfPzGwov4+WMjDbpp0nV9cObxv9DX9Vey0SkX5EBQDIZejkmFT
      w5DfrNvdFfKubpamFMtHcqcSdUyQLyAz+SzJuH+sIdKDwytj2oWb6Q27+bLuB3MrmN9w2I
      sUm4fYPhqQ4fwNla+yNDsm1Cvolkjw+qvOHFY93OfRyxqnB/2RBikmUAa6UuOxnnGorBsR
      iJJUkWnbE5YzkcZGPHNzbiLbg/H7r9M6Rdsrlbi8eRjiOsmTjmE1+G6FuQ3KSTsS1IUvQV
      mF+3uR5Li79fQX0eAAABAQD0/TdDm5CPbmpoJ5WwI7aTd+3DnzN6BJ2fBP7ETCyh7obHa2
      72memr4t6ryi/bn2587n7JXJwq9TjfsN4WqcKzUY0gtLDj6xefiKgr2cTSpXygy2SO/bsP
      ntjsCKYjl52Fo7pdF/4BLfDaE1iUKzTs1vSA5WaGvXsNHqh4nuOc9rdMyp1jc0n7bzJnA1
      4IhZ7lPT2JtKleEclwTgpgRJqVYxCwA3JhnKFBF5MoEdW4ozMeHQKoo7n8c0wXmzb5Suwy
      jZaGjcNtbVdoesyai4Jd2pZZ4CRa9PT2/sOrmPc3+fGmxinqiUdn9sYMKCPGvtf+fThn0e
      h6mjrhcUf6ujf3AAABAQD1v6GyCfKRi2k28YcDIjDVyIdD/kGD+UMWCWW1Oc/pRY6EIq+m
      cEl20oiTDMUE4avpwpJ8eDhS3VTYfT9GwqCXQZ+oRVh6tkX94VFoBL1oiTvTKrOZJvYnj7
      3BNrKpLAZ2CEXDbCZIGtYbGjJTNFwlGau475WgS2gmgSXigwrRHqEntyR7kM415MyIGM1b
      cmllimDdsi3uUycP/tavd/zduYJcyggJbtl5Rs3EFX0hKEbdNwBG8JDaGvmWyks1/ngQKi
      OzQmv7gH0EhNxwxm6XuQ4ByOe3zCtbDantNm9BaoQVLY+u1/nqO0Ggljrv8o44QAZXUB9+
      o+MRvI9MHaFZAAAAAAEC
      -----END OPENSSH PRIVATE KEY-----

    # Public half of the key pair above.
    public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDrLbmNkKllkDjLHgYRCoK9mhHKeTh+yq67TdJm+bSxzzpRRchkoABLOoW5MiiJhGZE5VdiBYaMMmhZPjUbb7/QVJJWxdbxKJv8qpvS7A4ld+KMY+NM1BCtzILu407Lz2+ZjsES9CplphLmJMuT0F2vbrHItnkopeYw7EngxCS5JaU39SA5Lh6YLaSVW5EsqJT2eDlYVT4tTwIyIpLhBkoa/IQ9ppsKu8CRDwSFG7OD7FmVGKgWTBoDMXedVUxpGNFXm9mSSDeNEFZ8XbjBLVV9WES7L8tRYW7kQP0rsKEwU0PA71k7IAmckqWTdSfNL+yWdUfBU86x41oH4C0QFbbl2u5BRjtlsmZSffiRHJ6785fYcsj6WrUDOVLxGXo0NYxFo96oISKlGVr+QSLWORHutyXVn4iAVGl73fklJaWBLjAsIfTdMJxmhNtks2zXfiwVMK5kFRQKc3uS/TOpZ0GxEZue7VLZ/Ugi8z2X1wCRTYQlIkX8AdDyT+gQbKZHcBwbr4Zyvoeo6ebRdfy7s9Bi54PEw4mlsQOQ64hdX84ST7l7yjw2+nurSVMOv1e1jMPzEam5tliRgubnyew+gIT9Ry3SQ2UOV+kPlk8VP8Eks3D9i0YpA5KXu801tftqMaqEtkVcgMC7ZirDZYTj4bdkkyYt3eshVWn/uv1j8urL3w==
# NetworkPolicy rules for nova pods (rendered only when manifests.network_policy
# is true). An empty rule ({}) matches all traffic, i.e. allow-all both ways.
network_policy:
  nova:
    egress:
    - {}
    ingress:
    - {}
pod:
  affinity:
    anti:
      # Soft (preferred, weight 10) anti-affinity spreading replicas across
      # distinct hostnames.
      topologyKey:
        default: kubernetes.io/hostname
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      weight:
        default: 10
  labels:
    include_app_kubernetes_io: false
  lifecycle:
    disruption_budget:
      # min_available: 0 means the PDB enforces no availability floor; every
      # replica may be evicted at once.
      metadata:
        min_available: 0
      osapi:
        min_available: 0
    termination_grace_period:
      # Seconds pods get to shut down cleanly before SIGKILL.
      metadata:
        timeout: 30
      osapi:
        timeout: 30
    upgrades:
      daemonsets:
        compute:
          enabled: true
          max_unavailable: 1
          min_ready_seconds: 0
        pod_replacement_strategy: RollingUpdate
      deployments:
        pod_replacement_strategy: RollingUpdate
        revision_history: 3
        rolling_update:
          max_surge: 3
          max_unavailable: 1
  # Per-component hook points for injecting extra volumes/volumeMounts into
  # pods and init containers; null means nothing extra is mounted.
  mounts:
    nova_api_metadata:
      init_container: null
      nova_api_metadata:
        volumeMounts: null
        volumes: null
    nova_api_osapi:
      init_container: null
      nova_api_osapi:
        volumeMounts: null
        volumes: null
    nova_bootstrap:
      init_container: null
      nova_bootstrap:
        volumeMounts: null
        volumes: null
    nova_compute:
      init_container: null
      nova_compute:
        volumeMounts: null
        volumes: null
    nova_compute_ironic:
      init_container: null
      nova_compute_ironic:
        volumeMounts: null
        volumes: null
    nova_conductor:
      init_container: null
      nova_conductor:
        volumeMounts: null
        volumes: null
    nova_db_sync:
      nova_db_sync:
        volumeMounts: null
        volumes: null
    nova_novncproxy:
      init_novncproxy: null
      nova_novncproxy:
        volumeMounts: null
        volumes: null
    nova_scheduler:
      init_container: null
      nova_scheduler:
        volumeMounts: null
        volumes: null
    nova_spiceproxy:
      init_spiceproxy: null
      nova_spiceproxy:
        volumeMounts: null
        volumes: null
    nova_tests:
      init_container: null
      nova_tests:
        volumeMounts: null
        volumes: null
  # Liveness/readiness (and, for compute, startup) probe tuning per component.
  probes:
    api-metadata:
      default:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 5
            periodSeconds: 10
            timeoutSeconds: 5
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 5
            periodSeconds: 10
            timeoutSeconds: 5
    api-osapi:
      default:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 5
            periodSeconds: 10
            timeoutSeconds: 5
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 5
            periodSeconds: 10
            timeoutSeconds: 5
    compute:
      default:
        liveness:
          enabled: true
          params:
            periodSeconds: 90
            timeoutSeconds: 70
        readiness:
          enabled: true
          params:
            periodSeconds: 90
            timeoutSeconds: 70
        # Startup probe budget: 120 failures x 10s period = up to 1200s before
        # liveness checking takes over.
        startup:
          enabled: true
          params:
            failureThreshold: 120
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 70
    compute-spice-proxy:
      default:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 60
            timeoutSeconds: 15
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 60
            timeoutSeconds: 15
    conductor:
      default:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 90
            timeoutSeconds: 70
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 80
            periodSeconds: 90
            timeoutSeconds: 70
    novncproxy:
      default:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 60
            timeoutSeconds: 15
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 60
            timeoutSeconds: 15
    # NOTE(review): rpc_retries/rpc_timeout sit under probes between the
    # per-component maps — they are probe-wide RPC tuning values, not a
    # component entry; presumably consumed by the health-check scripts.
    rpc_retries: 2
    rpc_timeout: 60
    scheduler:
      default:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 90
            timeoutSeconds: 70
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 80
            periodSeconds: 90
            timeoutSeconds: 70
  # Replica counts for the Deployments/StatefulSet; every component defaults
  # to a single replica (no redundancy) — scale up per deployment needs.
  replicas:
    api_metadata: 1
    compute_ironic: 1
    conductor: 1
    novncproxy: 1
    osapi: 1
    scheduler: 1
    spiceproxy: 1
  # CPU/memory requests and limits per long-running container and per job.
  resources:
    api:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    api_metadata:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    compute:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    compute_ironic:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    conductor:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    # Master switch — presumably the requests/limits in this section are only
    # applied when true; confirm in the chart templates.
    enabled: false
    jobs:
      archive_deleted_rows:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      bootstrap:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      cell_setup:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      db_drop:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      db_init:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      db_sync:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      image_repo_sync:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      ks_endpoints:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      ks_service:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      ks_user:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      rabbit_init:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      service_cleaner:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      storage_init:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      tests:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
    novncproxy:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    scheduler:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    spiceproxy:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    ssh:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
  # Pod/container securityContext settings. Pods run as uid 42424 — presumably
  # the nova service user in these images; confirm against the image build.
  security_context:
    archive_deleted_rows:
      container:
        nova_archive_deleted_rows:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_archive_deleted_rows_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
      pod:
        runAsUser: 42424
    bootstrap:
      container:
        bootstrap:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_wait_for_computes_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
      pod:
        runAsUser: 42424
    cell_setup:
      container:
        nova_cell_setup:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
      pod:
        runAsUser: 42424
    nova:
      container:
        # Runs as root — presumably to adjust ceph keyring ownership; confirm
        # in the chart templates.
        ceph_perms:
          readOnlyRootFilesystem: true
          runAsUser: 0
        nova_api:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_api_metadata_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        # Privileged: the compute agent needs host device/virtualization
        # access.
        nova_compute:
          privileged: true
          readOnlyRootFilesystem: true
        nova_compute_init:
          readOnlyRootFilesystem: true
          runAsUser: 0
        nova_compute_spice_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_compute_ssh:
          privileged: true
          runAsUser: 0
        nova_compute_ssh_init:
          runAsUser: 0
        nova_compute_vnc_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_conductor:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_novncproxy:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_novncproxy_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        # NOTE(review): "assests" is misspelled, but the key must match the
        # container name used by the chart templates — verify both sides
        # before renaming.
        nova_novncproxy_init_assests:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_osapi:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_scheduler:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_spiceproxy:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_spiceproxy_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_spiceproxy_init_assets:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        tungstenfabric_compute_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
      pod:
        runAsUser: 42424
    nova_cell_setup:
      container:
        nova_cell_setup:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_cell_setup_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_wait_for_computes_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
      pod:
        runAsUser: 42424
    service_cleaner:
      container:
        nova_service_cleaner:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
      pod:
        runAsUser: 42424
  # Optional tolerations allowing nova pods onto master/control-plane tainted
  # nodes; disabled by default.
  tolerations:
    nova:
      enabled: false
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/control-plane
        operator: Exists
  use_fqdn:
    # Presumably controls whether compute hosts register using their FQDN —
    # confirm against the chart templates.
    compute: true
  useHostNetwork:
    novncproxy: false
# Ceph RBD pool parameters — presumably consumed by the storage_init job,
# which is disabled above (manifests.job_storage_init: false), so these are
# inert by default; confirm in the chart templates.
rbd_pool:
  app_name: nova-vms
  chunk_size: 8
  crush_rule: replicated_rule
  replication: 3
release_group: null
# Kubernetes Secret object names referenced for credentials and TLS material.
secrets:
  identity:
    admin: nova-keystone-admin
    nova: nova-keystone-user
    test: nova-keystone-test
  oci_image_registry:
    nova: nova-oci-image-registry
  oslo_db:
    admin: nova-db-admin
    nova: nova-db-user
  oslo_db_api:
    admin: nova-db-api-admin
    nova: nova-db-api-user
  oslo_db_cell0:
    admin: nova-db-cell0-admin
    nova: nova-db-cell0-user
  oslo_messaging:
    admin: nova-rabbitmq-admin
    nova: nova-rabbitmq-user
  tls:
    compute:
      osapi:
        internal: nova-tls-api
        public: nova-tls-public
    compute_metadata:
      metadata:
        internal: metadata-tls-metadata
        public: metadata-tls-public
    compute_novnc_proxy:
      novncproxy:
        internal: nova-novncproxy-tls-proxy
        public: nova-novncproxy-tls-public
      vencrypt:
        internal: nova-novncproxy-vencrypt
    compute_spice_proxy:
      spiceproxy:
        internal: nova-spiceproxy-tls-proxy
        public: nova-spiceproxy-tls-public
# TLS toggles for backend connections; all disabled here.
tls:
  identity: false
  oslo_db: false
  oslo_messaging: false
