# COMPUTED VALUES:
bootstrap:
  enabled: true
  ks_user: admin
  script: null
  structured:
    flavors:
      enabled: false
      options:
        m1_large:
          disk: 80
          name: m1.large
          ram: 8192
          vcpus: 4
        m1_medium:
          disk: 40
          name: m1.medium
          ram: 4096
          vcpus: 2
        m1_small:
          disk: 20
          name: m1.small
          ram: 2048
          vcpus: 1
        m1_tiny:
          disk: 1
          name: m1.tiny
          ram: 512
          vcpus: 1
        m1_xlarge:
          disk: 160
          name: m1.xlarge
          ram: 16384
          vcpus: 8
  wait_for_computes:
    enabled: false
    remaining_wait: 300
    scripts:
      init_script: |
        # This runs in a bootstrap init container. It counts the number of compute nodes.
        # Nodes carrying the label openstack-compute-node=enabled are listed by name,
        # sorted, and written (whitespace-collapsed by the unquoted echo) to
        # /tmp/compute_nodes.txt for the wait_script to consume.
        COMPUTE_NODES=$(kubectl get nodes -o custom-columns=NAME:.metadata.name -l openstack-compute-node=enabled --no-headers | sort)
        /bin/echo $COMPUTE_NODES > /tmp/compute_nodes.txt
      wait_script: |
        # This script runs in the main bootstrap container just before the
        # bootstrap.script is called.
        #
        # Inputs:
        #   /tmp/compute_nodes.txt -- node name list produced by init_script
        #   WAIT_PERCENTAGE, REMAINING_WAIT -- environment variables; presumably
        #     rendered from wait_percentage / remaining_wait in this values tree
        #     -- TODO confirm against the chart templates
        COMPUTE_HOSTS=$(wc -w < /tmp/compute_nodes.txt)
        if [[ $COMPUTE_HOSTS -eq 0 ]]; then
          echo "There are no compute hosts found!"
          exit 1
        fi

        # Wait for all hypervisors to come up before moving on with the deployment
        HYPERVISOR_WAIT=true
        WAIT_AFTER_READY=0
        SLEEP=5
        while [[ $HYPERVISOR_WAIT == true ]]; do
          # Its possible that openstack command may fail due to not being able to
          # reach the compute service; wc still emits 0 on empty input, so
          # HYPERVISORS is always numeric.
          set +e
          HYPERVISORS=$(openstack hypervisor list -f value -c 'Hypervisor Hostname' | wc -w)
          set -e

          PERCENT_READY=$(( $HYPERVISORS * 100 / $COMPUTE_HOSTS ))
          if [[ $PERCENT_READY -ge $WAIT_PERCENTAGE ]]; then
            echo "Hypervisor ready percentage is $PERCENT_READY"
            if [[ $PERCENT_READY -eq 100 ]]; then
              HYPERVISOR_WAIT=false
              echo "All hypervisors are ready."
            elif [[ $WAIT_AFTER_READY -ge $REMAINING_WAIT ]]; then
              # Once past the threshold percentage, only wait a bounded extra
              # time for stragglers before proceeding.
              HYPERVISOR_WAIT=false
              echo "Waited the configured time -- $HYPERVISORS out of $COMPUTE_HOSTS hypervisor(s) ready -- proceeding with the bootstrap."
            else
              sleep $SLEEP
              WAIT_AFTER_READY=$(( $WAIT_AFTER_READY + $SLEEP ))
            fi
          else
            echo "Waiting $SLEEP seconds for enough hypervisors to be discovered..."
            sleep $SLEEP
          fi
        done
    wait_percentage: 70
ceph_client:
  configmap: ceph-etc
  user_secret_name: pvc-ceph-client-key
conf:
  api_audit_map:
    DEFAULT:
      target_endpoint_type: None
    custom_actions:
      delete: delete
      disable: disable
      enable: enable
      os-migrations/get: read
      os-server-password/post: update
      reboot: start/reboot
      shutdown: stop/shutdown
      startup: start/startup
    path_keywords:
      action: None
      add: None
      configure-project: None
      defaults: None
      delete: None
      detail: None
      diagnostics: None
      disable: None
      enable: None
      entries: entry
      extensions: alias
      flavors: flavor
      images: image
      ips: label
      limits: None
      metadata: key
      os-agents: os-agent
      os-aggregates: os-aggregate
      os-availability-zone: None
      os-certificates: None
      os-cloudpipe: None
      os-extra_specs: key
      os-fixed-ips: ip
      os-flavor-access: None
      os-floating-ip-dns: domain
      os-floating-ip-pools: None
      os-floating-ips: floating-ip
      os-floating-ips-bulk: host
      os-hosts: host
      os-hypervisors: hypervisor
      os-instance-actions: instance-action
      os-keypairs: keypair
      os-migrations: None
      os-networks: network
      os-quota-sets: tenant
      os-security-group-rules: rule
      os-security-groups: security_group
      os-server-password: None
      os-services: None
      os-simple-tenant-usage: tenant
      os-snapshots: snapshot
      os-virtual-interfaces: None
      os-volume-types: volume-type
      os-volume_attachments: attachment
      os-volumes: volume
      os-volumes_boot: None
      reboot: None
      servers: server
      shutdown: None
      startup: None
      statistics: None
    service_endpoints:
      compute: service/compute
  archive_deleted_rows:
    all_cells: false
    before:
      date: nil  # NOTE(review): parses as the string "nil", not YAML null -- confirm the consumer expects this sentinel (other nulls in this file use `null`)
      enabled: false
    max_rows:
      enabled: false
      rows: 1000
    purge_deleted_rows: false
    until_completion: true
  ceph:
    admin_keyring: null
    cinder:
      keyring: null
      secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
      user: cinder
    enabled: true
  enable_iscsi: false
  hypervisor:
    address_search_enabled: true
    host_interface: null
    host_network_cidr: 0/0
  libvirt:
    address_search_enabled: true
    live_migration_interface: null
    live_migration_network_cidr: 0/0
  logging:
    formatter_context:
      class: oslo_log.formatters.ContextFormatter
      datefmt: '%Y-%m-%d %H:%M:%S'
    formatter_default:
      datefmt: '%Y-%m-%d %H:%M:%S'
      format: '%(message)s'
    formatters:
      keys:
      - context
      - default
    handler_null:
      args: ()
      class: logging.NullHandler
      formatter: default
    handler_stderr:
      args: (sys.stderr,)
      class: StreamHandler
      formatter: context
    handler_stdout:
      args: (sys.stdout,)
      class: StreamHandler
      formatter: context
    handlers:
      keys:
      - stdout
      - stderr
      - "null"
    logger_amqp:
      handlers: stderr
      level: WARNING
      qualname: amqp
    logger_amqplib:
      handlers: stderr
      level: WARNING
      qualname: amqplib
    logger_boto:
      handlers: stderr
      level: WARNING
      qualname: boto
    logger_eventletwsgi:
      handlers: stderr
      level: WARNING
      qualname: eventlet.wsgi.server
    logger_nova:
      handlers:
      - stdout
      level: INFO
      qualname: nova
    logger_os.brick:
      handlers:
      - stdout
      level: INFO
      qualname: os.brick
    logger_root:
      handlers: "null"
      level: WARNING
    logger_sqlalchemy:
      handlers: stderr
      level: WARNING
      qualname: sqlalchemy
    loggers:
      keys:
      - root
      - nova
      - os.brick
  nova:
    DEFAULT:
      allow_resize_to_same_host: true
      compute_driver: libvirt.LibvirtDriver
      cpu_allocation_ratio: 4.5
      default_ephemeral_format: ext4
      disk_allocation_ratio: 3
      instance_usage_audit: true
      instance_usage_audit_period: hour
      metadata_workers: 2
      my_ip: 0.0.0.0
      osapi_compute_listen: 0.0.0.0
      osapi_compute_listen_port: null
      osapi_compute_workers: 2
      ram_allocation_ratio: 0.9
      resume_guests_state_on_host_boot: true
      state_path: /var/lib/nova
    api:
      list_records_by_skipping_down_cells: false
    api_database:
      connection: mysql+pymysql://nova:yyvbTGR7awRIWiWzp0wsDQDDSRTsYFQR@percona-xtradb-haproxy.openstack.svc.cluster.local:3306/nova_api
      max_retries: -1
    barbican:
      barbican_endpoint_type: internal
    cache:
      backend: oslo_cache.memcache_pool
      enabled: true
    cell0_database:
      connection: mysql+pymysql://nova:yyvbTGR7awRIWiWzp0wsDQDDSRTsYFQR@percona-xtradb-haproxy.openstack.svc.cluster.local:3306/nova_cell0
      max_retries: -1
    cinder:
      auth_type: password
      auth_url: http://keystone-api.openstack.svc.cluster.local:5000/
      catalog_info: volumev3::internalURL
      os_region_name: RegionOne
      password: rIV2CcCFi7OVicBeuNjU8NwhQgzkeFo3
      project_domain_name: service
      project_name: service
      user_domain_name: service
      username: cinder-RegionOne
    compute:
      consecutive_build_service_disable_threshold: 0
    conductor:
      workers: 2
    cors:
      allow_headers: X-Auth-Token,X-OpenStack-Nova-API-Version
      allowed_origin: '*'
    database:
      connection_recycle_time: 600
      max_overflow: 50
      max_pool_size: 5
      max_retries: -1
      pool_timeout: 30
    filter_scheduler:
      available_filters:
        type: multistring
        values:
        - nova.scheduler.filters.all_filters
        - nova_scheduler_filters.failure_domain_filter.FailureDomainFilter
      enabled_filters: ComputeFilter, AggregateTypeAffinityFilter, ComputeCapabilitiesFilter,
        PciPassthroughFilter, ImagePropertiesFilter, ServerGroupAntiAffinityFilter,
        ServerGroupAffinityFilter, FailureDomainFilter
      image_properties_default_architecture: x86_64
      max_instances_per_host: 200
    glance:
      enable_rbd_download: true
      num_retries: 3
    ironic:
      api_endpoint: http://ironic-api.openstack.svc.cluster.local:6385
      auth_type: password
      auth_url: http://keystone-api.openstack.svc.cluster.local:5000/
      auth_version: v3
      memcache_secret_key: VPQG7mWkMqzuwXdRJ6DCbtP0VD6HlbGZ
      memcache_servers: memcached.openstack.svc.cluster.local:11211
      password: IEA3NqFnEbgR1gxFgD4NN4A6Jq9ngDH1
      project_domain_name: service
      project_name: service
      region_name: RegionOne
      user_domain_name: service
      username: ironic-RegionOne
    keystone_authtoken:
      auth_type: password
      auth_uri: http://keystone-api.openstack.svc.cluster.local:5000/
      auth_url: http://keystone-api.openstack.svc.cluster.local:5000/
      auth_version: v3
      memcache_secret_key: VPQG7mWkMqzuwXdRJ6DCbtP0VD6HlbGZ
      memcache_security_strategy: ENCRYPT
      memcached_servers: memcached.openstack.svc.cluster.local:11211
      password: gvWGeh8sgolVJdK0SsUVeMfh8V1lPghh
      project_domain_name: service
      project_name: service
      region_name: RegionOne
      service_token_roles: service
      service_token_roles_required: true
      service_type: compute
      user_domain_name: service
      username: nova-RegionOne
    libvirt:
      connection_uri: qemu+unix:///system?socket=/run/libvirt/libvirt-sock
      disk_cachemodes: network=writeback
      hw_disk_discard: unmap
      images_rbd_ceph_conf: /etc/ceph/ceph.conf
      images_rbd_pool: vms
      images_type: qcow2
      live_migration_scheme: tls
      rbd_secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
      rbd_user: cinder
      swtpm_enabled: true
      swtpm_group: swtpm
      swtpm_user: swtpm
    neutron:
      auth_type: password
      auth_version: v3
      metadata_proxy_shared_secret: oXaw8dGbkoJzOEKvYI5iI6bVTBl7UvMo
      service_metadata_proxy: true
    notifications:
      notify_on_state_change: vm_and_task_state
    os_vif_ovs:
      ovsdb_connection: unix:/run/openvswitch/db.sock
    oslo_concurrency:
      lock_path: /var/lib/nova/tmp
    oslo_messaging_notifications:
      driver: noop
    oslo_messaging_rabbit:
      rabbit_ha_queues: true
    oslo_middleware:
      enable_proxy_headers_parsing: true
    oslo_policy:
      policy_file: /etc/nova/policy.yaml
    placement:
      auth_type: password
      auth_url: http://keystone-api.openstack.svc.cluster.local:5000/
      auth_version: v3
      password: OgclUyPtKivfsOzdXS0SLu8ty0UQ31bj
      project_domain_name: service
      project_name: service
      region_name: RegionOne
      user_domain_name: service
      username: placement-RegionOne
    privsep_osbrick:
      helper_command: sudo nova-rootwrap /etc/nova/rootwrap.conf privsep-helper --config-file
        /etc/nova/nova.conf
    scheduler:
      discover_hosts_in_cells_interval: 30
      max_attempts: 3
      workers: 2
    service_user:
      auth_type: password
      auth_url: http://keystone-api.openstack.svc.cluster.local:5000/
      password: gvWGeh8sgolVJdK0SsUVeMfh8V1lPghh
      project_domain_name: service
      project_name: service
      region_name: RegionOne
      send_service_user_token: true
      user_domain_name: service
      username: nova-RegionOne
    spice:
      html5proxy_host: 0.0.0.0
      server_listen: 0.0.0.0
    upgrade_levels:
      compute: auto
    vnc:
      auth_schemes: vencrypt,none
      novncproxy_host: 0.0.0.0
      server_listen: 0.0.0.0
    workarounds:
      skip_cpu_compare_on_dest: true
    wsgi:
      api_paste_config: /etc/nova/api-paste.ini
  nova_api_uwsgi:
    uwsgi:
      add-header: 'Connection: close'
      buffer-size: 65535
      chunked-input-limit: "4096000"
      die-on-term: true
      enable-threads: true
      exit-on-reload: false
      hook-master-start: unix_signal:15 gracefully_kill_them_all
      http-auto-chunked: true
      http-raw-body: true
      lazy-apps: true
      log-x-forwarded-for: true
      master: true
      module: nova.wsgi.osapi_compute:application
      need-app: true
      procname-prefix-spaced: 'nova-api:'
      route-user-agent: '^kube-probe.* donotlog:'
      socket-timeout: 10
      thunder-lock: true
      worker-reload-mercy: 80
  nova_compute_redactions:
  - database
  - api_database
  - cell0_database
  nova_ironic:
    DEFAULT:
      compute_driver: ironic.IronicDriver
      cpu_allocation_ratio: 1
      force_config_drive: true
      ram_allocation_ratio: 1
      reserved_host_memory_mb: 0
      scheduler_host_manager: ironic_host_manager
  nova_metadata_uwsgi:
    uwsgi:
      add-header: 'Connection: close'
      buffer-size: 65535
      chunked-input-limit: "4096000"
      die-on-term: true
      enable-threads: true
      exit-on-reload: false
      hook-master-start: unix_signal:15 gracefully_kill_them_all
      http-auto-chunked: true
      http-raw-body: true
      lazy-apps: true
      log-x-forwarded-for: true
      master: true
      module: nova.wsgi.metadata:application
      need-app: true
      procname-prefix-spaced: 'nova-metadata:'
      route-user-agent: '^kube-probe.* donotlog:'
      socket-timeout: 10
      thunder-lock: true
      worker-reload-mercy: 80
  nova_sudoers: |
    # This sudoers file supports rootwrap for both Kolla and LOCI Images.
    Defaults !requiretty
    Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin"
    nova ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/nova-rootwrap /etc/nova/rootwrap.conf *, /var/lib/openstack/bin/nova-rootwrap /etc/nova/rootwrap.conf *
  paste:
    app:metaapp:
      paste.app_factory: nova.api.metadata.handler:MetadataRequestHandler.factory
    app:osapi_compute_app_v21:
      paste.app_factory: nova.api.openstack.compute:APIRouterV21.factory
    app:oscomputeversionapp:
      paste.app_factory: nova.api.openstack.compute.versions:Versions.factory
    composite:metadata:
      /: meta
      use: egg:Paste#urlmap
    composite:openstack_compute_api_v21:
      keystone: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken
        audit keystonecontext osapi_compute_app_v21
      noauth2: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2
        osapi_compute_app_v21
      use: call:nova.api.auth:pipeline_factory_v21
    composite:openstack_compute_api_v21_legacy_v2_compatible:
      keystone: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken
        audit keystonecontext legacy_v2_compatible osapi_compute_app_v21
      noauth2: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2
        legacy_v2_compatible osapi_compute_app_v21
      use: call:nova.api.auth:pipeline_factory_v21
    composite:osapi_compute:
      /: oscomputeversions
      /v2: openstack_compute_api_v21_legacy_v2_compatible
      /v2.1: openstack_compute_api_v21
      use: call:nova.api.openstack.urlmap:urlmap_factory
    filter:audit:
      audit_map_file: /etc/nova/api_audit_map.conf
      paste.filter_factory: keystonemiddleware.audit:filter_factory
    filter:authtoken:
      paste.filter_factory: keystonemiddleware.auth_token:filter_factory
    filter:compute_req_id:
      paste.filter_factory: nova.api.compute_req_id:ComputeReqIdMiddleware.factory
    filter:cors:
      oslo_config_project: nova
      paste.filter_factory: oslo_middleware.cors:filter_factory
    filter:faultwrap:
      paste.filter_factory: nova.api.openstack:FaultWrapper.factory
    filter:http_proxy_to_wsgi:
      paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
    filter:keystonecontext:
      paste.filter_factory: nova.api.auth:NovaKeystoneContext.factory
    filter:legacy_v2_compatible:
      paste.filter_factory: nova.api.openstack:LegacyV2CompatibleWrapper.factory
    filter:noauth2:
      paste.filter_factory: nova.api.openstack.auth:NoAuthMiddleware.factory
    filter:request_id:
      paste.filter_factory: oslo_middleware:RequestId.factory
    filter:sizelimit:
      paste.filter_factory: oslo_middleware:RequestBodySizeLimiter.factory
    pipeline:meta:
      pipeline: cors metaapp
    pipeline:oscomputeversions:
      pipeline: faultwrap http_proxy_to_wsgi oscomputeversionapp
  policy: {}
  rabbitmq:
    policies:
    - apply-to: all
      definition:
        message-ttl: 70000
      name: ha_ttl_nova
      pattern: ^(?!(amq\.|reply_)).*
      priority: 0
      vhost: nova
  rally_tests:
    clean_up: |
      # Delete leftover rally test resources after a run: flavors and servers
      # whose names start with s_rally_, and images whose names start with
      # c_rally_. awk prints column 1 (the ID) when column 2 (the name) matches,
      # and each delete is skipped entirely when nothing matched.
      FLAVORS=$(openstack flavor list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }')
      if [ -n "$FLAVORS" ]; then
        echo $FLAVORS | xargs openstack flavor delete
      fi
      SERVERS=$(openstack server list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }')
      if [ -n "$SERVERS" ]; then
        echo $SERVERS | xargs openstack server delete
      fi
      IMAGES=$(openstack image list -f value | awk '$2 ~ /^c_rally_/ { print $1 }')
      if [ -n "$IMAGES" ]; then
        echo $IMAGES | xargs openstack image delete
      fi
    run_tempest: false
    tests:
      NovaAggregates.create_and_get_aggregate_details:
      - args:
          availability_zone: nova
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaAggregates.create_and_update_aggregate:
      - args:
          availability_zone: nova
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaAggregates.list_aggregates:
      - runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaAvailabilityZones.list_availability_zones:
      - args:
          detailed: true
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaFlavors.create_and_delete_flavor:
      - args:
          disk: 1
          ram: 500
          vcpus: 1
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaFlavors.create_and_list_flavor_access:
      - args:
          disk: 1
          ram: 500
          vcpus: 1
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaFlavors.create_flavor:
      - args:
          disk: 1
          ram: 500
          vcpus: 1
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaFlavors.create_flavor_and_add_tenant_access:
      - args:
          disk: 1
          ram: 500
          vcpus: 1
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaFlavors.create_flavor_and_set_keys:
      - args:
          disk: 1
          extra_specs:
            quota:disk_read_bytes_sec: 10240
          ram: 500
          vcpus: 1
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaFlavors.list_flavors:
      - args:
          detailed: true
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaHypervisors.list_and_get_hypervisors:
      - args:
          detailed: true
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaHypervisors.list_and_get_uptime_hypervisors:
      - args:
          detailed: true
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaHypervisors.list_and_search_hypervisors:
      - args:
          detailed: true
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaHypervisors.list_hypervisors:
      - args:
          detailed: true
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaHypervisors.statistics_hypervisors:
      - args: {}
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaKeypair.create_and_delete_keypair:
      - runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaKeypair.create_and_list_keypairs:
      - runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaServerGroups.create_and_list_server_groups:
      - args:
          all_projects: false
          kwargs:
            policies:
            - affinity
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaServices.list_services:
      - runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
  rootwrap: |
    # Configuration for nova-rootwrap
    # This file should be owned by (and only-writeable by) the root user

    [DEFAULT]
    # List of directories to load filter definitions from (separated by ',').
    # These directories MUST all be only writeable by root !
    filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap

    # List of directories to search executables in, in case filters do not
    # explicitely specify a full path (separated by ',')
    # If not specified, defaults to system PATH environment variable.
    # These directories MUST all be only writeable by root !
    exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin

    # Enable logging to syslog
    # Default value is False
    use_syslog=False

    # Which syslog facility to use.
    # Valid values include auth, authpriv, syslog, local0, local1...
    # Default value is 'syslog'
    syslog_log_facility=syslog

    # Which messages to log.
    # INFO means log all usage
    # ERROR means only log unsuccessful attempts
    syslog_log_level=ERROR
  rootwrap_filters:
    api_metadata:
      content: |
        # nova-rootwrap command filters for api-metadata nodes
        # This is needed on nova-api hosts running with "metadata" in enabled_apis
        # or when running nova-api-metadata
        # This file should be owned by (and only-writeable by) the root user

        [Filters]
        # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
        iptables-save: CommandFilter, iptables-save, root
        ip6tables-save: CommandFilter, ip6tables-save, root

        # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
        iptables-restore: CommandFilter, iptables-restore, root
        ip6tables-restore: CommandFilter, ip6tables-restore, root
      pods:
      - metadata
    compute:
      content: |
        # nova-rootwrap command filters for compute nodes
        # This file should be owned by (and only-writeable by) the root user

        [Filters]
        # nova/virt/disk/mount/api.py: 'kpartx', '-a', device
        # nova/virt/disk/mount/api.py: 'kpartx', '-d', device
        kpartx: CommandFilter, kpartx, root

        # nova/virt/xenapi/vm_utils.py: tune2fs, -O ^has_journal, part_path
        # nova/virt/xenapi/vm_utils.py: tune2fs, -j, partition_path
        tune2fs: CommandFilter, tune2fs, root

        # nova/virt/disk/mount/api.py: 'mount', mapped_device
        # nova/virt/disk/api.py: 'mount', '-o', 'bind', src, target
        # nova/virt/xenapi/vm_utils.py: 'mount', '-t', 'ext2,ext3,ext4,reiserfs'..
        # nova/virt/configdrive.py: 'mount', device, mountdir
        # nova/virt/libvirt/volume.py: 'mount', '-t', 'sofs' ...
        mount: CommandFilter, mount, root

        # nova/virt/disk/mount/api.py: 'umount', mapped_device
        # nova/virt/disk/api.py: 'umount' target
        # nova/virt/xenapi/vm_utils.py: 'umount', dev_path
        # nova/virt/configdrive.py: 'umount', mountdir
        umount: CommandFilter, umount, root

        # nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-c', device, image
        # nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-d', device
        qemu-nbd: CommandFilter, qemu-nbd, root

        # nova/virt/disk/mount/loop.py: 'losetup', '--find', '--show', image
        # nova/virt/disk/mount/loop.py: 'losetup', '--detach', device
        losetup: CommandFilter, losetup, root

        # nova/virt/disk/vfs/localfs.py: 'blkid', '-o', 'value', '-s', 'TYPE', device
        blkid: CommandFilter, blkid, root

        # nova/virt/libvirt/utils.py: 'blockdev', '--getsize64', path
        # nova/virt/disk/mount/nbd.py: 'blockdev', '--flushbufs', device
        blockdev: RegExpFilter, blockdev, root, blockdev, (--getsize64|--flushbufs), /dev/.*

        # nova/virt/disk/vfs/localfs.py: 'tee', canonpath
        tee: CommandFilter, tee, root

        # nova/virt/disk/vfs/localfs.py: 'mkdir', canonpath
        mkdir: CommandFilter, mkdir, root

        # nova/virt/disk/vfs/localfs.py: 'chown'
        # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
        # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
        # nova/virt/libvirt/connection.py: 'chown', 'root', basepath('disk')
        chown: CommandFilter, chown, root

        # nova/virt/disk/vfs/localfs.py: 'chmod'
        chmod: CommandFilter, chmod, root

        # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap'
        # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up'
        # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev
        # nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i..
        # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'..
        # nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',..
        # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',..
        # nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev)
        # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1]
        # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge
        # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', ..
        # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',..
        # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ...
        # nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,..
        # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up'
        # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up'
        # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..
        # nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, ..
        # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'
        # nova/network/linux_net.py: 'ip', 'route', 'add', ..
        # nova/network/linux_net.py: 'ip', 'route', 'del', .
        # nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev
        ip: CommandFilter, ip, root

        # nova/virt/libvirt/vif.py: 'tunctl', '-b', '-t', dev
        # nova/network/linux_net.py: 'tunctl', '-b', '-t', dev
        tunctl: CommandFilter, tunctl, root

        # nova/virt/libvirt/vif.py: 'ovs-vsctl', ...
        # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...
        # nova/network/linux_net.py: 'ovs-vsctl', ....
        ovs-vsctl: CommandFilter, ovs-vsctl, root

        # nova/virt/libvirt/vif.py: 'vrouter-port-control', ...
        vrouter-port-control: CommandFilter, vrouter-port-control, root

        # nova/virt/libvirt/vif.py: 'ebrctl', ...
        ebrctl: CommandFilter, ebrctl, root

        # nova/virt/libvirt/vif.py: 'mm-ctl', ...
        mm-ctl: CommandFilter, mm-ctl, root

        # nova/network/linux_net.py: 'ovs-ofctl', ....
        ovs-ofctl: CommandFilter, ovs-ofctl, root

        # nova/virt/libvirt/connection.py: 'dd', if=%s % virsh_output, ...
        dd: CommandFilter, dd, root

        # nova/virt/xenapi/volume_utils.py: 'iscsiadm', '-m', ...
        iscsiadm: CommandFilter, iscsiadm, root

        # nova/virt/libvirt/volume/aoe.py: 'aoe-revalidate', aoedev
        # nova/virt/libvirt/volume/aoe.py: 'aoe-discover'
        aoe-revalidate: CommandFilter, aoe-revalidate, root
        aoe-discover: CommandFilter, aoe-discover, root

        # nova/virt/xenapi/vm_utils.py: parted, --script, ...
        # nova/virt/xenapi/vm_utils.py: 'parted', '--script', dev_path, ..*.
        parted: CommandFilter, parted, root

        # nova/virt/xenapi/vm_utils.py: 'pygrub', '-qn', dev_path
        pygrub: CommandFilter, pygrub, root

        # nova/virt/xenapi/vm_utils.py: fdisk %(dev_path)s
        fdisk: CommandFilter, fdisk, root

        # nova/virt/xenapi/vm_utils.py: e2fsck, -f, -p, partition_path
        # nova/virt/disk/api.py: e2fsck, -f, -p, image
        e2fsck: CommandFilter, e2fsck, root

        # nova/virt/xenapi/vm_utils.py: resize2fs, partition_path
        # nova/virt/disk/api.py: resize2fs, image
        resize2fs: CommandFilter, resize2fs, root

        # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
        iptables-save: CommandFilter, iptables-save, root
        ip6tables-save: CommandFilter, ip6tables-save, root

        # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
        iptables-restore: CommandFilter, iptables-restore, root
        ip6tables-restore: CommandFilter, ip6tables-restore, root

        # nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
        # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
        arping: CommandFilter, arping, root

        # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
        dhcp_release: CommandFilter, dhcp_release, root

        # nova/network/linux_net.py: 'kill', '-9', pid
        # nova/network/linux_net.py: 'kill', '-HUP', pid
        kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP

        # nova/network/linux_net.py: 'kill', pid
        kill_radvd: KillFilter, root, /usr/sbin/radvd

        # nova/network/linux_net.py: dnsmasq call
        dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq

        # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'..
        radvd: CommandFilter, radvd, root

        # nova/network/linux_net.py: 'brctl', 'addbr', bridge
        # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
        # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
        # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
        brctl: CommandFilter, brctl, root

        # nova/virt/libvirt/utils.py: 'mkswap'
        # nova/virt/xenapi/vm_utils.py: 'mkswap'
        mkswap: CommandFilter, mkswap, root

        # nova/virt/libvirt/utils.py: 'nova-idmapshift'
        nova-idmapshift: CommandFilter, nova-idmapshift, root

        # nova/virt/xenapi/vm_utils.py: 'mkfs'
        # nova/utils.py: 'mkfs', fs, path, label
        mkfs: CommandFilter, mkfs, root

        # nova/virt/libvirt/utils.py: 'qemu-img'
        qemu-img: CommandFilter, qemu-img, root

        # nova/virt/disk/vfs/localfs.py: 'readlink', '-e'
        readlink: CommandFilter, readlink, root

        # nova/virt/disk/api.py:
        mkfs.ext3: CommandFilter, mkfs.ext3, root
        mkfs.ext4: CommandFilter, mkfs.ext4, root
        mkfs.ntfs: CommandFilter, mkfs.ntfs, root

        # nova/virt/libvirt/connection.py:
        lvremove: CommandFilter, lvremove, root

        # nova/virt/libvirt/utils.py:
        lvcreate: CommandFilter, lvcreate, root

        # nova/virt/libvirt/utils.py:
        lvs: CommandFilter, lvs, root

        # nova/virt/libvirt/utils.py:
        vgs: CommandFilter, vgs, root

        # nova/utils.py:read_file_as_root: 'cat', file_path
        # (called from nova/virt/disk/vfs/localfs.py:VFSLocalFS.read_file)
        read_passwd: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/passwd
        read_shadow: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/shadow

        # os-brick needed commands
        read_initiator: ReadFileFilter, /etc/iscsi/initiatorname.iscsi
        multipath: CommandFilter, multipath, root
        # multipathd show status
        multipathd: CommandFilter, multipathd, root
        systool: CommandFilter, systool, root
        vgc-cluster: CommandFilter, vgc-cluster, root
        # os_brick/initiator/connector.py
        drv_cfg: CommandFilter, /opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio/sdc/bin/drv_cfg, --query_guid

        # TODO(smcginnis) Temporary fix.
        # Need to pull in os-brick os-brick.filters file instead and clean
        # out stale brick values from this file.
        scsi_id: CommandFilter, /lib/udev/scsi_id, root
        # os_brick.privileged.default oslo.privsep context
        # This line ties the superuser privs with the config files, context name,
        # and (implicitly) the actual python code invoked.
        privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.*

        # nova/storage/linuxscsi.py: sg_scan device
        sg_scan: CommandFilter, sg_scan, root

        # nova/volume/encryptors/cryptsetup.py:
        # nova/volume/encryptors/luks.py:
        ln: RegExpFilter, ln, root, ln, --symbolic, --force, /dev/mapper/crypt-.+, .+

        # nova/volume/encryptors.py:
        # nova/virt/libvirt/dmcrypt.py:
        cryptsetup: CommandFilter, cryptsetup, root

        # nova/virt/xenapi/vm_utils.py:
        xenstore-read: CommandFilter, xenstore-read, root

        # nova/virt/libvirt/utils.py:
        rbd: CommandFilter, rbd, root

        # nova/virt/libvirt/utils.py: 'shred', '-n3', '-s%d' % volume_size, path
        shred: CommandFilter, shred, root

        # nova/virt/libvirt/volume.py: 'cp', '/dev/stdin', delete_control..
        cp: CommandFilter, cp, root

        # nova/virt/xenapi/vm_utils.py:
        sync: CommandFilter, sync, root

        # nova/virt/libvirt/imagebackend.py:
        ploop: RegExpFilter, ploop, root, ploop, restore-descriptor, .*
        prl_disk_tool: RegExpFilter, prl_disk_tool, root, prl_disk_tool, resize, --size, .*M$, --resize_partition, --hdd, .*

        # nova/virt/libvirt/utils.py: 'xend', 'status'
        xend: CommandFilter, xend, root

        # nova/virt/libvirt/utils.py:
        touch: CommandFilter, touch, root

        # nova/virt/libvirt/volume/vzstorage.py
        pstorage-mount: CommandFilter, pstorage-mount, root
      pods:
      - compute
    network:
      content: |
        # nova-rootwrap command filters for network nodes
        # This file should be owned by (and only-writeable by) the root user

        [Filters]
        # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap'
        # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up'
        # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev
        # nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i..
        # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'..
        # nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',..
        # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',..
        # nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev)
        # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1]
        # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge
        # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', ..
        # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',..
        # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ...
        # nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,..
        # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up'
        # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up'
        # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..
        # nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, ..
        # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'
        # nova/network/linux_net.py: 'ip', 'route', 'add', ..
        # nova/network/linux_net.py: 'ip', 'route', 'del', .
        # nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev
        ip: CommandFilter, ip, root

        # nova/virt/libvirt/vif.py: 'ovs-vsctl', ...
        # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...
        # nova/network/linux_net.py: 'ovs-vsctl', ....
        ovs-vsctl: CommandFilter, ovs-vsctl, root

        # nova/network/linux_net.py: 'ovs-ofctl', ....
        ovs-ofctl: CommandFilter, ovs-ofctl, root

        # nova/virt/libvirt/vif.py: 'ivs-ctl', ...
        # nova/virt/libvirt/vif.py: 'ivs-ctl', 'del-port', ...
        # nova/network/linux_net.py: 'ivs-ctl', ....
        ivs-ctl: CommandFilter, ivs-ctl, root

        # nova/virt/libvirt/vif.py: 'ifc_ctl', ...
        ifc_ctl: CommandFilter, /opt/pg/bin/ifc_ctl, root

        # nova/network/linux_net.py: 'ebtables', '-D' ...
        # nova/network/linux_net.py: 'ebtables', '-I' ...
        ebtables: CommandFilter, ebtables, root
        ebtables_usr: CommandFilter, ebtables, root

        # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
        iptables-save: CommandFilter, iptables-save, root
        ip6tables-save: CommandFilter, ip6tables-save, root

        # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
        iptables-restore: CommandFilter, iptables-restore, root
        ip6tables-restore: CommandFilter, ip6tables-restore, root

        # nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
        # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
        arping: CommandFilter, arping, root

        # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
        dhcp_release: CommandFilter, dhcp_release, root

        # nova/network/linux_net.py: 'kill', '-9', pid
        # nova/network/linux_net.py: 'kill', '-HUP', pid
        kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP

        # nova/network/linux_net.py: 'kill', pid
        kill_radvd: KillFilter, root, /usr/sbin/radvd

        # nova/network/linux_net.py: dnsmasq call
        dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq

        # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'..
        radvd: CommandFilter, radvd, root

        # nova/network/linux_net.py: 'brctl', 'addbr', bridge
        # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
        # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
        # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
        brctl: CommandFilter, brctl, root

        # nova/network/linux_net.py: 'sysctl', ....
        sysctl: CommandFilter, sysctl, root

        # nova/network/linux_net.py: 'conntrack'
        conntrack: CommandFilter, conntrack, root

        # nova/network/linux_net.py: 'fp-vdev'
        fp-vdev: CommandFilter, fp-vdev, root
      pods:
      - compute
  security: |
    #
    # Disable access to the entire file system except for the directories that
    # are explicitly allowed later.
    #
    # This currently breaks the configurations that come with some web application
    # Debian packages.
    #
    #<Directory />
    #   AllowOverride None
    #   Require all denied
    #</Directory>

    # Changing the following options will not really affect the security of the
    # server, but might make attacks slightly more difficult in some cases.

    #
    # ServerTokens
    # This directive configures what you return as the Server HTTP response
    # Header. The default is 'Full' which sends information about the OS-Type
    # and compiled in modules.
    # Set to one of:  Full | OS | Minimal | Minor | Major | Prod
    # where Full conveys the most information, and Prod the least.
    ServerTokens Prod

    #
    # Optionally add a line containing the server version and virtual host
    # name to server-generated pages (internal error documents, FTP directory
    # listings, mod_status and mod_info output etc., but not CGI generated
    # documents or custom error documents).
    # Set to "EMail" to also include a mailto: link to the ServerAdmin.
    # Set to one of:  On | Off | EMail
    ServerSignature Off

    #
    # Allow TRACE method
    #
    # Set to "extended" to also reflect the request body (only for testing and
    # diagnostic purposes).
    #
    # Set to one of:  On | Off | extended
    TraceEnable Off

    #
    # Forbid access to version control directories
    #
    # If you use version control systems in your document root, you should
    # probably deny access to their directories. For example, for subversion:
    #
    #<DirectoryMatch "/\.svn">
    #   Require all denied
    #</DirectoryMatch>

    #
    # Setting this header will prevent MSIE from interpreting files as something
    # else than declared by the content type in the HTTP headers.
    # Requires mod_headers to be enabled.
    #
    #Header set X-Content-Type-Options: "nosniff"

    #
    # Setting this header will prevent other sites from embedding pages from this
    # site as frames. This defends against clickjacking attacks.
    # Requires mod_headers to be enabled.
    #
    #Header set X-Frame-Options: "sameorigin"
  software:
    apache2:
      a2dismod: null
      a2enmod: null
      binary: apache2
      conf_dir: /etc/apache2/conf-enabled
      mods_dir: /etc/apache2/mods-available
      site_dir: /etc/apache2/sites-enabled  # NOTE(review): was "sites-enable" — standard Apache dir is "sites-enabled"; confirm no template depends on the old value
      start_parameters: -DFOREGROUND
console:
  address_search_enabled: true
  console_kind: novnc
  novnc:
    compute:
      vncserver_proxyclient_interface: null
      vncserver_proxyclient_network_cidr: 0/0
    vncproxy:
      vncserver_proxyclient_interface: null
      vncserver_proxyclient_network_cidr: 0/0
  serial: null
  spice:
    compute:
      server_proxyclient_interface: null
      server_proxyclient_network_cidr: 0/0
    proxy:
      server_proxyclient_interface: null
      server_proxyclient_network_cidr: 0/0
dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
        - nova-image-repo-sync
        services:
        - endpoint: node
          service: local_image_registry
    targeted:
      linuxbridge:
        compute:
          pod:
          - labels:
              application: neutron
              component: neutron-lb-agent
            requireSameNode: true
      openvswitch:
        compute:
          pod:
          - labels:
              application: neutron
              component: neutron-ovs-agent
            requireSameNode: true
      ovn:
        compute:
          pod:
          - labels:
              application: ovn
              component: ovn-controller
            requireSameNode: true
      sriov:
        compute:
          pod:
          - labels:
              application: neutron
              component: neutron-sriov-agent
            requireSameNode: true
  static:
    api:
      jobs:
      - nova-db-sync
      - nova-ks-user
      - nova-ks-endpoints
      - nova-rabbit-init
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: oslo_db
      - endpoint: internal
        service: identity
    api_metadata:
      jobs:
      - nova-db-sync
      - nova-ks-user
      - nova-ks-endpoints
      - nova-rabbit-init
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: oslo_db
      - endpoint: internal
        service: identity
    archive_deleted_rows:
      jobs:
      - nova-db-init
      - nova-db-sync
    bootstrap:
      services:
      - endpoint: internal
        service: identity
      - endpoint: internal
        service: compute
    cell_setup:
      jobs:
      - nova-db-sync
      - nova-rabbit-init
      pod:
      - labels:
          application: nova
          component: compute
        requireSameNode: false
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: oslo_db
      - endpoint: internal
        service: identity
      - endpoint: internal
        service: compute
    compute:
      jobs:
      - nova-db-sync
      - nova-rabbit-init
      pod:
      - labels:
          application: libvirt
          component: libvirt
        requireSameNode: true
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: image
      - endpoint: internal
        service: compute
      - endpoint: internal
        service: network
      - endpoint: internal
        service: compute_metadata
    compute_ironic:
      jobs:
      - nova-db-sync
      - nova-rabbit-init
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: image
      - endpoint: internal
        service: compute
      - endpoint: internal
        service: network
      - endpoint: internal
        service: baremetal
    conductor:
      jobs:
      - nova-db-sync
      - nova-rabbit-init
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: oslo_db
      - endpoint: internal
        service: identity
      - endpoint: internal
        service: compute
    db_drop:
      services:
      - endpoint: internal
        service: oslo_db
    db_init:
      services:
      - endpoint: internal
        service: oslo_db
    db_sync:
      jobs:
      - nova-db-init
      services:
      - endpoint: internal
        service: oslo_db
    image_repo_sync:
      services:
      - endpoint: internal
        service: local_image_registry
    ks_endpoints:
      jobs:
      - nova-ks-service
      services:
      - endpoint: internal
        service: identity
    ks_service:
      services:
      - endpoint: internal
        service: identity
    ks_user:
      services:
      - endpoint: internal
        service: identity
    novncproxy:
      jobs:
      - nova-db-sync
      services:
      - endpoint: internal
        service: oslo_db
    rabbit_init:
      services:
      - endpoint: internal
        service: oslo_messaging
    scheduler:
      jobs:
      - nova-db-sync
      - nova-rabbit-init
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: oslo_db
      - endpoint: internal
        service: identity
      - endpoint: internal
        service: compute
    service_cleaner:
      jobs:
      - nova-db-sync
      - nova-rabbit-init
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: oslo_db
      - endpoint: internal
        service: identity
      - endpoint: internal
        service: compute
    spiceproxy:
      jobs:
      - nova-db-sync
      services:
      - endpoint: internal
        service: oslo_db
    tests:
      services:
      - endpoint: internal
        service: image
      - endpoint: internal
        service: compute
      - endpoint: internal
        service: network
      - endpoint: internal
        service: compute_metadata
endpoints:
  baremetal:
    host_fqdn_override:
      default: null
      public:
        host: baremetal.199-204-45-140.nip.io
    hosts:
      default: ironic-api
      public: ironic
    name: ironic
    path:
      default: null
    port:
      api:
        default: 6385
        public: 443
    scheme:
      default: http
      public: https
  cluster_domain_suffix: cluster.local
  compute:
    host_fqdn_override:
      default: null
      public:
        host: compute.199-204-45-140.nip.io
    hosts:
      default: nova-api
      public: nova
    name: nova
    path:
      default: /v2.1
    port:
      api:
        default: 8774
        public: 443
        service: 8774
      novncproxy:
        default: 6080
    scheme:
      default: http
      public: https
      service: http
  compute_metadata:
    host_fqdn_override:
      default: null
    hosts:
      default: nova-metadata
      public: nova-metadata
    ip:
      ingress: null
    name: nova
    path:
      default: /
    port:
      metadata:
        default: 8775
        public: 8775
    scheme:
      default: http
    secret: oXaw8dGbkoJzOEKvYI5iI6bVTBl7UvMo
  compute_novnc_proxy:
    host_fqdn_override:
      default: null
      public:
        host: vnc.199-204-45-140.nip.io
    hosts:
      default: nova-novncproxy
      public: novncproxy
    name: nova
    path:
      default: /vnc_lite.html
    port:
      novnc_proxy:
        default: 6080
        public: 443
    scheme:
      default: http
      public: https
  compute_novnc_vencrypt:
    host_fqdn_override:
      default:
        commonName: nova-novncproxy
        tls:
          commonName: nova-novncproxy
          issuerRef:
            kind: Issuer
            name: libvirt-vnc
          secretName: nova-novncproxy-vencrypt
          usages:
          - client auth
        usages:
        - client auth
    hosts:
      default: nova-novncproxy
  compute_spice_proxy:
    host_fqdn_override:
      default: null
    hosts:
      default: nova-spiceproxy
      public: spiceproxy
    name: nova
    path:
      default: /spice_auto.html
    port:
      spice_proxy:
        default: 6082
        public: 80
    scheme:
      default: http
  fluentd:
    host_fqdn_override:
      default: null
    hosts:
      default: fluentd-logging
    name: fluentd
    namespace: null
    path:
      default: null
    port:
      metrics:
        default: 24220
      service:
        default: 24224
    scheme: http
  identity:
    auth:
      admin:
        password: UoBJkzwpua30nJTLnyRfzv79U8YESQ1j
        project_domain_name: default
        project_name: admin
        region_name: RegionOne
        user_domain_name: default
        username: admin-RegionOne
      cinder:
        password: rIV2CcCFi7OVicBeuNjU8NwhQgzkeFo3
        project_domain_name: service
        project_name: service
        region_name: RegionOne
        role: admin,service
        user_domain_name: service
        username: cinder-RegionOne
      glance:
        password: q9TgHb3tVN82dXWO8NNIn48NBSEKo6Pi
        region_name: RegionOne
        username: glance-RegionOne
      ironic:
        auth_type: password
        auth_version: v3
        password: IEA3NqFnEbgR1gxFgD4NN4A6Jq9ngDH1
        project_domain_name: service
        project_name: service
        region_name: RegionOne
        user_domain_name: service
        username: ironic-RegionOne
      neutron:
        password: kSVK7TmvWsHTZR6eGYYrXvJ05rYplKFS
        project_domain_name: service
        project_name: service
        region_name: RegionOne
        user_domain_name: service
        username: neutron-RegionOne
      nova:
        password: gvWGeh8sgolVJdK0SsUVeMfh8V1lPghh
        project_domain_name: service
        project_name: service
        region_name: RegionOne
        role: admin,service
        user_domain_name: service
        username: nova-RegionOne
      placement:
        password: OgclUyPtKivfsOzdXS0SLu8ty0UQ31bj
        project_domain_name: service
        project_name: service
        region_name: RegionOne
        role: admin
        user_domain_name: service
        username: placement-RegionOne
      test:
        password: password
        project_domain_name: service
        project_name: test
        region_name: RegionOne
        role: admin
        user_domain_name: service
        username: nova-test
    host_fqdn_override:
      default: null
      public:
        host: identity.199-204-45-140.nip.io
    hosts:
      default: keystone-api
      internal: keystone-api
    name: keystone
    path:
      default: /
    port:
      api:
        default: 5000
        internal: 5000
        public: 443
    scheme:
      default: http
      public: https
  image:
    host_fqdn_override:
      default: null
      public:
        host: image.199-204-45-140.nip.io
    hosts:
      default: glance-api
      public: glance
    name: glance
    path:
      default: null
    port:
      api:
        default: 9292
        public: 443
    scheme:
      default: http
      public: https
  ingress:
    hosts:
      default: ingress
    name: ingress
    namespace: null
    port:
      ingress:
        default: 80
  kube_dns:
    host_fqdn_override:
      default: null
    hosts:
      default: kube-dns
    name: kubernetes-dns
    namespace: kube-system
    path:
      default: null
    port:
      dns:
        default: 53
        protocol: UDP
    scheme: http
  local_image_registry:
    host_fqdn_override:
      default: null
    hosts:
      default: localhost
      internal: docker-registry
      node: localhost
    name: docker-registry
    namespace: docker-registry
    port:
      registry:
        node: 5000
  network:
    host_fqdn_override:
      default: null
      public:
        host: network.199-204-45-140.nip.io
    hosts:
      default: neutron-server
      public: neutron
    name: neutron
    path:
      default: null
    port:
      api:
        default: 9696
        public: 443
    scheme:
      default: http
      public: https
  oci_image_registry:
    auth:
      enabled: false
      nova:
        password: password
        username: nova
    host_fqdn_override:
      default: null
    hosts:
      default: localhost
    name: oci-image-registry
    namespace: oci-image-registry
    port:
      registry:
        default: null
  oslo_cache:
    auth:
      memcache_secret_key: VPQG7mWkMqzuwXdRJ6DCbtP0VD6HlbGZ
    host_fqdn_override:
      default: null
    hosts:
      default: memcached
    port:
      memcache:
        default: 11211
  oslo_db:
    auth:
      admin:
        password: NPSgutZmHv8qbMHnnwaf94OeeThmOS30
        secret:
          tls:
            internal: mariadb-tls-direct
        username: root
      cinder:
        password: xFTjpEU86O2L5r1fbOONUEh7r9jLZ2GY
      glance:
        password: XJwNm24yQ8WG7L98KMIfac8oBN9czKWg
      ironic:
        password: glC2UZ6PMGb8Q3sTZYsusd25i7nwFsxY
      keystone:
        password: 56JUU5lpvJTUB21UPrP8VolEBMM1XELT
      neutron:
        password: UU63MtHnbr5SPGuqjW2MzbaUHckQZaq4
      nova:
        password: yyvbTGR7awRIWiWzp0wsDQDDSRTsYFQR
        username: nova
      placement:
        password: oF3MNLIBP0LkdOxLUlgSgfNRqxyTQPDc
    host_fqdn_override:
      default: null
    hosts:
      default: percona-xtradb-haproxy
    path: /nova
    port:
      mysql:
        default: 3306
    scheme: mysql+pymysql
  oslo_db_api:
    auth:
      admin:
        password: NPSgutZmHv8qbMHnnwaf94OeeThmOS30
        username: root
      nova:
        password: yyvbTGR7awRIWiWzp0wsDQDDSRTsYFQR
        username: nova
    host_fqdn_override:
      default: null
    hosts:
      default: percona-xtradb-haproxy
    path: /nova_api
    port:
      mysql:
        default: 3306
    scheme: mysql+pymysql
  oslo_db_cell0:
    auth:
      admin:
        password: NPSgutZmHv8qbMHnnwaf94OeeThmOS30
        username: root
      nova:
        password: yyvbTGR7awRIWiWzp0wsDQDDSRTsYFQR
        username: nova
    host_fqdn_override:
      default: null
    hosts:
      default: percona-xtradb-haproxy
    path: /nova_cell0
    port:
      mysql:
        default: 3306
    scheme: mysql+pymysql
  oslo_messaging:
    auth:
      admin:
        password: dN_SZ3SSl7m2riB6-OU-opflgMjVeiU4
        secret:
          tls:
            internal: rabbitmq-tls-direct
        username: default_user_6pfUT12STRC8_Ncmjux
      cinder:
        password: oMacZfZqO1nfUdWcx223tjutIHRYEoPQ
      glance:
        password: r56tD9aPtcio3hQDOktMH4hEbO0Rncjh
      ironic:
        password: QMkRQiSjJl2sTwMGt7Vk71AE2WWLHxL6
      keystone:
        password: 3SfSIWUnhbupcESOY4TkcI7x8QNRGBjK
      neutron:
        password: HxRwXkV1uVI8IzsjRiuTcC9wi9WpmkIE
      nova:
        password: LtYtxCpGzIYzwiQdPECV81koxEOF3Hsa
        username: nova
      user:
        password: dN_SZ3SSl7m2riB6-OU-opflgMjVeiU4
        username: default_user_6pfUT12STRC8_Ncmjux
    host_fqdn_override:
      default: null
    hosts:
      default: rabbitmq-nova
    path: /nova
    port:
      amqp:
        default: 5672
      http:
        default: 15672
    scheme: rabbit
  placement:
    host_fqdn_override:
      default: null
      public:
        host: placement.199-204-45-140.nip.io
    hosts:
      default: placement-api
      public: placement
    name: placement
    path:
      default: /
    port:
      api:
        default: 8778
        public: 443
        service: 8778
    scheme:
      default: http
      public: https
      service: http
  volumev3:
    host_fqdn_override:
      default: null
      public:
        host: volume.199-204-45-140.nip.io
    hosts:
      default: cinder-api
      public: cinder
    name: cinderv3
    path:
      default: /v3/%(tenant_id)s
      healthcheck: /healthcheck
    port:
      api:
        default: 8776
        public: 443
    scheme:
      default: http
      public: https
health_probe:
  logging:
    level: ERROR
helm-toolkit:
  global: {}
helm3_hook: true
images:
  local_registry:
    active: false
    exclude:
    - dep_check
    - image_repo_sync
  pull_policy: IfNotPresent
  tags:
    bootstrap: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:main@sha256:7811776d391e6d416d04ce4fbf0ff8c4919790dfd1f22f7a39da01a1cf927a00
    db_drop: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:main@sha256:7811776d391e6d416d04ce4fbf0ff8c4919790dfd1f22f7a39da01a1cf927a00
    db_init: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:main@sha256:7811776d391e6d416d04ce4fbf0ff8c4919790dfd1f22f7a39da01a1cf927a00
    dep_check: harbor.atmosphere.dev/ghcr.io/vexxhost/kubernetes-entrypoint:edge@sha256:8921b64b87af184a1421dd856b2703bcf3cff9f50863cd0d18371cf964a87bd3
    image_repo_sync: docker.io/docker:17.07.0
    ks_endpoints: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:main@sha256:7811776d391e6d416d04ce4fbf0ff8c4919790dfd1f22f7a39da01a1cf927a00
    ks_service: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:main@sha256:7811776d391e6d416d04ce4fbf0ff8c4919790dfd1f22f7a39da01a1cf927a00
    ks_user: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:main@sha256:7811776d391e6d416d04ce4fbf0ff8c4919790dfd1f22f7a39da01a1cf927a00
    nova_api: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:241567ac9f0787068712c84a1c52a3fe20ff236f4c456185b21c010d39c02c60
    nova_archive_deleted_rows: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:241567ac9f0787068712c84a1c52a3fe20ff236f4c456185b21c010d39c02c60
    nova_cell_setup: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:241567ac9f0787068712c84a1c52a3fe20ff236f4c456185b21c010d39c02c60
    nova_cell_setup_init: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:main@sha256:7811776d391e6d416d04ce4fbf0ff8c4919790dfd1f22f7a39da01a1cf927a00
    nova_compute: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:241567ac9f0787068712c84a1c52a3fe20ff236f4c456185b21c010d39c02c60
    nova_compute_ironic: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:241567ac9f0787068712c84a1c52a3fe20ff236f4c456185b21c010d39c02c60
    nova_compute_ssh: harbor.atmosphere.dev/ghcr.io/vexxhost/nova-ssh:main@sha256:7b8c92ca1d175f1144a71b0ea09c356168ab6a726df44782a927be623dbbd633
    nova_conductor: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:241567ac9f0787068712c84a1c52a3fe20ff236f4c456185b21c010d39c02c60
    nova_db_sync: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:241567ac9f0787068712c84a1c52a3fe20ff236f4c456185b21c010d39c02c60
    nova_novncproxy: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:241567ac9f0787068712c84a1c52a3fe20ff236f4c456185b21c010d39c02c60
    nova_novncproxy_assets: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:241567ac9f0787068712c84a1c52a3fe20ff236f4c456185b21c010d39c02c60
    nova_scheduler: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:241567ac9f0787068712c84a1c52a3fe20ff236f4c456185b21c010d39c02c60
    nova_service_cleaner: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:main@sha256:7811776d391e6d416d04ce4fbf0ff8c4919790dfd1f22f7a39da01a1cf927a00
    nova_spiceproxy: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:241567ac9f0787068712c84a1c52a3fe20ff236f4c456185b21c010d39c02c60
    nova_spiceproxy_assets: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:main@sha256:241567ac9f0787068712c84a1c52a3fe20ff236f4c456185b21c010d39c02c60
    nova_storage_init: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:main@sha256:7811776d391e6d416d04ce4fbf0ff8c4919790dfd1f22f7a39da01a1cf927a00
    nova_wait_for_computes_init: gcr.io/google_containers/hyperkube-amd64:v1.11.6
    rabbit_init: harbor.atmosphere.dev/docker.io/library/rabbitmq:4.1.4-management
    test: docker.io/xrally/xrally-openstack:2.0.0
jobs:
  archive_deleted_rows:
    cron: 0 */1 * * *
    history:
      failed: 1
      success: 3
    starting_deadline: 600
  cell_setup:
    cron: 0 */1 * * *
    extended_wait:
      duration: 5
      enabled: false
      iteration: 3
    extra_command: null
    history:
      failed: 1
      success: 3
    starting_deadline: 600
  service_cleaner:
    cron: 0 */1 * * *
    extra_command: null
    history:
      failed: 1
      success: 3
    sleep_time: 60
    starting_deadline: 600
labels:
  agent:
    compute:
      node_selector_key: openstack-compute-node
      node_selector_value: enabled
    compute_ironic:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
  api_metadata:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  conductor:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  novncproxy:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  osapi:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  scheduler:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  spiceproxy:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  test:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
# Per-template render toggles: each key enables/disables rendering of one
# chart manifest. All *placement* objects are disabled here -- presumably
# placement is deployed by a separate chart; confirm for this deployment.
manifests:
  certificates: false
  configmap_bin: true
  configmap_etc: true
  cron_job_archive_deleted_rows: false
  cron_job_cell_setup: true
  cron_job_service_cleaner: true
  daemonset_compute: true
  deployment_api_metadata: true
  deployment_api_osapi: true
  deployment_conductor: true
  # NOTE(review): consoleauth is disabled -- the service was removed from
  # nova in recent releases; this toggle likely remains for legacy charts.
  deployment_consoleauth: false
  deployment_novncproxy: true
  deployment_placement: false
  deployment_scheduler: true
  deployment_spiceproxy: true
  # All ingress objects are off; services are exposed by other means
  # (see network.*.ingress / node_port settings below).
  ingress_metadata: false
  ingress_novncproxy: false
  ingress_osapi: false
  ingress_placement: false
  ingress_spiceproxy: false
  job_bootstrap: true
  job_cell_setup: true
  job_db_drop: false
  job_db_init: true
  job_db_init_placement: false
  job_db_sync: true
  job_image_repo_sync: true
  job_ks_endpoints: true
  job_ks_placement_endpoints: false
  job_ks_placement_service: false
  job_ks_placement_user: false
  job_ks_service: true
  job_ks_user: true
  job_rabbit_init: true
  # Ceph pool init disabled; see rbd_pool below for the values it would use.
  job_storage_init: false
  network_policy: false
  pdb_metadata: true
  pdb_osapi: true
  pod_rally_test: true
  secret_db: true
  secret_db_api: true
  secret_db_cell0: true
  secret_ingress_tls: true
  secret_keystone: true
  secret_keystone_placement: false
  secret_rabbitmq: true
  secret_registry: true
  service_ingress_metadata: false
  service_ingress_novncproxy: false
  service_ingress_osapi: false
  service_ingress_placement: false
  service_ingress_spiceproxy: false
  service_metadata: true
  service_novncproxy: true
  service_osapi: true
  service_placement: false
  service_spiceproxy: true
  statefulset_compute_ironic: false
# Networking settings per exposed service, plus compute-host SSH material.
network:
  # Neutron backend(s) in use for this deployment.
  backend:
  - ovn
  metadata:
    # presumably controls externalTrafficPolicy: Local on the Service --
    # confirm in templates.
    external_policy_local: false
    ingress:
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
      classes:
        cluster: nginx-cluster
        namespace: nginx
      public: true
    # Fixed NodePort exposure is disabled; the port below is what would be
    # used if enabled.
    node_port:
      enabled: false
      port: 30775
    # nova metadata API listen port.
    port: 8775
  novncproxy:
    ingress:
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
      classes:
        cluster: nginx-cluster
        namespace: nginx
      public: true
    node_port:
      enabled: false
      port: 30680
  osapi:
    external_policy_local: false
    ingress:
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
      classes:
        cluster: nginx-cluster
        namespace: nginx
      public: true
    node_port:
      enabled: false
      port: 30774
    # nova compute API listen port.
    port: 8774
  spiceproxy:
    ingress:
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
      classes:
        cluster: nginx-cluster
        namespace: nginx
      public: true
    node_port:
      enabled: false
      port: 30682
  # SSH between hosts -- presumably used for compute-to-compute traffic
  # such as cold migration/resize; confirm in the chart.
  # NOTE(review): a private key committed in a values file is effectively
  # public. This should be overridden with a per-deployment secret.
  ssh:
    enabled: true
    # CIDR allowed to connect; wide open by default.
    from_subnet: 0.0.0.0/0
    # Host key types to provision. NOTE(review): ssh-dss (dsa) has been
    # disabled by default since OpenSSH 7.0 and removed in newer releases --
    # verify the images in use still support generating/accepting it before
    # relying on this entry.
    key_types:
    - rsa
    - dsa
    - ecdsa
    - ed25519
    port: 8022
    # "|+" (keep chomping) preserves the trailing blank line below, so the
    # value ends with an extra newline after the END marker.
    private_key: |+
      -----BEGIN OPENSSH PRIVATE KEY-----
      b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAACFwAAAAdzc2gtcn
      NhAAAAAwEAAQAAAgEAsuEikT24WfsAfaqryVujPFgRs7/1i6UmkPtoRmCtJBQah/5/gvOX
      0FLQyDTtSmhZdSdhD8uVxA4R/WS0D3Ed8ewt/+87KQPIXerN///0UsPUyag42btNyjQL+m
      FCq1Kag+UzzyNgpMzc5lfK6Go9or0jp7duEKBxAuiBMW43HkSrgLydpKdJ71phgtt6Kmgd
      Eqg2lUtFfkJOtFc8Z8DKnriuvaAs9+IKgvMn+8QnLf3ti53N3hu0Kjle/5XgQhyvoCnxpz
      JjdvDQYvM76efoKfZtFylFYbQbB/kOJayOD+rsSxfGwGKFqUnAzqtbPBO0OvjA35WCY1rX
      LhV1wD2PFwVPG8F7swGEkuUyd5vT4nBIzEczadAibwgwDEGlJzlIECwexRROldbntQRBO1
      0S38y2MlhBmceGKgK+xg/8A8rb3cRdmYqi/kqpX8xRsp4ebqtW1Kna/w12vyanBaSHer5z
      d+WIeUAHogZUGuZ6xAN5MeThqVEXX2KlKzHTq9ELi1heF5OErzlGxE5H6GFvSLgg+nLCim
      9NLCml8UFXdG/E6+j50KwokbblZTi5x4f5jzYI6cd2jb8jelEXXENskRxMhcHtdL0Fv8de
      CQ3vw828t9DEgXl/YrTakIvJU7Bcm1XkCWEvI9sS3poPhO0B1BgQckS61600CFJ7n3MlbM
      MAAAc426EJMtuhCTIAAAAHc3NoLXJzYQAAAgEAsuEikT24WfsAfaqryVujPFgRs7/1i6Um
      kPtoRmCtJBQah/5/gvOX0FLQyDTtSmhZdSdhD8uVxA4R/WS0D3Ed8ewt/+87KQPIXerN//
      /0UsPUyag42btNyjQL+mFCq1Kag+UzzyNgpMzc5lfK6Go9or0jp7duEKBxAuiBMW43HkSr
      gLydpKdJ71phgtt6KmgdEqg2lUtFfkJOtFc8Z8DKnriuvaAs9+IKgvMn+8QnLf3ti53N3h
      u0Kjle/5XgQhyvoCnxpzJjdvDQYvM76efoKfZtFylFYbQbB/kOJayOD+rsSxfGwGKFqUnA
      zqtbPBO0OvjA35WCY1rXLhV1wD2PFwVPG8F7swGEkuUyd5vT4nBIzEczadAibwgwDEGlJz
      lIECwexRROldbntQRBO10S38y2MlhBmceGKgK+xg/8A8rb3cRdmYqi/kqpX8xRsp4ebqtW
      1Kna/w12vyanBaSHer5zd+WIeUAHogZUGuZ6xAN5MeThqVEXX2KlKzHTq9ELi1heF5OErz
      lGxE5H6GFvSLgg+nLCim9NLCml8UFXdG/E6+j50KwokbblZTi5x4f5jzYI6cd2jb8jelEX
      XENskRxMhcHtdL0Fv8deCQ3vw828t9DEgXl/YrTakIvJU7Bcm1XkCWEvI9sS3poPhO0B1B
      gQckS61600CFJ7n3MlbMMAAAADAQABAAACACjvqM+ARm4hKQ2ZP1MHa6XlqbdvGKxqkRbg
      0g45FaaKC0BQF7JKONXt/eWzZxJ+KurKpkXNG2sQpUFOCH74LhgtKqHFh85tFS+ZCqepVa
      qTMmEr5Ea/vJ+npY4ZL/6xGrdQfrjKqdVNJMh6Eu95UL2rdztsmP2pGfPGbbp4NJjysiT2
      BUWnGx5xmpstyxllVjHs2QD1huPMHOLci/BaiIQmU6cFvwTDcBBjwSS20RiPYgtvg6tuv5
      5iHsFy9S1sDy0iLDFNMMIJfkcGzxFKhzEDOhfw0/X2F7GNZ5Vrld9CrezJdiyU323MeRlS
      0cA/6iEMyGbiekJuiYv8UUQ7UkFZrzRCJQYnpMO71Sf1irO0stHQaE+cOrRsO4tx2GsFOF
      XtpNfTGa3IRAG6pCZqT4+RsVGKg7bvBtUp/4DH1axIAYYrXrS7VqiB+q1rqnjEVHU6B0Fg
      Uam1SBoQgAx/bR/2XdfBGjgH+eF1rRpAMP3OdPktS9kwOdW+IjyNbnHgdZL5B8BS/6Xvef
      5XQDaC20Z2DnqiOrTpDOxCz1Jp3MF9e1r2mNQzbS/n/FqHOgtBwtTIqxBExVXInqBoa2vJ
      ATucG+jPfEPsTmkSz73rFH3xey70qwlY3WQ4U6GyGYHrqU0mjk2YXe+6EH8g4HIhtK3nyd
      NE0KfTeMMNiFWWF+FxAAABAHRr7gK+tw8qx+DZEhS3WpmC+7WinMAy5hiioPISq4HZmEfj
      38Fn9ZM2k/IMifA7CZLJi6ARwXjpeDmVfXMV2myGpzfVuSe91MMaRRKMIA+YHodRk4pOIX
      +Dy0WMAv2bVs2al1eoiUpZm5VQvZhzmVTe36YnTZWOcLVidNPejZNgfy+ZnkuH1T7uaRPL
      HDQ3kw2j2pcoR1A23W8dVuBjxuGBHeH54MCwbNZhMGD5lsc6g6ExowTwW3LSrYx0c22ZqF
      L+XH3lp4FSlL1bwNoXt7JhHQ4QlogeDktvkBNJzJXPsKtF+PdhPv2xxTz8kGiQcU1QfP4D
      npvDeyL52t9kaKcAAAEBALw8ehTdwwrUkUwmxH+20M/mKAPcXWsOjmK1xJSWLI2PDgihWQ
      46BFfqYTyW291ReYN678u0reu+zgQPJ76753s/C5cZBN5G5CBg7PeTkLX8cemH6tvsZfVe
      DUsQ3WcVDswc4jqUbcYomIegva7PbebtNmOmpNuowUGXENCsRPOV1HYpzEzUidphAk+ZZ3
      yybo3a8HW0TRWnLiCEJ83vBvT6guolBd4pA/PnFjeu4wLKdn3faVeTVj3ejesz5iFo527H
      JgQmnupk/+MIV1/ZfbOtf2EA20jv/VZzAOOvJHcadRzPAVy5cKTwS8tD/XMDHfv/dVPzpk
      UP5ktC1iAfI08AAAEBAPNGWfYyQuhEAS2Qf+QduvXtByO56VN1Bdc3lH8lOOkLmjkngYgV
      /LChMG+o/U+E4AjVEG/9YYTm3YixsAMZ38o9O1/hIqboetUw8BO6m94kwpPrMIueVuJ4Fl
      C2MhSom+KJJApv+up1oQGk21W5OwP0R6ltbkju1xJXjloAh9Or718aOUXNC3mYMxtHnbAI
      Tv8St853+reZRUk3Rdz59/BY3CSxlRn3Ks9hieYqkHniAQCJe0A84zgjjZ0GLRrLOO5+XC
      rLdDZ5vZV2Wf/6Dvg1pvpDBHIx1vNISlyAHq5UmqK2zuSvdbHqSzs5Zt9NZ+RKJqVnJvVc
      CrT+fnPf0k0AAAAAAQID
      -----END OPENSSH PRIVATE KEY-----

    public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCy4SKRPbhZ+wB9qqvJW6M8WBGzv/WLpSaQ+2hGYK0kFBqH/n+C85fQUtDINO1KaFl1J2EPy5XEDhH9ZLQPcR3x7C3/7zspA8hd6s3///RSw9TJqDjZu03KNAv6YUKrUpqD5TPPI2CkzNzmV8roaj2ivSOnt24QoHEC6IExbjceRKuAvJ2kp0nvWmGC23oqaB0SqDaVS0V+Qk60VzxnwMqeuK69oCz34gqC8yf7xCct/e2Lnc3eG7QqOV7/leBCHK+gKfGnMmN28NBi8zvp5+gp9m0XKUVhtBsH+Q4lrI4P6uxLF8bAYoWpScDOq1s8E7Q6+MDflYJjWtcuFXXAPY8XBU8bwXuzAYSS5TJ3m9PicEjMRzNp0CJvCDAMQaUnOUgQLB7FFE6V1ue1BEE7XRLfzLYyWEGZx4YqAr7GD/wDytvdxF2ZiqL+SqlfzFGynh5uq1bUqdr/DXa/JqcFpId6vnN35Yh5QAeiBlQa5nrEA3kx5OGpURdfYqUrMdOr0QuLWF4Xk4SvOUbETkfoYW9IuCD6csKKb00sKaXxQVd0b8Tr6PnQrCiRtuVlOLnHh/mPNgjpx3aNvyN6URdcQ2yRHEyFwe10vQW/x14JDe/Dzby30MSBeX9itNqQi8lTsFybVeQJYS8j2xLemg+E7QHUGBByRLrXrTQIUnufcyVsww==
# Pod NetworkPolicy rules (rendered only when manifests.network_policy is
# true). An empty rule ({}) matches all traffic, i.e. allow-all in both
# directions.
network_policy:
  nova:
    egress:
    - {}
    ingress:
    - {}
# Pod-level settings shared by the chart's workloads.
pod:
  affinity:
    # Soft pod anti-affinity: prefer spreading replicas across hosts
    # (preferred, not required, with weight 10).
    anti:
      topologyKey:
        default: kubernetes.io/hostname
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      weight:
        default: 10
  labels:
    # Whether to add the app.kubernetes.io/* recommended labels.
    include_app_kubernetes_io: false
  lifecycle:
    # PodDisruptionBudget minAvailable for the two API services; 0 means
    # voluntary disruptions are never blocked.
    disruption_budget:
      metadata:
        min_available: 0
      osapi:
        min_available: 0
    # terminationGracePeriodSeconds per API service.
    termination_grace_period:
      metadata:
        timeout: 30
      osapi:
        timeout: 30
    upgrades:
      daemonsets:
        # Rolling update of the compute DaemonSet, one node at a time.
        compute:
          enabled: true
          max_unavailable: 1
          min_ready_seconds: 0
        pod_replacement_strategy: RollingUpdate
      deployments:
        pod_replacement_strategy: RollingUpdate
        revision_history: 3
        rolling_update:
          max_surge: 3
          max_unavailable: 1
  # Extra user-supplied volumes/volumeMounts per component; all empty
  # (null) by default.
  mounts:
    nova_api_metadata:
      init_container: null
      nova_api_metadata:
        volumeMounts: null
        volumes: null
    nova_api_osapi:
      init_container: null
      nova_api_osapi:
        volumeMounts: null
        volumes: null
    nova_bootstrap:
      init_container: null
      nova_bootstrap:
        volumeMounts: null
        volumes: null
    nova_compute:
      init_container: null
      nova_compute:
        volumeMounts: null
        volumes: null
    nova_compute_ironic:
      init_container: null
      nova_compute_ironic:
        volumeMounts: null
        volumes: null
    nova_conductor:
      init_container: null
      nova_conductor:
        volumeMounts: null
        volumes: null
    nova_db_sync:
      nova_db_sync:
        volumeMounts: null
        volumes: null
    nova_novncproxy:
      init_novncproxy: null
      nova_novncproxy:
        volumeMounts: null
        volumes: null
    nova_scheduler:
      init_container: null
      nova_scheduler:
        volumeMounts: null
        volumes: null
    nova_spiceproxy:
      init_spiceproxy: null
      nova_spiceproxy:
        volumeMounts: null
        volumes: null
    nova_tests:
      init_container: null
      nova_tests:
        volumeMounts: null
        volumes: null
  # Liveness/readiness/startup probe tuning per component.
  probes:
    api-metadata:
      default:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 5
            periodSeconds: 10
            timeoutSeconds: 5
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 5
            periodSeconds: 10
            timeoutSeconds: 5
    api-osapi:
      default:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 5
            periodSeconds: 10
            timeoutSeconds: 5
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 5
            periodSeconds: 10
            timeoutSeconds: 5
    compute:
      default:
        liveness:
          enabled: true
          params:
            periodSeconds: 90
            timeoutSeconds: 70
        readiness:
          enabled: true
          params:
            periodSeconds: 90
            timeoutSeconds: 70
        # Startup probe gives the agent up to 120 x 10s = 20 minutes to
        # come up before liveness takes over.
        startup:
          enabled: true
          params:
            failureThreshold: 120
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 70
    compute-spice-proxy:
      default:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 60
            timeoutSeconds: 15
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 60
            timeoutSeconds: 15
    conductor:
      default:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 90
            timeoutSeconds: 70
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 80
            periodSeconds: 90
            timeoutSeconds: 70
    novncproxy:
      default:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 60
            timeoutSeconds: 15
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 60
            timeoutSeconds: 15
    # NOTE(review): these two sit under `probes` but look like RPC settings
    # for the health-check command rather than Kubernetes probe params --
    # confirm usage in the chart's health-probe script.
    rpc_retries: 2
    rpc_timeout: 60
    scheduler:
      default:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 90
            timeoutSeconds: 70
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 80
            periodSeconds: 90
            timeoutSeconds: 70
  # Replica counts for the control-plane deployments (the compute agent is
  # a DaemonSet and is not listed here).
  replicas:
    api_metadata: 1
    compute_ironic: 1
    conductor: 1
    novncproxy: 1
    osapi: 1
    scheduler: 1
    spiceproxy: 1
  # Resource requests/limits per component and per job. Note
  # resources.enabled below is false -- presumably these values are only
  # rendered when it is true; confirm in templates.
  resources:
    api:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    api_metadata:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    compute:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    compute_ironic:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    conductor:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    # Master switch for applying the requests/limits in this section.
    enabled: false
    jobs:
      archive_deleted_rows:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      bootstrap:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      cell_setup:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      db_drop:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      db_init:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      db_sync:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      image_repo_sync:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      ks_endpoints:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      ks_service:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      ks_user:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      rabbit_init:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      service_cleaner:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      storage_init:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      tests:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
    novncproxy:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    scheduler:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    spiceproxy:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    ssh:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
  # securityContext per job/component. Most containers drop privilege
  # escalation and use a read-only root filesystem; pods run as UID 42424
  # (presumably the nova user in the images -- confirm).
  security_context:
    archive_deleted_rows:
      container:
        nova_archive_deleted_rows:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_archive_deleted_rows_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
      pod:
        runAsUser: 42424
    bootstrap:
      container:
        bootstrap:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_wait_for_computes_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
      pod:
        runAsUser: 42424
    cell_setup:
      container:
        nova_cell_setup:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
      pod:
        runAsUser: 42424
    nova:
      container:
        # Runs as root -- presumably to fix up Ceph file ownership; confirm.
        ceph_perms:
          readOnlyRootFilesystem: true
          runAsUser: 0
        nova_api:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_api_metadata_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        # Privileged -- presumably required for host/hypervisor access on
        # compute nodes.
        nova_compute:
          privileged: true
          readOnlyRootFilesystem: true
        nova_compute_init:
          readOnlyRootFilesystem: true
          runAsUser: 0
        nova_compute_spice_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_compute_ssh:
          privileged: true
          runAsUser: 0
        nova_compute_ssh_init:
          runAsUser: 0
        nova_compute_vnc_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_conductor:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_novncproxy:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_novncproxy_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        # NOTE(review): "assests" is spelled differently from spiceproxy's
        # "assets" key below -- this likely mirrors the container name in
        # the chart template, so confirm there before renaming.
        nova_novncproxy_init_assests:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_osapi:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_scheduler:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_spiceproxy:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_spiceproxy_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_spiceproxy_init_assets:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        tungstenfabric_compute_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
      pod:
        runAsUser: 42424
    nova_cell_setup:
      container:
        nova_cell_setup:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_cell_setup_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_wait_for_computes_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
      pod:
        runAsUser: 42424
    service_cleaner:
      container:
        nova_service_cleaner:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
      pod:
        runAsUser: 42424
  # Tolerations are defined but disabled; when enabled they allow nova pods
  # onto master/control-plane tainted nodes.
  tolerations:
    nova:
      enabled: false
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/control-plane
        operator: Exists
  # Use the node's FQDN for the compute host identity -- presumably sets
  # the hypervisor hostname; confirm in templates.
  use_fqdn:
    compute: true
  # NOTE(review): camelCase key is inconsistent with the snake_case used
  # elsewhere in this file; likely fixed by the consuming template.
  useHostNetwork:
    novncproxy: false
# Ceph RBD pool parameters -- presumably consumed by the storage-init job,
# which is disabled via manifests.job_storage_init in this file.
rbd_pool:
  app_name: nova-vms
  chunk_size: 8
  crush_rule: replicated_rule
  replication: 3
# Release-group override; null presumably falls back to the Helm release
# name -- confirm in helm-toolkit.
release_group: null
# Names of the Kubernetes Secrets that hold each credential set.
secrets:
  # Keystone accounts used by the chart.
  identity:
    admin: nova-keystone-admin
    nova: nova-keystone-user
    test: nova-keystone-test
  oci_image_registry:
    nova: nova-oci-image-registry
  # Database credentials: main, API and cell0 databases each have their
  # own admin/user secret pair.
  oslo_db:
    admin: nova-db-admin
    nova: nova-db-user
  oslo_db_api:
    admin: nova-db-api-admin
    nova: nova-db-api-user
  oslo_db_cell0:
    admin: nova-db-cell0-admin
    nova: nova-db-cell0-user
  oslo_messaging:
    admin: nova-rabbitmq-admin
    nova: nova-rabbitmq-user
  # TLS certificate secrets per exposed endpoint.
  tls:
    compute:
      osapi:
        internal: nova-tls-api
        public: nova-tls-public
    compute_metadata:
      metadata:
        internal: metadata-tls-metadata
        public: metadata-tls-public
    compute_novnc_proxy:
      novncproxy:
        internal: nova-novncproxy-tls-proxy
        public: nova-novncproxy-tls-public
      # Certificate for VeNCrypt-encrypted VNC traffic.
      vencrypt:
        internal: nova-novncproxy-vencrypt
    compute_spice_proxy:
      spiceproxy:
        internal: nova-spiceproxy-tls-proxy
        public: nova-spiceproxy-tls-public
# Global toggles for TLS on backend connections (keystone, database,
# messaging); all disabled here.
tls:
  identity: false
  oslo_db: false
  oslo_messaging: false
