# COMPUTED VALUES:
bootstrap:
  enabled: true
  ks_user: admin
  script: null
  structured:
    flavors:
      enabled: false
      options:
        m1_large:
          disk: 80
          name: m1.large
          ram: 8192
          vcpus: 4
        m1_medium:
          disk: 40
          name: m1.medium
          ram: 4096
          vcpus: 2
        m1_small:
          disk: 20
          name: m1.small
          ram: 2048
          vcpus: 1
        m1_tiny:
          disk: 1
          name: m1.tiny
          ram: 512
          vcpus: 1
        m1_xlarge:
          disk: 160
          name: m1.xlarge
          ram: 16384
          vcpus: 8
  wait_for_computes:
    enabled: false
    remaining_wait: 300
    scripts:
      init_script: |
        # This runs in a bootstrap init container. It counts the number of compute nodes.
        COMPUTE_NODES=$(kubectl get nodes -o custom-columns=NAME:.metadata.name -l openstack-compute-node=enabled --no-headers | sort)
        /bin/echo $COMPUTE_NODES > /tmp/compute_nodes.txt
      wait_script: |
        # This script runs in the main bootstrap container just before the
        # bootstrap.script is called.
        COMPUTE_HOSTS=`cat /tmp/compute_nodes.txt | wc -w`
        if [[ $COMPUTE_HOSTS == 0 ]]; then
          echo "There are no compute hosts found!"
          exit 1
        fi

        # Wait for all hypervisors to come up before moving on with the deployment
        HYPERVISOR_WAIT=true
        WAIT_AFTER_READY=0
        SLEEP=5
        while [[ $HYPERVISOR_WAIT == true ]]; do
          # It's possible that the openstack command may fail due to not being
          # able to reach the compute service
          set +e
          HYPERVISORS=$(openstack hypervisor list -f value -c 'Hypervisor Hostname' | wc -w)
          set -e

          PERCENT_READY=$(( $HYPERVISORS * 100 / $COMPUTE_HOSTS ))
          if [[ $PERCENT_READY -ge $WAIT_PERCENTAGE ]]; then
            echo "Hypervisor ready percentage is $PERCENT_READY"
            if [[ $PERCENT_READY == 100 ]]; then
              HYPERVISOR_WAIT=false
              echo "All hypervisors are ready."
            elif [[ $WAIT_AFTER_READY -ge $REMAINING_WAIT ]]; then
              HYPERVISOR_WAIT=false
              echo "Waited the configured time -- $HYPERVISORS out of $COMPUTE_HOSTS hypervisor(s) ready -- proceeding with the bootstrap."
            else
              sleep $SLEEP
              WAIT_AFTER_READY=$(( $WAIT_AFTER_READY + $SLEEP ))
            fi
          else
            echo "Waiting $SLEEP seconds for enough hypervisors to be discovered..."
            sleep $SLEEP
          fi
        done
    wait_percentage: 70
ceph_client:
  configmap: ceph-etc
  user_secret_name: pvc-ceph-client-key
conf:
  api_audit_map:
    DEFAULT:
      target_endpoint_type: None
    custom_actions:
      delete: delete
      disable: disable
      enable: enable
      os-migrations/get: read
      os-server-password/post: update
      reboot: start/reboot
      shutdown: stop/shutdown
      startup: start/startup
    path_keywords:
      action: None
      add: None
      configure-project: None
      defaults: None
      delete: None
      detail: None
      diagnostics: None
      disable: None
      enable: None
      entries: entry
      extensions: alias
      flavors: flavor
      images: image
      ips: label
      limits: None
      metadata: key
      os-agents: os-agent
      os-aggregates: os-aggregate
      os-availability-zone: None
      os-certificates: None
      os-cloudpipe: None
      os-extra_specs: key
      os-fixed-ips: ip
      os-flavor-access: None
      os-floating-ip-dns: domain
      os-floating-ip-pools: None
      os-floating-ips: floating-ip
      os-floating-ips-bulk: host
      os-hosts: host
      os-hypervisors: hypervisor
      os-instance-actions: instance-action
      os-keypairs: keypair
      os-migrations: None
      os-networks: network
      os-quota-sets: tenant
      os-security-group-rules: rule
      os-security-groups: security_group
      os-server-password: None
      os-services: None
      os-simple-tenant-usage: tenant
      os-snapshots: snapshot
      os-virtual-interfaces: None
      os-volume-types: volume-type
      os-volume_attachments: attachment
      os-volumes: volume
      os-volumes_boot: None
      reboot: None
      servers: server
      shutdown: None
      startup: None
      statistics: None
    service_endpoints:
      compute: service/compute
  archive_deleted_rows:
    all_cells: false
    before:
      date: 'nil'
      enabled: false
    max_rows:
      enabled: false
      rows: 1000
    purge_deleted_rows: false
    until_completion: true
  ceph:
    admin_keyring: null
    cinder:
      keyring: null
      secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
      user: cinder
    enabled: true
  enable_iscsi: false
  hypervisor:
    address_search_enabled: true
    host_interface: null
    host_network_cidr: 0/0
  libvirt:
    address_search_enabled: true
    live_migration_interface: null
    live_migration_network_cidr: 0/0
  logging:
    formatter_context:
      class: oslo_log.formatters.ContextFormatter
      datefmt: '%Y-%m-%d %H:%M:%S'
    formatter_default:
      datefmt: '%Y-%m-%d %H:%M:%S'
      format: '%(message)s'
    formatters:
      keys:
      - context
      - default
    handler_null:
      args: ()
      class: logging.NullHandler
      formatter: default
    handler_stderr:
      args: (sys.stderr,)
      class: StreamHandler
      formatter: context
    handler_stdout:
      args: (sys.stdout,)
      class: StreamHandler
      formatter: context
    handlers:
      keys:
      - stdout
      - stderr
      - "null"
    logger_amqp:
      handlers: stderr
      level: WARNING
      qualname: amqp
    logger_amqplib:
      handlers: stderr
      level: WARNING
      qualname: amqplib
    logger_boto:
      handlers: stderr
      level: WARNING
      qualname: boto
    logger_eventletwsgi:
      handlers: stderr
      level: WARNING
      qualname: eventlet.wsgi.server
    logger_nova:
      handlers:
      - stdout
      level: INFO
      qualname: nova
    logger_os.brick:
      handlers:
      - stdout
      level: INFO
      qualname: os.brick
    logger_root:
      handlers: "null"
      level: WARNING
    logger_sqlalchemy:
      handlers: stderr
      level: WARNING
      qualname: sqlalchemy
    loggers:
      keys:
      - root
      - nova
      - os.brick
  nova:
    DEFAULT:
      allow_resize_to_same_host: true
      compute_driver: libvirt.LibvirtDriver
      cpu_allocation_ratio: 4.5
      default_ephemeral_format: ext4
      disk_allocation_ratio: 3
      instance_usage_audit: true
      instance_usage_audit_period: hour
      metadata_workers: 2
      my_ip: 0.0.0.0
      osapi_compute_listen: 0.0.0.0
      osapi_compute_listen_port: null
      osapi_compute_workers: 2
      ram_allocation_ratio: 0.9
      resume_guests_state_on_host_boot: true
      state_path: /var/lib/nova
    api:
      list_records_by_skipping_down_cells: false
    api_database:
      connection: mysql+pymysql://nova:TZGv0AFFe0sBRCaz36BzcuFj59yeqXXd@percona-xtradb-haproxy.openstack.svc.cluster.local:3306/nova_api
      max_retries: -1
    barbican:
      barbican_endpoint_type: internal
    cache:
      backend: oslo_cache.memcache_pool
      enabled: true
    cell0_database:
      connection: mysql+pymysql://nova:TZGv0AFFe0sBRCaz36BzcuFj59yeqXXd@percona-xtradb-haproxy.openstack.svc.cluster.local:3306/nova_cell0
      max_retries: -1
    cinder:
      auth_type: password
      auth_url: http://keystone-api.openstack.svc.cluster.local:5000/
      catalog_info: volumev3::internalURL
      os_region_name: RegionOne
      password: on94HMrCoimMI0O4nHRFOx5ykmeXDnbA
      project_domain_name: service
      project_name: service
      user_domain_name: service
      username: cinder-RegionOne
    compute:
      consecutive_build_service_disable_threshold: 0
    conductor:
      workers: 2
    cors:
      allow_headers: X-Auth-Token,X-OpenStack-Nova-API-Version
      allowed_origin: '*'
    database:
      connection_recycle_time: 600
      max_overflow: 50
      max_pool_size: 5
      max_retries: -1
      pool_timeout: 30
    filter_scheduler:
      available_filters:
        type: multistring
        values:
        - nova.scheduler.filters.all_filters
        - nova_scheduler_filters.failure_domain_filter.FailureDomainFilter
      enabled_filters: AvailabilityZoneFilter, ComputeFilter, AggregateTypeAffinityFilter,
        ComputeCapabilitiesFilter, PciPassthroughFilter, ImagePropertiesFilter, ServerGroupAntiAffinityFilter,
        ServerGroupAffinityFilter, FailureDomainFilter
      image_properties_default_architecture: x86_64
      max_instances_per_host: 200
    glance:
      enable_rbd_download: true
      num_retries: 3
    ironic:
      api_endpoint: http://ironic-api.openstack.svc.cluster.local:6385
      auth_type: password
      auth_url: http://keystone-api.openstack.svc.cluster.local:5000/
      auth_version: v3
      memcache_secret_key: X1uiRtZWTda3T43LlRCt9vSLDQicVmn7
      memcache_servers: memcached.openstack.svc.cluster.local:11211
      password: pcZPwaKRcSnRCCuJz4MP5umy3gLYJorW
      project_domain_name: service
      project_name: service
      region_name: RegionOne
      user_domain_name: service
      username: ironic-RegionOne
    keystone_authtoken:
      auth_type: password
      auth_uri: http://keystone-api.openstack.svc.cluster.local:5000/
      auth_url: http://keystone-api.openstack.svc.cluster.local:5000/
      auth_version: v3
      memcache_secret_key: X1uiRtZWTda3T43LlRCt9vSLDQicVmn7
      memcache_security_strategy: ENCRYPT
      memcached_servers: memcached.openstack.svc.cluster.local:11211
      password: AjQQXBYFrZ1oWBtWta24DhPJZrw65Lqf
      project_domain_name: service
      project_name: service
      region_name: RegionOne
      service_token_roles: service
      service_token_roles_required: true
      service_type: compute
      user_domain_name: service
      username: nova-RegionOne
    libvirt:
      connection_uri: qemu+unix:///system?socket=/run/libvirt/libvirt-sock
      disk_cachemodes: network=writeback
      hw_disk_discard: unmap
      images_rbd_ceph_conf: /etc/ceph/ceph.conf
      images_rbd_pool: vms
      images_type: qcow2
      live_migration_scheme: tls
      rbd_secret_uuid: 457eb676-33da-42ec-9a8c-9293d545c337
      rbd_user: cinder
      swtpm_enabled: true
      swtpm_group: swtpm
      swtpm_user: swtpm
    neutron:
      auth_type: password
      auth_version: v3
      metadata_proxy_shared_secret: QguhJvexxKr1WOTvEb2SxMFehhc2iZIH
      service_metadata_proxy: true
    notifications:
      notify_on_state_change: vm_and_task_state
    os_vif_ovs:
      ovsdb_connection: unix:/run/openvswitch/db.sock
    oslo_concurrency:
      lock_path: /var/lib/nova/tmp
    oslo_messaging_notifications:
      driver: noop
    oslo_messaging_rabbit:
      rabbit_ha_queues: true
    oslo_middleware:
      enable_proxy_headers_parsing: true
    oslo_policy:
      policy_file: /etc/nova/policy.yaml
    placement:
      auth_type: password
      auth_url: http://keystone-api.openstack.svc.cluster.local:5000/
      auth_version: v3
      password: RVB8sdndbPWqmbqdeFXmOZ6e9BlqewST
      project_domain_name: service
      project_name: service
      region_name: RegionOne
      user_domain_name: service
      username: placement-RegionOne
    privsep_osbrick:
      helper_command: sudo nova-rootwrap /etc/nova/rootwrap.conf privsep-helper --config-file
        /etc/nova/nova.conf
    scheduler:
      discover_hosts_in_cells_interval: 30
      max_attempts: 3
      workers: 2
    service_user:
      auth_type: password
      auth_url: http://keystone-api.openstack.svc.cluster.local:5000/
      password: AjQQXBYFrZ1oWBtWta24DhPJZrw65Lqf
      project_domain_name: service
      project_name: service
      region_name: RegionOne
      send_service_user_token: true
      user_domain_name: service
      username: nova-RegionOne
    spice:
      html5proxy_host: 0.0.0.0
      server_listen: 0.0.0.0
    upgrade_levels:
      compute: auto
    vnc:
      auth_schemes: vencrypt,none
      novncproxy_host: 0.0.0.0
      server_listen: 0.0.0.0
    wsgi:
      api_paste_config: /etc/nova/api-paste.ini
  nova_api_uwsgi:
    uwsgi:
      add-header: 'Connection: close'
      buffer-size: 65535
      chunked-input-limit: "4096000"
      die-on-term: true
      enable-threads: true
      exit-on-reload: false
      hook-master-start: unix_signal:15 gracefully_kill_them_all
      http-auto-chunked: true
      http-raw-body: true
      lazy-apps: true
      log-x-forwarded-for: true
      master: true
      need-app: true
      procname-prefix-spaced: 'nova-api:'
      route-user-agent: '^kube-probe.* donotlog:'
      socket-timeout: 10
      thunder-lock: true
      worker-reload-mercy: 80
      wsgi-file: /var/lib/openstack/bin/nova-api-wsgi
  nova_compute_redactions:
  - database
  - api_database
  - cell0_database
  nova_ironic:
    DEFAULT:
      compute_driver: ironic.IronicDriver
      cpu_allocation_ratio: 1
      force_config_drive: true
      ram_allocation_ratio: 1
      reserved_host_memory_mb: 0
      scheduler_host_manager: ironic_host_manager
  nova_metadata_uwsgi:
    uwsgi:
      add-header: 'Connection: close'
      buffer-size: 65535
      chunked-input-limit: "4096000"
      die-on-term: true
      enable-threads: true
      exit-on-reload: false
      hook-master-start: unix_signal:15 gracefully_kill_them_all
      http-auto-chunked: true
      http-raw-body: true
      lazy-apps: true
      log-x-forwarded-for: true
      master: true
      need-app: true
      procname-prefix-spaced: 'nova-metadata:'
      route-user-agent: '^kube-probe.* donotlog:'
      socket-timeout: 10
      thunder-lock: true
      worker-reload-mercy: 80
      wsgi-file: /var/lib/openstack/bin/nova-metadata-wsgi
  nova_sudoers: |
    # This sudoers file supports rootwrap for both Kolla and LOCI Images.
    Defaults !requiretty
    Defaults secure_path="/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/snap/bin:/var/lib/openstack/bin:/var/lib/kolla/venv/bin"
    nova ALL = (root) NOPASSWD: /var/lib/kolla/venv/bin/nova-rootwrap /etc/nova/rootwrap.conf *, /var/lib/openstack/bin/nova-rootwrap /etc/nova/rootwrap.conf *
  paste:
    app:metaapp:
      paste.app_factory: nova.api.metadata.handler:MetadataRequestHandler.factory
    app:osapi_compute_app_v21:
      paste.app_factory: nova.api.openstack.compute:APIRouterV21.factory
    app:oscomputeversionapp:
      paste.app_factory: nova.api.openstack.compute.versions:Versions.factory
    composite:metadata:
      /: meta
      use: egg:Paste#urlmap
    composite:openstack_compute_api_v21:
      keystone: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken
        audit keystonecontext osapi_compute_app_v21
      noauth2: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2
        osapi_compute_app_v21
      use: call:nova.api.auth:pipeline_factory_v21
    composite:openstack_compute_api_v21_legacy_v2_compatible:
      keystone: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit authtoken
        audit keystonecontext legacy_v2_compatible osapi_compute_app_v21
      noauth2: cors http_proxy_to_wsgi compute_req_id faultwrap sizelimit noauth2
        legacy_v2_compatible osapi_compute_app_v21
      use: call:nova.api.auth:pipeline_factory_v21
    composite:osapi_compute:
      /: oscomputeversions
      /v2: openstack_compute_api_v21_legacy_v2_compatible
      /v2.1: openstack_compute_api_v21
      use: call:nova.api.openstack.urlmap:urlmap_factory
    filter:audit:
      audit_map_file: /etc/nova/api_audit_map.conf
      paste.filter_factory: keystonemiddleware.audit:filter_factory
    filter:authtoken:
      paste.filter_factory: keystonemiddleware.auth_token:filter_factory
    filter:compute_req_id:
      paste.filter_factory: nova.api.compute_req_id:ComputeReqIdMiddleware.factory
    filter:cors:
      oslo_config_project: nova
      paste.filter_factory: oslo_middleware.cors:filter_factory
    filter:faultwrap:
      paste.filter_factory: nova.api.openstack:FaultWrapper.factory
    filter:http_proxy_to_wsgi:
      paste.filter_factory: oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
    filter:keystonecontext:
      paste.filter_factory: nova.api.auth:NovaKeystoneContext.factory
    filter:legacy_v2_compatible:
      paste.filter_factory: nova.api.openstack:LegacyV2CompatibleWrapper.factory
    filter:noauth2:
      paste.filter_factory: nova.api.openstack.auth:NoAuthMiddleware.factory
    filter:request_id:
      paste.filter_factory: oslo_middleware:RequestId.factory
    filter:sizelimit:
      paste.filter_factory: oslo_middleware:RequestBodySizeLimiter.factory
    pipeline:meta:
      pipeline: cors metaapp
    pipeline:oscomputeversions:
      pipeline: faultwrap http_proxy_to_wsgi oscomputeversionapp
  policy: {}
  rabbitmq:
    policies:
    - apply-to: all
      definition:
        ha-mode: all
        ha-sync-mode: automatic
        message-ttl: 70000
      name: ha_ttl_nova
      pattern: ^(?!(amq\.|reply_)).*
      priority: 0
      vhost: nova
  rally_tests:
    clean_up: |
      FLAVORS=$(openstack flavor list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }')
      if [ -n "$FLAVORS" ]; then
        echo $FLAVORS | xargs openstack flavor delete
      fi
      SERVERS=$(openstack server list -f value --all | awk '$2 ~ /^s_rally_/ { print $1 }')
      if [ -n "$SERVERS" ]; then
        echo $SERVERS | xargs openstack server delete
      fi
      IMAGES=$(openstack image list -f value | awk '$2 ~ /^c_rally_/ { print $1 }')
      if [ -n "$IMAGES" ]; then
        echo $IMAGES | xargs openstack image delete
      fi
    run_tempest: false
    tests:
      NovaAggregates.create_and_get_aggregate_details:
      - args:
          availability_zone: nova
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaAggregates.create_and_update_aggregate:
      - args:
          availability_zone: nova
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaAggregates.list_aggregates:
      - runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaAvailabilityZones.list_availability_zones:
      - args:
          detailed: true
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaFlavors.create_and_delete_flavor:
      - args:
          disk: 1
          ram: 500
          vcpus: 1
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaFlavors.create_and_list_flavor_access:
      - args:
          disk: 1
          ram: 500
          vcpus: 1
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaFlavors.create_flavor:
      - args:
          disk: 1
          ram: 500
          vcpus: 1
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaFlavors.create_flavor_and_add_tenant_access:
      - args:
          disk: 1
          ram: 500
          vcpus: 1
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaFlavors.create_flavor_and_set_keys:
      - args:
          disk: 1
          extra_specs:
            quota:disk_read_bytes_sec: 10240
          ram: 500
          vcpus: 1
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaFlavors.list_flavors:
      - args:
          detailed: true
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaHypervisors.list_and_get_hypervisors:
      - args:
          detailed: true
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaHypervisors.list_and_get_uptime_hypervisors:
      - args:
          detailed: true
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaHypervisors.list_and_search_hypervisors:
      - args:
          detailed: true
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaHypervisors.list_hypervisors:
      - args:
          detailed: true
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaHypervisors.statistics_hypervisors:
      - args: {}
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaKeypair.create_and_delete_keypair:
      - runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaKeypair.create_and_list_keypairs:
      - runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaServerGroups.create_and_list_server_groups:
      - args:
          all_projects: false
          kwargs:
            policies:
            - affinity
        runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
      NovaServices.list_services:
      - runner:
          concurrency: 1
          times: 1
          type: constant
        sla:
          failure_rate:
            max: 0
  rootwrap: |
    # Configuration for nova-rootwrap
    # This file should be owned by (and only-writeable by) the root user

    [DEFAULT]
    # List of directories to load filter definitions from (separated by ',').
    # These directories MUST all be only writeable by root !
    filters_path=/etc/nova/rootwrap.d,/usr/share/nova/rootwrap

    # List of directories to search executables in, in case filters do not
    # explicitly specify a full path (separated by ',')
    # If not specified, defaults to system PATH environment variable.
    # These directories MUST all be only writeable by root !
    exec_dirs=/sbin,/usr/sbin,/bin,/usr/bin,/usr/local/bin,/usr/local/sbin,/var/lib/openstack/bin,/var/lib/kolla/venv/bin

    # Enable logging to syslog
    # Default value is False
    use_syslog=False

    # Which syslog facility to use.
    # Valid values include auth, authpriv, syslog, local0, local1...
    # Default value is 'syslog'
    syslog_log_facility=syslog

    # Which messages to log.
    # INFO means log all usage
    # ERROR means only log unsuccessful attempts
    syslog_log_level=ERROR
  rootwrap_filters:
    api_metadata:
      content: |
        # nova-rootwrap command filters for api-metadata nodes
        # This is needed on nova-api hosts running with "metadata" in enabled_apis
        # or when running nova-api-metadata
        # This file should be owned by (and only-writeable by) the root user

        [Filters]
        # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
        iptables-save: CommandFilter, iptables-save, root
        ip6tables-save: CommandFilter, ip6tables-save, root

        # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
        iptables-restore: CommandFilter, iptables-restore, root
        ip6tables-restore: CommandFilter, ip6tables-restore, root
      pods:
      - metadata
    compute:
      content: |
        # nova-rootwrap command filters for compute nodes
        # This file should be owned by (and only-writeable by) the root user

        [Filters]
        # nova/virt/disk/mount/api.py: 'kpartx', '-a', device
        # nova/virt/disk/mount/api.py: 'kpartx', '-d', device
        kpartx: CommandFilter, kpartx, root

        # nova/virt/xenapi/vm_utils.py: tune2fs, -O ^has_journal, part_path
        # nova/virt/xenapi/vm_utils.py: tune2fs, -j, partition_path
        tune2fs: CommandFilter, tune2fs, root

        # nova/virt/disk/mount/api.py: 'mount', mapped_device
        # nova/virt/disk/api.py: 'mount', '-o', 'bind', src, target
        # nova/virt/xenapi/vm_utils.py: 'mount', '-t', 'ext2,ext3,ext4,reiserfs'..
        # nova/virt/configdrive.py: 'mount', device, mountdir
        # nova/virt/libvirt/volume.py: 'mount', '-t', 'sofs' ...
        mount: CommandFilter, mount, root

        # nova/virt/disk/mount/api.py: 'umount', mapped_device
        # nova/virt/disk/api.py: 'umount' target
        # nova/virt/xenapi/vm_utils.py: 'umount', dev_path
        # nova/virt/configdrive.py: 'umount', mountdir
        umount: CommandFilter, umount, root

        # nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-c', device, image
        # nova/virt/disk/mount/nbd.py: 'qemu-nbd', '-d', device
        qemu-nbd: CommandFilter, qemu-nbd, root

        # nova/virt/disk/mount/loop.py: 'losetup', '--find', '--show', image
        # nova/virt/disk/mount/loop.py: 'losetup', '--detach', device
        losetup: CommandFilter, losetup, root

        # nova/virt/disk/vfs/localfs.py: 'blkid', '-o', 'value', '-s', 'TYPE', device
        blkid: CommandFilter, blkid, root

        # nova/virt/libvirt/utils.py: 'blockdev', '--getsize64', path
        # nova/virt/disk/mount/nbd.py: 'blockdev', '--flushbufs', device
        blockdev: RegExpFilter, blockdev, root, blockdev, (--getsize64|--flushbufs), /dev/.*

        # nova/virt/disk/vfs/localfs.py: 'tee', canonpath
        tee: CommandFilter, tee, root

        # nova/virt/disk/vfs/localfs.py: 'mkdir', canonpath
        mkdir: CommandFilter, mkdir, root

        # nova/virt/disk/vfs/localfs.py: 'chown'
        # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
        # nova/virt/libvirt/connection.py: 'chown', os.getuid( console_log
        # nova/virt/libvirt/connection.py: 'chown', 'root', basepath('disk')
        chown: CommandFilter, chown, root

        # nova/virt/disk/vfs/localfs.py: 'chmod'
        chmod: CommandFilter, chmod, root

        # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap'
        # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up'
        # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev
        # nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i..
        # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'..
        # nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',..
        # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',..
        # nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev)
        # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1]
        # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge
        # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', ..
        # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',..
        # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ...
        # nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,..
        # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up'
        # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up'
        # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..
        # nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, ..
        # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'
        # nova/network/linux_net.py: 'ip', 'route', 'add', ..
        # nova/network/linux_net.py: 'ip', 'route', 'del', .
        # nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev
        ip: CommandFilter, ip, root

        # nova/virt/libvirt/vif.py: 'tunctl', '-b', '-t', dev
        # nova/network/linux_net.py: 'tunctl', '-b', '-t', dev
        tunctl: CommandFilter, tunctl, root

        # nova/virt/libvirt/vif.py: 'ovs-vsctl', ...
        # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...
        # nova/network/linux_net.py: 'ovs-vsctl', ....
        ovs-vsctl: CommandFilter, ovs-vsctl, root

        # nova/virt/libvirt/vif.py: 'vrouter-port-control', ...
        vrouter-port-control: CommandFilter, vrouter-port-control, root

        # nova/virt/libvirt/vif.py: 'ebrctl', ...
        ebrctl: CommandFilter, ebrctl, root

        # nova/virt/libvirt/vif.py: 'mm-ctl', ...
        mm-ctl: CommandFilter, mm-ctl, root

        # nova/network/linux_net.py: 'ovs-ofctl', ....
        ovs-ofctl: CommandFilter, ovs-ofctl, root

        # nova/virt/libvirt/connection.py: 'dd', if=%s % virsh_output, ...
        dd: CommandFilter, dd, root

        # nova/virt/xenapi/volume_utils.py: 'iscsiadm', '-m', ...
        iscsiadm: CommandFilter, iscsiadm, root

        # nova/virt/libvirt/volume/aoe.py: 'aoe-revalidate', aoedev
        # nova/virt/libvirt/volume/aoe.py: 'aoe-discover'
        aoe-revalidate: CommandFilter, aoe-revalidate, root
        aoe-discover: CommandFilter, aoe-discover, root

        # nova/virt/xenapi/vm_utils.py: parted, --script, ...
        # nova/virt/xenapi/vm_utils.py: 'parted', '--script', dev_path, ...
        parted: CommandFilter, parted, root

        # nova/virt/xenapi/vm_utils.py: 'pygrub', '-qn', dev_path
        pygrub: CommandFilter, pygrub, root

        # nova/virt/xenapi/vm_utils.py: fdisk %(dev_path)s
        fdisk: CommandFilter, fdisk, root

        # nova/virt/xenapi/vm_utils.py: e2fsck, -f, -p, partition_path
        # nova/virt/disk/api.py: e2fsck, -f, -p, image
        e2fsck: CommandFilter, e2fsck, root

        # nova/virt/xenapi/vm_utils.py: resize2fs, partition_path
        # nova/virt/disk/api.py: resize2fs, image
        resize2fs: CommandFilter, resize2fs, root

        # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
        iptables-save: CommandFilter, iptables-save, root
        ip6tables-save: CommandFilter, ip6tables-save, root

        # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
        iptables-restore: CommandFilter, iptables-restore, root
        ip6tables-restore: CommandFilter, ip6tables-restore, root

        # nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
        # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
        arping: CommandFilter, arping, root

        # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
        dhcp_release: CommandFilter, dhcp_release, root

        # nova/network/linux_net.py: 'kill', '-9', pid
        # nova/network/linux_net.py: 'kill', '-HUP', pid
        kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP

        # nova/network/linux_net.py: 'kill', pid
        kill_radvd: KillFilter, root, /usr/sbin/radvd

        # nova/network/linux_net.py: dnsmasq call
        dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq

        # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'..
        radvd: CommandFilter, radvd, root

        # nova/network/linux_net.py: 'brctl', 'addbr', bridge
        # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
        # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
        # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
        brctl: CommandFilter, brctl, root

        # nova/virt/libvirt/utils.py: 'mkswap'
        # nova/virt/xenapi/vm_utils.py: 'mkswap'
        mkswap: CommandFilter, mkswap, root

        # nova/virt/libvirt/utils.py: 'nova-idmapshift'
        nova-idmapshift: CommandFilter, nova-idmapshift, root

        # nova/virt/xenapi/vm_utils.py: 'mkfs'
        # nova/utils.py: 'mkfs', fs, path, label
        mkfs: CommandFilter, mkfs, root

        # nova/virt/libvirt/utils.py: 'qemu-img'
        qemu-img: CommandFilter, qemu-img, root

        # nova/virt/disk/vfs/localfs.py: 'readlink', '-e'
        readlink: CommandFilter, readlink, root

        # nova/virt/disk/api.py:
        mkfs.ext3: CommandFilter, mkfs.ext3, root
        mkfs.ext4: CommandFilter, mkfs.ext4, root
        mkfs.ntfs: CommandFilter, mkfs.ntfs, root

        # nova/virt/libvirt/connection.py:
        lvremove: CommandFilter, lvremove, root

        # nova/virt/libvirt/utils.py:
        lvcreate: CommandFilter, lvcreate, root

        # nova/virt/libvirt/utils.py:
        lvs: CommandFilter, lvs, root

        # nova/virt/libvirt/utils.py:
        vgs: CommandFilter, vgs, root

        # nova/utils.py:read_file_as_root: 'cat', file_path
        # (called from nova/virt/disk/vfs/localfs.py:VFSLocalFS.read_file)
        read_passwd: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/passwd
        read_shadow: RegExpFilter, cat, root, cat, (/var|/usr)?/tmp/openstack-vfs-localfs[^/]+/etc/shadow

        # os-brick needed commands
        read_initiator: ReadFileFilter, /etc/iscsi/initiatorname.iscsi
        multipath: CommandFilter, multipath, root
        # multipathd show status
        multipathd: CommandFilter, multipathd, root
        systool: CommandFilter, systool, root
        vgc-cluster: CommandFilter, vgc-cluster, root
        # os_brick/initiator/connector.py
        drv_cfg: CommandFilter, /opt/emc/scaleio/sdc/bin/drv_cfg, root, /opt/emc/scaleio/sdc/bin/drv_cfg, --query_guid

        # TODO(smcginnis) Temporary fix.
        # Need to pull in os-brick os-brick.filters file instead and clean
        # out stale brick values from this file.
        scsi_id: CommandFilter, /lib/udev/scsi_id, root
        # os_brick.privileged.default oslo.privsep context
        # This line ties the superuser privs with the config files, context name,
        # and (implicitly) the actual python code invoked.
        privsep-rootwrap: RegExpFilter, privsep-helper, root, privsep-helper, --config-file, /etc/(?!\.\.).*, --privsep_context, os_brick.privileged.default, --privsep_sock_path, /tmp/.*

        # nova/storage/linuxscsi.py: sg_scan device
        sg_scan: CommandFilter, sg_scan, root

        # nova/volume/encryptors/cryptsetup.py:
        # nova/volume/encryptors/luks.py:
        ln: RegExpFilter, ln, root, ln, --symbolic, --force, /dev/mapper/crypt-.+, .+

        # nova/volume/encryptors.py:
        # nova/virt/libvirt/dmcrypt.py:
        cryptsetup: CommandFilter, cryptsetup, root

        # nova/virt/xenapi/vm_utils.py:
        xenstore-read: CommandFilter, xenstore-read, root

        # nova/virt/libvirt/utils.py:
        rbd: CommandFilter, rbd, root

        # nova/virt/libvirt/utils.py: 'shred', '-n3', '-s%d' % volume_size, path
        shred: CommandFilter, shred, root

        # nova/virt/libvirt/volume.py: 'cp', '/dev/stdin', delete_control..
        cp: CommandFilter, cp, root

        # nova/virt/xenapi/vm_utils.py:
        sync: CommandFilter, sync, root

        # nova/virt/libvirt/imagebackend.py:
        ploop: RegExpFilter, ploop, root, ploop, restore-descriptor, .*
        prl_disk_tool: RegExpFilter, prl_disk_tool, root, prl_disk_tool, resize, --size, .*M$, --resize_partition, --hdd, .*

        # nova/virt/libvirt/utils.py: 'xend', 'status'
        xend: CommandFilter, xend, root

        # nova/virt/libvirt/utils.py:
        touch: CommandFilter, touch, root

        # nova/virt/libvirt/volume/vzstorage.py
        pstorage-mount: CommandFilter, pstorage-mount, root
      pods:
      - compute
    network:
      content: |
        # nova-rootwrap command filters for network nodes
        # This file should be owned by (and only-writeable by) the root user

        [Filters]
        # nova/virt/libvirt/vif.py: 'ip', 'tuntap', 'add', dev, 'mode', 'tap'
        # nova/virt/libvirt/vif.py: 'ip', 'link', 'set', dev, 'up'
        # nova/virt/libvirt/vif.py: 'ip', 'link', 'delete', dev
        # nova/network/linux_net.py: 'ip', 'addr', 'add', str(floating_ip)+'/32'i..
        # nova/network/linux_net.py: 'ip', 'addr', 'del', str(floating_ip)+'/32'..
        # nova/network/linux_net.py: 'ip', 'addr', 'add', '169.254.169.254/32',..
        # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', dev, 'scope',..
        # nova/network/linux_net.py: 'ip', 'addr', 'del/add', ip_params, dev)
        # nova/network/linux_net.py: 'ip', 'addr', 'del', params, fields[-1]
        # nova/network/linux_net.py: 'ip', 'addr', 'add', params, bridge
        # nova/network/linux_net.py: 'ip', '-f', 'inet6', 'addr', 'change', ..
        # nova/network/linux_net.py: 'ip', 'link', 'set', 'dev', dev, 'promisc',..
        # nova/network/linux_net.py: 'ip', 'link', 'add', 'link', bridge_if ...
        # nova/network/linux_net.py: 'ip', 'link', 'set', interface, address,..
        # nova/network/linux_net.py: 'ip', 'link', 'set', interface, 'up'
        # nova/network/linux_net.py: 'ip', 'link', 'set', bridge, 'up'
        # nova/network/linux_net.py: 'ip', 'addr', 'show', 'dev', interface, ..
        # nova/network/linux_net.py: 'ip', 'link', 'set', dev, address, ..
        # nova/network/linux_net.py: 'ip', 'link', 'set', dev, 'up'
        # nova/network/linux_net.py: 'ip', 'route', 'add', ..
        # nova/network/linux_net.py: 'ip', 'route', 'del', .
        # nova/network/linux_net.py: 'ip', 'route', 'show', 'dev', dev
        ip: CommandFilter, ip, root

        # nova/virt/libvirt/vif.py: 'ovs-vsctl', ...
        # nova/virt/libvirt/vif.py: 'ovs-vsctl', 'del-port', ...
        # nova/network/linux_net.py: 'ovs-vsctl', ....
        ovs-vsctl: CommandFilter, ovs-vsctl, root

        # nova/network/linux_net.py: 'ovs-ofctl', ....
        ovs-ofctl: CommandFilter, ovs-ofctl, root

        # nova/virt/libvirt/vif.py: 'ivs-ctl', ...
        # nova/virt/libvirt/vif.py: 'ivs-ctl', 'del-port', ...
        # nova/network/linux_net.py: 'ivs-ctl', ....
        ivs-ctl: CommandFilter, ivs-ctl, root

        # nova/virt/libvirt/vif.py: 'ifc_ctl', ...
        ifc_ctl: CommandFilter, /opt/pg/bin/ifc_ctl, root

        # nova/network/linux_net.py: 'ebtables', '-D' ...
        # nova/network/linux_net.py: 'ebtables', '-I' ...
        ebtables: CommandFilter, ebtables, root
        ebtables_usr: CommandFilter, ebtables, root

        # nova/network/linux_net.py: 'ip[6]tables-save' % (cmd, '-t', ...
        iptables-save: CommandFilter, iptables-save, root
        ip6tables-save: CommandFilter, ip6tables-save, root

        # nova/network/linux_net.py: 'ip[6]tables-restore' % (cmd,)
        iptables-restore: CommandFilter, iptables-restore, root
        ip6tables-restore: CommandFilter, ip6tables-restore, root

        # nova/network/linux_net.py: 'arping', '-U', floating_ip, '-A', '-I', ...
        # nova/network/linux_net.py: 'arping', '-U', network_ref['dhcp_server'],..
        arping: CommandFilter, arping, root

        # nova/network/linux_net.py: 'dhcp_release', dev, address, mac_address
        dhcp_release: CommandFilter, dhcp_release, root

        # nova/network/linux_net.py: 'kill', '-9', pid
        # nova/network/linux_net.py: 'kill', '-HUP', pid
        kill_dnsmasq: KillFilter, root, /usr/sbin/dnsmasq, -9, -HUP

        # nova/network/linux_net.py: 'kill', pid
        kill_radvd: KillFilter, root, /usr/sbin/radvd

        # nova/network/linux_net.py: dnsmasq call
        dnsmasq: EnvFilter, env, root, CONFIG_FILE=, NETWORK_ID=, dnsmasq

        # nova/network/linux_net.py: 'radvd', '-C', '%s' % _ra_file(dev, 'conf'..
        radvd: CommandFilter, radvd, root

        # nova/network/linux_net.py: 'brctl', 'addbr', bridge
        # nova/network/linux_net.py: 'brctl', 'setfd', bridge, 0
        # nova/network/linux_net.py: 'brctl', 'stp', bridge, 'off'
        # nova/network/linux_net.py: 'brctl', 'addif', bridge, interface
        brctl: CommandFilter, brctl, root

        # nova/network/linux_net.py: 'sysctl', ....
        sysctl: CommandFilter, sysctl, root

        # nova/network/linux_net.py: 'conntrack'
        conntrack: CommandFilter, conntrack, root

        # nova/network/linux_net.py: 'fp-vdev'
        fp-vdev: CommandFilter, fp-vdev, root
      pods:
      - compute
  security: |
    #
    # Disable access to the entire file system except for the directories that
    # are explicitly allowed later.
    #
    # This currently breaks the configurations that come with some web application
    # Debian packages.
    #
    #<Directory />
    #   AllowOverride None
    #   Require all denied
    #</Directory>

    # Changing the following options will not really affect the security of the
    # server, but might make attacks slightly more difficult in some cases.

    #
    # ServerTokens
    # This directive configures what you return as the Server HTTP response
    # Header. The default is 'Full' which sends information about the OS-Type
    # and compiled in modules.
    # Set to one of:  Full | OS | Minimal | Minor | Major | Prod
    # where Full conveys the most information, and Prod the least.
    ServerTokens Prod

    #
    # Optionally add a line containing the server version and virtual host
    # name to server-generated pages (internal error documents, FTP directory
    # listings, mod_status and mod_info output etc., but not CGI generated
    # documents or custom error documents).
    # Set to "EMail" to also include a mailto: link to the ServerAdmin.
    # Set to one of:  On | Off | EMail
    ServerSignature Off

    #
    # Allow TRACE method
    #
    # Set to "extended" to also reflect the request body (only for testing and
    # diagnostic purposes).
    #
    # Set to one of:  On | Off | extended
    TraceEnable Off

    #
    # Forbid access to version control directories
    #
    # If you use version control systems in your document root, you should
    # probably deny access to their directories. For example, for subversion:
    #
    #<DirectoryMatch "/\.svn">
    #   Require all denied
    #</DirectoryMatch>

    #
    # Setting this header will prevent MSIE from interpreting files as something
    # else than declared by the content type in the HTTP headers.
    # Requires mod_headers to be enabled.
    #
    #Header set X-Content-Type-Options: "nosniff"

    #
    # Setting this header will prevent other sites from embedding pages from this
    # site as frames. This defends against clickjacking attacks.
    # Requires mod_headers to be enabled.
    #
    #Header set X-Frame-Options: "sameorigin"
  software:
    apache2:
      a2dismod: null
      a2enmod: null
      binary: apache2
      conf_dir: /etc/apache2/conf-enabled
      mods_dir: /etc/apache2/mods-available
      site_dir: /etc/apache2/sites-enabled
      start_parameters: -DFOREGROUND
console:
  address_search_enabled: true
  console_kind: novnc
  novnc:
    compute:
      vncserver_proxyclient_interface: null
      vncserver_proxyclient_network_cidr: 0/0
    vncproxy:
      vncserver_proxyclient_interface: null
      vncserver_proxyclient_network_cidr: 0/0
  serial: null
  spice:
    compute:
      server_proxyclient_interface: null
      server_proxyclient_network_cidr: 0/0
    proxy:
      server_proxyclient_interface: null
      server_proxyclient_network_cidr: 0/0
dependencies:
  dynamic:
    common:
      local_image_registry:
        jobs:
        - nova-image-repo-sync
        services:
        - endpoint: node
          service: local_image_registry
    targeted:
      linuxbridge:
        compute:
          pod:
          - labels:
              application: neutron
              component: neutron-lb-agent
            requireSameNode: true
      openvswitch:
        compute:
          pod:
          - labels:
              application: neutron
              component: neutron-ovs-agent
            requireSameNode: true
      ovn:
        compute:
          pod:
          - labels:
              application: ovn
              component: ovn-controller
            requireSameNode: true
      sriov:
        compute:
          pod:
          - labels:
              application: neutron
              component: neutron-sriov-agent
            requireSameNode: true
  static:
    api:
      jobs:
      - nova-db-sync
      - nova-ks-user
      - nova-ks-endpoints
      - nova-rabbit-init
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: oslo_db
      - endpoint: internal
        service: identity
    api_metadata:
      jobs:
      - nova-db-sync
      - nova-ks-user
      - nova-ks-endpoints
      - nova-rabbit-init
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: oslo_db
      - endpoint: internal
        service: identity
    archive_deleted_rows:
      jobs:
      - nova-db-init
      - nova-db-sync
    bootstrap:
      services:
      - endpoint: internal
        service: identity
      - endpoint: internal
        service: compute
    cell_setup:
      jobs:
      - nova-db-sync
      - nova-rabbit-init
      pod:
      - labels:
          application: nova
          component: compute
        requireSameNode: false
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: oslo_db
      - endpoint: internal
        service: identity
      - endpoint: internal
        service: compute
    compute:
      jobs:
      - nova-db-sync
      - nova-rabbit-init
      pod:
      - labels:
          application: libvirt
          component: libvirt
        requireSameNode: true
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: image
      - endpoint: internal
        service: compute
      - endpoint: internal
        service: network
      - endpoint: internal
        service: compute_metadata
    compute_ironic:
      jobs:
      - nova-db-sync
      - nova-rabbit-init
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: image
      - endpoint: internal
        service: compute
      - endpoint: internal
        service: network
      - endpoint: internal
        service: baremetal
    conductor:
      jobs:
      - nova-db-sync
      - nova-rabbit-init
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: oslo_db
      - endpoint: internal
        service: identity
      - endpoint: internal
        service: compute
    db_drop:
      services:
      - endpoint: internal
        service: oslo_db
    db_init:
      services:
      - endpoint: internal
        service: oslo_db
    db_sync:
      jobs:
      - nova-db-init
      services:
      - endpoint: internal
        service: oslo_db
    image_repo_sync:
      services:
      - endpoint: internal
        service: local_image_registry
    ks_endpoints:
      jobs:
      - nova-ks-service
      services:
      - endpoint: internal
        service: identity
    ks_service:
      services:
      - endpoint: internal
        service: identity
    ks_user:
      services:
      - endpoint: internal
        service: identity
    novncproxy:
      jobs:
      - nova-db-sync
      services:
      - endpoint: internal
        service: oslo_db
    rabbit_init:
      services:
      - endpoint: internal
        service: oslo_messaging
    scheduler:
      jobs:
      - nova-db-sync
      - nova-rabbit-init
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: oslo_db
      - endpoint: internal
        service: identity
      - endpoint: internal
        service: compute
    service_cleaner:
      jobs:
      - nova-db-sync
      - nova-rabbit-init
      services:
      - endpoint: internal
        service: oslo_messaging
      - endpoint: internal
        service: oslo_db
      - endpoint: internal
        service: identity
      - endpoint: internal
        service: compute
    spiceproxy:
      jobs:
      - nova-db-sync
      services:
      - endpoint: internal
        service: oslo_db
    tests:
      services:
      - endpoint: internal
        service: image
      - endpoint: internal
        service: compute
      - endpoint: internal
        service: network
      - endpoint: internal
        service: compute_metadata
endpoints:
  baremetal:
    host_fqdn_override:
      default: null
      public:
        host: baremetal.199-19-213-133.nip.io
    hosts:
      default: ironic-api
      public: ironic
    name: ironic
    path:
      default: null
    port:
      api:
        default: 6385
        public: 443
    scheme:
      default: http
      public: https
  cluster_domain_suffix: cluster.local
  compute:
    host_fqdn_override:
      default: null
      public:
        host: compute.199-19-213-133.nip.io
    hosts:
      default: nova-api
      public: nova
    name: nova
    path:
      default: /v2.1
    port:
      api:
        default: 8774
        public: 443
        service: 8774
      novncproxy:
        default: 6080
    scheme:
      default: http
      public: https
      service: http
  compute_metadata:
    host_fqdn_override:
      default: null
    hosts:
      default: nova-metadata
      public: nova-metadata
    ip:
      ingress: null
    name: nova
    path:
      default: /
    port:
      metadata:
        default: 8775
        public: 8775
    scheme:
      default: http
    secret: QguhJvexxKr1WOTvEb2SxMFehhc2iZIH
  compute_novnc_proxy:
    host_fqdn_override:
      default: null
      public:
        host: vnc.199-19-213-133.nip.io
    hosts:
      default: nova-novncproxy
      public: novncproxy
    name: nova
    path:
      default: /vnc_lite.html
    port:
      novnc_proxy:
        default: 6080
        public: 443
    scheme:
      default: http
      public: https
  compute_novnc_vencrypt:
    host_fqdn_override:
      default:
        commonName: nova-novncproxy
        tls:
          commonName: nova-novncproxy
          issuerRef:
            kind: Issuer
            name: libvirt-vnc
          secretName: nova-novncproxy-vencrypt
          usages:
          - client auth
        usages:
        - client auth
    hosts:
      default: nova-novncproxy
  compute_spice_proxy:
    host_fqdn_override:
      default: null
    hosts:
      default: nova-spiceproxy
      public: spiceproxy
    name: nova
    path:
      default: /spice_auto.html
    port:
      spice_proxy:
        default: 6082
        public: 80
    scheme:
      default: http
  fluentd:
    host_fqdn_override:
      default: null
    hosts:
      default: fluentd-logging
    name: fluentd
    namespace: null
    path:
      default: null
    port:
      metrics:
        default: 24220
      service:
        default: 24224
    scheme: http
  identity:
    auth:
      admin:
        password: YJE4RPf1KjWBKz5ENWAZJGbRqSFALY2z
        project_domain_name: default
        project_name: admin
        region_name: RegionOne
        user_domain_name: default
        username: admin-RegionOne
      cinder:
        password: on94HMrCoimMI0O4nHRFOx5ykmeXDnbA
        project_domain_name: service
        project_name: service
        region_name: RegionOne
        role: admin,service
        user_domain_name: service
        username: cinder-RegionOne
      glance:
        password: 0zQmuiJwxnbD44KBrb9YjZ1nvSEhnWwy
        region_name: RegionOne
        username: glance-RegionOne
      ironic:
        auth_type: password
        auth_version: v3
        password: pcZPwaKRcSnRCCuJz4MP5umy3gLYJorW
        project_domain_name: service
        project_name: service
        region_name: RegionOne
        user_domain_name: service
        username: ironic-RegionOne
      neutron:
        password: jd98bqImTgSMMlFF0ps9VDvIQ1Hfy7Yt
        project_domain_name: service
        project_name: service
        region_name: RegionOne
        user_domain_name: service
        username: neutron-RegionOne
      nova:
        password: AjQQXBYFrZ1oWBtWta24DhPJZrw65Lqf
        project_domain_name: service
        project_name: service
        region_name: RegionOne
        role: admin,service
        user_domain_name: service
        username: nova-RegionOne
      placement:
        password: RVB8sdndbPWqmbqdeFXmOZ6e9BlqewST
        project_domain_name: service
        project_name: service
        region_name: RegionOne
        role: admin
        user_domain_name: service
        username: placement-RegionOne
      test:
        password: password
        project_domain_name: service
        project_name: test
        region_name: RegionOne
        role: admin
        user_domain_name: service
        username: nova-test
    host_fqdn_override:
      default: null
      public:
        host: identity.199-19-213-133.nip.io
    hosts:
      default: keystone-api
      internal: keystone-api
    name: keystone
    path:
      default: /
    port:
      api:
        default: 5000
        internal: 5000
        public: 443
    scheme:
      default: http
      public: https
  image:
    host_fqdn_override:
      default: null
      public:
        host: image.199-19-213-133.nip.io
    hosts:
      default: glance-api
      public: glance
    name: glance
    path:
      default: null
    port:
      api:
        default: 9292
        public: 443
    scheme:
      default: http
      public: https
  ingress:
    hosts:
      default: ingress
    name: ingress
    namespace: null
    port:
      ingress:
        default: 80
  kube_dns:
    host_fqdn_override:
      default: null
    hosts:
      default: kube-dns
    name: kubernetes-dns
    namespace: kube-system
    path:
      default: null
    port:
      dns:
        default: 53
        protocol: UDP
    scheme: http
  local_image_registry:
    host_fqdn_override:
      default: null
    hosts:
      default: localhost
      internal: docker-registry
      node: localhost
    name: docker-registry
    namespace: docker-registry
    port:
      registry:
        node: 5000
  network:
    host_fqdn_override:
      default: null
      public:
        host: network.199-19-213-133.nip.io
    hosts:
      default: neutron-server
      public: neutron
    name: neutron
    path:
      default: null
    port:
      api:
        default: 9696
        public: 443
    scheme:
      default: http
      public: https
  oci_image_registry:
    auth:
      enabled: false
      nova:
        password: password
        username: nova
    host_fqdn_override:
      default: null
    hosts:
      default: localhost
    name: oci-image-registry
    namespace: oci-image-registry
    port:
      registry:
        default: null
  oslo_cache:
    auth:
      memcache_secret_key: X1uiRtZWTda3T43LlRCt9vSLDQicVmn7
    host_fqdn_override:
      default: null
    hosts:
      default: memcached
    port:
      memcache:
        default: 11211
  oslo_db:
    auth:
      admin:
        password: jjpXIkNs3z3YCgLNfkoJU2Dbe1bEDrjJ
        secret:
          tls:
            internal: mariadb-tls-direct
        username: root
      cinder:
        password: GWCzWXkLLtF1lfmPjvlVX7NWU37uBatC
      glance:
        password: ZTqo0leQqObf3yp3WUgf3Urv5812rqYk
      ironic:
        password: i90zce3pMPsR8idOEym7HeYibgrz1cRR
      keystone:
        password: 4x3Ba0vi1UScHu6W2LCQJ5NqtVXcrAjn
      neutron:
        password: HYWBK3Bymc8bIICD7HTVAuztwnmuRKMI
      nova:
        password: TZGv0AFFe0sBRCaz36BzcuFj59yeqXXd
        username: nova
      placement:
        password: CtQbPXw95Rl20C9x1Pagimdaza1S85jP
    host_fqdn_override:
      default: null
    hosts:
      default: percona-xtradb-haproxy
    path: /nova
    port:
      mysql:
        default: 3306
    scheme: mysql+pymysql
  oslo_db_api:
    auth:
      admin:
        password: jjpXIkNs3z3YCgLNfkoJU2Dbe1bEDrjJ
        username: root
      nova:
        password: TZGv0AFFe0sBRCaz36BzcuFj59yeqXXd
        username: nova
    host_fqdn_override:
      default: null
    hosts:
      default: percona-xtradb-haproxy
    path: /nova_api
    port:
      mysql:
        default: 3306
    scheme: mysql+pymysql
  oslo_db_cell0:
    auth:
      admin:
        password: jjpXIkNs3z3YCgLNfkoJU2Dbe1bEDrjJ
        username: root
      nova:
        password: TZGv0AFFe0sBRCaz36BzcuFj59yeqXXd
        username: nova
    host_fqdn_override:
      default: null
    hosts:
      default: percona-xtradb-haproxy
    path: /nova_cell0
    port:
      mysql:
        default: 3306
    scheme: mysql+pymysql
  oslo_messaging:
    auth:
      admin:
        password: krEskp7d2YIapCJsm8S2bNnEPxeceVaM
        secret:
          tls:
            internal: rabbitmq-tls-direct
        username: default_user_0YSPw2hPk4pXwH7Rz1h
      cinder:
        password: fz0KVyZHpaUyDATAr2fYKCzqQIm68nBz
      glance:
        password: XTHy4hJCkFyYrzYqB1A1ACoG40YCng0I
      ironic:
        password: T2KnIxN4EHK8glYIJulgdz24uRKRyPW0
      keystone:
        password: wwXl3JpZFX47DpWSY1T0jRhdY33giINN
      neutron:
        password: vD8wzzXqobDddqrc4BLFBsUobPquv6b6
      nova:
        password: 4fDXqya1VKgt1lxguV094fj7RUt0mkJg
        username: nova
      user:
        password: krEskp7d2YIapCJsm8S2bNnEPxeceVaM
        username: default_user_0YSPw2hPk4pXwH7Rz1h
    host_fqdn_override:
      default: null
    hosts:
      default: rabbitmq-nova
    path: /nova
    port:
      amqp:
        default: 5672
      http:
        default: 15672
    scheme: rabbit
  placement:
    host_fqdn_override:
      default: null
      public:
        host: placement.199-19-213-133.nip.io
    hosts:
      default: placement-api
      public: placement
    name: placement
    path:
      default: /
    port:
      api:
        default: 8778
        public: 443
        service: 8778
    scheme:
      default: http
      public: https
      service: http
  volumev3:
    host_fqdn_override:
      default: null
      public:
        host: volume.199-19-213-133.nip.io
    hosts:
      default: cinder-api
      public: cinder
    name: cinderv3
    path:
      default: /v3/%(tenant_id)s
      healthcheck: /healthcheck
    port:
      api:
        default: 8776
        public: 443
    scheme:
      default: http
      public: https
health_probe:
  logging:
    level: ERROR
helm-toolkit:
  global: {}
helm3_hook: true
images:
  local_registry:
    active: false
    exclude:
    - dep_check
    - image_repo_sync
  pull_policy: IfNotPresent
  tags:
    bootstrap: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:zed@sha256:7634d0f354cdd12bf065617f3ffb614e9efbd57bba6f1de710ef95780e1f5bee
    db_drop: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:zed@sha256:7634d0f354cdd12bf065617f3ffb614e9efbd57bba6f1de710ef95780e1f5bee
    db_init: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:zed@sha256:7634d0f354cdd12bf065617f3ffb614e9efbd57bba6f1de710ef95780e1f5bee
    dep_check: harbor.atmosphere.dev/ghcr.io/vexxhost/kubernetes-entrypoint:edge@sha256:8921b64b87af184a1421dd856b2703bcf3cff9f50863cd0d18371cf964a87bd3
    image_repo_sync: docker.io/docker:17.07.0
    ks_endpoints: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:zed@sha256:7634d0f354cdd12bf065617f3ffb614e9efbd57bba6f1de710ef95780e1f5bee
    ks_service: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:zed@sha256:7634d0f354cdd12bf065617f3ffb614e9efbd57bba6f1de710ef95780e1f5bee
    ks_user: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:zed@sha256:7634d0f354cdd12bf065617f3ffb614e9efbd57bba6f1de710ef95780e1f5bee
    nova_api: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:zed@sha256:3e92460f1841f86b40ec7e2eff2f5d979f62fa24107382bfd1737591e1276f95
    nova_archive_deleted_rows: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:zed@sha256:3e92460f1841f86b40ec7e2eff2f5d979f62fa24107382bfd1737591e1276f95
    nova_cell_setup: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:zed@sha256:3e92460f1841f86b40ec7e2eff2f5d979f62fa24107382bfd1737591e1276f95
    nova_cell_setup_init: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:zed@sha256:7634d0f354cdd12bf065617f3ffb614e9efbd57bba6f1de710ef95780e1f5bee
    nova_compute: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:zed@sha256:3e92460f1841f86b40ec7e2eff2f5d979f62fa24107382bfd1737591e1276f95
    nova_compute_ironic: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:zed@sha256:3e92460f1841f86b40ec7e2eff2f5d979f62fa24107382bfd1737591e1276f95
    nova_compute_ssh: harbor.atmosphere.dev/ghcr.io/vexxhost/nova-ssh:latest@sha256:06b8670a7c16c1dad85a13ab113423dec6b0c08f30a98f354b228aa579c87c84
    nova_conductor: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:zed@sha256:3e92460f1841f86b40ec7e2eff2f5d979f62fa24107382bfd1737591e1276f95
    nova_db_sync: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:zed@sha256:3e92460f1841f86b40ec7e2eff2f5d979f62fa24107382bfd1737591e1276f95
    nova_novncproxy: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:zed@sha256:3e92460f1841f86b40ec7e2eff2f5d979f62fa24107382bfd1737591e1276f95
    nova_novncproxy_assets: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:zed@sha256:3e92460f1841f86b40ec7e2eff2f5d979f62fa24107382bfd1737591e1276f95
    nova_scheduler: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:zed@sha256:3e92460f1841f86b40ec7e2eff2f5d979f62fa24107382bfd1737591e1276f95
    nova_service_cleaner: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:zed@sha256:7634d0f354cdd12bf065617f3ffb614e9efbd57bba6f1de710ef95780e1f5bee
    nova_spiceproxy: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:zed@sha256:3e92460f1841f86b40ec7e2eff2f5d979f62fa24107382bfd1737591e1276f95
    nova_spiceproxy_assets: harbor.atmosphere.dev/ghcr.io/vexxhost/nova:zed@sha256:3e92460f1841f86b40ec7e2eff2f5d979f62fa24107382bfd1737591e1276f95
    nova_storage_init: harbor.atmosphere.dev/ghcr.io/vexxhost/heat:zed@sha256:7634d0f354cdd12bf065617f3ffb614e9efbd57bba6f1de710ef95780e1f5bee
    nova_wait_for_computes_init: gcr.io/google_containers/hyperkube-amd64:v1.11.6
    rabbit_init: harbor.atmosphere.dev/docker.io/library/rabbitmq:3.10.2-management
    test: docker.io/xrally/xrally-openstack:2.0.0
jobs:
  archive_deleted_rows:
    cron: "0 */1 * * *"
    history:
      failed: 1
      success: 3
    starting_deadline: 600
  cell_setup:
    cron: "0 */1 * * *"
    extended_wait:
      duration: 5
      enabled: false
      iteration: 3
    extra_command: null
    history:
      failed: 1
      success: 3
    starting_deadline: 600
  service_cleaner:
    cron: "0 */1 * * *"
    extra_command: null
    history:
      failed: 1
      success: 3
    sleep_time: 60
    starting_deadline: 600
labels:
  agent:
    compute:
      node_selector_key: openstack-compute-node
      node_selector_value: enabled
    compute_ironic:
      node_selector_key: openstack-control-plane
      node_selector_value: enabled
  api_metadata:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  conductor:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  job:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  novncproxy:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  osapi:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  scheduler:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  spiceproxy:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
  test:
    node_selector_key: openstack-control-plane
    node_selector_value: enabled
# Per-manifest render toggles: each boolean enables/disables one Kubernetes
# manifest emitted by the chart. Placement- and consoleauth-related manifests
# are all disabled (presumably handled by a separate chart / removed service —
# verify against the deployment's placement chart).
manifests:
  certificates: false
  configmap_bin: true
  configmap_etc: true
  cron_job_archive_deleted_rows: false
  cron_job_cell_setup: true
  cron_job_service_cleaner: true
  daemonset_compute: true
  deployment_api_metadata: true
  deployment_api_osapi: true
  deployment_conductor: true
  deployment_consoleauth: false
  deployment_novncproxy: true
  deployment_placement: false
  deployment_scheduler: true
  deployment_spiceproxy: true
  # Ingress is disabled for all endpoints; exposure is presumably handled
  # elsewhere (e.g. an external ingress layer) — confirm.
  ingress_metadata: false
  ingress_novncproxy: false
  ingress_osapi: false
  ingress_placement: false
  ingress_spiceproxy: false
  job_bootstrap: true
  job_cell_setup: true
  job_db_drop: false
  job_db_init: true
  job_db_init_placement: false
  job_db_sync: true
  job_image_repo_sync: true
  job_ks_endpoints: true
  job_ks_placement_endpoints: false
  job_ks_placement_service: false
  job_ks_placement_user: false
  job_ks_service: true
  job_ks_user: true
  job_rabbit_init: true
  job_storage_init: false
  network_policy: false
  pdb_metadata: true
  pdb_osapi: true
  pod_rally_test: true
  secret_db: true
  secret_db_api: true
  secret_db_cell0: true
  secret_ingress_tls: true
  secret_keystone: true
  secret_keystone_placement: false
  secret_rabbitmq: true
  secret_registry: true
  service_ingress_metadata: false
  service_ingress_novncproxy: false
  service_ingress_osapi: false
  service_ingress_placement: false
  service_ingress_spiceproxy: false
  service_metadata: true
  service_novncproxy: true
  service_osapi: true
  service_placement: false
  service_spiceproxy: true
  statefulset_compute_ironic: false
# Network exposure settings per nova endpoint (ingress class/annotations,
# optional NodePort, and backend service port where applicable).
network:
  # Neutron backend(s) in use.
  backend:
  - ovn
  metadata:
    external_policy_local: false
    ingress:
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
      classes:
        cluster: nginx-cluster
        namespace: nginx
      public: true
    # NodePort exposure is disabled; 'port' below it is the would-be nodePort.
    node_port:
      enabled: false
      port: 30775
    # Metadata API listen/service port.
    port: 8775
  novncproxy:
    ingress:
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
      classes:
        cluster: nginx-cluster
        namespace: nginx
      public: true
    node_port:
      enabled: false
      port: 30680
  osapi:
    external_policy_local: false
    ingress:
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
      classes:
        cluster: nginx-cluster
        namespace: nginx
      public: true
    node_port:
      enabled: false
      port: 30774
    # Compute API listen/service port.
    port: 8774
  spiceproxy:
    ingress:
      annotations:
        nginx.ingress.kubernetes.io/rewrite-target: /
      classes:
        cluster: nginx-cluster
        namespace: nginx
      public: true
    node_port:
      enabled: false
      port: 30682
  # SSH configuration between compute hosts (presumably used for instance
  # migration — confirm against the chart's compute templates).
  ssh:
    enabled: true
    # NOTE(review): 0.0.0.0/0 permits SSH from any source subnet — consider
    # restricting to the compute/management network.
    from_subnet: 0.0.0.0/0
    key_types:
    - rsa
    # NOTE(review): 'dsa' (ssh-dss) is deprecated and disabled by default in
    # modern OpenSSH — consider removing it from this list.
    - dsa
    - ecdsa
    - ed25519
    port: 8022
    # NOTE(review): private key material is embedded directly in these values;
    # it should be sourced from a secret store rather than committed here.
    # '|+' (keep chomping) preserves the trailing blank line as part of the
    # value — do not insert comments inside or immediately after this scalar.
    private_key: |+
      -----BEGIN OPENSSH PRIVATE KEY-----
      b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAACFwAAAAdzc2gtcn
      NhAAAAAwEAAQAAAgEAua8faaajsi8zz+BopzR5pgb4GXRAcJjqB4ao7G3rpB+gdhLMr4qN
      9RqQT0kxt6Np9vtLNO0U9TFBZwGSJVJAda7764ljbj9sFfa59EFdJuFb/LQO66W1ox6eql
      LiOtagvoYWXqwPLONKRWvfQIm/PkETJoVrkOmSnqOVilcFJQx7ptFfQyVvpfy5G5n+RjOH
      BT7vDDc4CvjUIHGq6oW20qBcxfhKaYXApdpct2P/djTE3hvLEGePEznqD/4+FvGOe8njeb
      C3/OLUGYN0qSy86m6ESX6aoQ3S2mDLXASEFRFFQQyYNWRQm0N3ebHu6Z4tC6nPG9VfFpmt
      Ar0ZSi9xwmOJnAOVFBq52s9PgQ5mYwUBy4f87/efTb1eXg+eCNXQdhh+v6JdYCh6AJhyEA
      v8noko6DLfPbrIHLqbabvS0qI7mRWIS2Xfxqmv4foOUT9yKyjz2XZa4UVRofed23wRc3cO
      IiCRLvFTq4MNTVdKdw8+7iCVLraim1PoTbxLHRhauUuiC3G8Tstj90k9tykUPn1G9SeWqv
      3P872Rzs78CH0TJfUJxPrpV5BxWlyEEzT6QI0h3zOwU9CvTbz+GkjlHWrycVfWAEDuDfxD
      GfSAt4DNanq8PRDtIpegaVSdi079wbxvlxdNWU+i1f/MTEmI0YvqRQUtsc9TWzQUKaYocA
      8AAAc4xfvc2sX73NoAAAAHc3NoLXJzYQAAAgEAua8faaajsi8zz+BopzR5pgb4GXRAcJjq
      B4ao7G3rpB+gdhLMr4qN9RqQT0kxt6Np9vtLNO0U9TFBZwGSJVJAda7764ljbj9sFfa59E
      FdJuFb/LQO66W1ox6eqlLiOtagvoYWXqwPLONKRWvfQIm/PkETJoVrkOmSnqOVilcFJQx7
      ptFfQyVvpfy5G5n+RjOHBT7vDDc4CvjUIHGq6oW20qBcxfhKaYXApdpct2P/djTE3hvLEG
      ePEznqD/4+FvGOe8njebC3/OLUGYN0qSy86m6ESX6aoQ3S2mDLXASEFRFFQQyYNWRQm0N3
      ebHu6Z4tC6nPG9VfFpmtAr0ZSi9xwmOJnAOVFBq52s9PgQ5mYwUBy4f87/efTb1eXg+eCN
      XQdhh+v6JdYCh6AJhyEAv8noko6DLfPbrIHLqbabvS0qI7mRWIS2Xfxqmv4foOUT9yKyjz
      2XZa4UVRofed23wRc3cOIiCRLvFTq4MNTVdKdw8+7iCVLraim1PoTbxLHRhauUuiC3G8Ts
      tj90k9tykUPn1G9SeWqv3P872Rzs78CH0TJfUJxPrpV5BxWlyEEzT6QI0h3zOwU9CvTbz+
      GkjlHWrycVfWAEDuDfxDGfSAt4DNanq8PRDtIpegaVSdi079wbxvlxdNWU+i1f/MTEmI0Y
      vqRQUtsc9TWzQUKaYocA8AAAADAQABAAACAA5MUGBrzwYdAWHb92MMtONWfQtEuuOQutCq
      i15tVmxUbSA6cV2BD05U5dDdI4FvCW1Vy2Q/b1I0IUOjyNOr4j/NoiNweGIYayPD1GzdDa
      W9S76JRO7bW0G8zV3zNO1v8n/JRnayzBZaSDFRTackFEBgdW+NbYuCmOIzWYQGEGTacbkW
      PT4n51GCROS9tmlEdXyvbGvl4qBhqb/30L/vXPUaGRq7dEts27lJt8L9ZcH97TznKftQ5i
      EGYCIsIy7UGfMpSCVqOLi15Lw0NasDQWbR7MjWAWUucdKUCoTC6kLPCiSoqh3OZDCYgN4z
      l+CQaAqSYFrV0tUkL0DbO9PNFHbQWc0ma2k0183sp+GmQaTTZ9VBTuLH2e8C8W8Ndbqkn6
      vgYCDW8XngoWffdmOhQXPY7+vSDu6KdgNsGxqTUR0TH4g2uOYPWuM/DAPA678Ft7YJscsU
      Pd29Iv08vfbNjC0Wcz8M6rT9tDS8yt2Qbm2OJZlv20zdTZxM9NdErjtCFFUdyY9u8uYn4j
      P8WrjFLyuTJbgHk6APG7TXlJoAC8IL5JGnWGenqOHNR8igW0IQ2ZIZ11GNJIpxxabSY1Ap
      iWjWIaYbG7hQD1VjPg6/wvualmZkY52EjsJlyELT+mVWgzlMFNVH7Zy5Qiw+JG2ZhngEoq
      C1l7LU8Jbm4Krl4/gZAAABAQCocnwEYktf2LhfeOP8z31aGcKLneEOhSN/CHAiuO0wImEG
      HrbzcPG53OZxr0OzRQ3UM5p8wV9v2dhjljQxrvuljRSBE7dWcScJNERwgZlEsSFtGHIfXz
      eLm1l8LS4E/wlfnVq7dPx0cBNE6E2SD6eLJeKR4a1omdiEYC/1xiaF7TuvmfYJ/G5wFA9q
      dJrZc/m6jn9dwC1eEsX/XV/ptjSv/Swm3gFmW7B7myL1o1YedrwMBeElFMWvEilE+dPrik
      V/NC4B0t9i9HvqOp1xCOhQURuZSWhmsJSwMl3S74lweBXqiSHHr9nxqz4qigFJlZ16kEEQ
      ik+ow1dgyGvElJBEAAABAQDo5t2/x61tKN/gKX+p7uOqnfmlR6In+MJ1ghVltoqKT2xatc
      g32fTfn1Ed4LQ7Spo94upv1lGYNgDCnldNgctGzxBah9/kdqZmTvdNkHSCnAaKwWkGqSvr
      CN9GT4IddzP8ekfs41Umbdd/k9ziphCfFqaC9LIW7YZEk1prd2XMoEwyJ58TAcjgwmUu0U
      30O62xG++xDD5+NRF9VTKHXXN5J5GHYCHqqtv0mA+HkKEnywjDBIl17UbSV8gbBTRlUzLY
      PoVP2ExzoXVryHZcK8Rmr8tV3nzw6qVsclvjr/DQXhT/Ir2+OFrsQNVrANLrCzWGGpWVfv
      WLyMiaEx6igj03AAABAQDMGXJ9tJ0sLvWvkgqEO69J66lysXGwd5of/anJzEw70CYSGFhh
      I1LUA0STYs+R1sOfAxfP1LfQdt/GC5iv7IQpc4R7VP60uZNrak/AyG+Fh+g7FSe3MsmRVp
      CjMYIAlC3YJ3ixpkwC/kG9ImwacqLtf7sgSnDGCKbZc4c2eUVLMORbApeQjV6jd3KSnQ2S
      FmPjDpRF/Gc8emUW/2CdqrOSSHx2sHW8CIkLJGZyPVxImwsTua9NzM+/qYt25athrT1pAU
      U/xooW0oVpHeIGefZyTnlWOXJMlfVKfxxRVqVHbaQZpPIQkkjjoQgzKOC68Hs4vCmuG7qV
      MT+PpukMe4/pAAAAAAEC
      -----END OPENSSH PRIVATE KEY-----

    public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQC5rx9ppqOyLzPP4GinNHmmBvgZdEBwmOoHhqjsbeukH6B2Esyvio31GpBPSTG3o2n2+0s07RT1MUFnAZIlUkB1rvvriWNuP2wV9rn0QV0m4Vv8tA7rpbWjHp6qUuI61qC+hhZerA8s40pFa99Aib8+QRMmhWuQ6ZKeo5WKVwUlDHum0V9DJW+l/Lkbmf5GM4cFPu8MNzgK+NQgcarqhbbSoFzF+EpphcCl2ly3Y/92NMTeG8sQZ48TOeoP/j4W8Y57yeN5sLf84tQZg3SpLLzqboRJfpqhDdLaYMtcBIQVEUVBDJg1ZFCbQ3d5se7pni0Lqc8b1V8Wma0CvRlKL3HCY4mcA5UUGrnaz0+BDmZjBQHLh/zv959NvV5eD54I1dB2GH6/ol1gKHoAmHIQC/yeiSjoMt89usgcuptpu9LSojuZFYhLZd/Gqa/h+g5RP3IrKPPZdlrhRVGh953bfBFzdw4iIJEu8VOrgw1NV0p3Dz7uIJUutqKbU+hNvEsdGFq5S6ILcbxOy2P3ST23KRQ+fUb1J5aq/c/zvZHOzvwIfRMl9QnE+ulXkHFaXIQTNPpAjSHfM7BT0K9NvP4aSOUdavJxV9YAQO4N/EMZ9IC3gM1qerw9EO0il6BpVJ2LTv3BvG+XF01ZT6LV/8xMSYjRi+pFBS2xz1NbNBQppihwDw==
# NetworkPolicy rules for nova pods (only rendered when
# manifests.network_policy is true — it is false above). An empty rule ({})
# in Kubernetes NetworkPolicy matches all traffic, i.e. allow-all.
network_policy:
  nova:
    egress:
    - {}
    ingress:
    - {}
# Pod-level settings: scheduling affinity, labels, lifecycle, mounts, probes,
# replica counts, resources, security contexts, and tolerations.
pod:
  # Soft anti-affinity: prefer spreading replicas across distinct hostnames.
  affinity:
    anti:
      topologyKey:
        default: kubernetes.io/hostname
      type:
        default: preferredDuringSchedulingIgnoredDuringExecution
      weight:
        default: 10
  labels:
    include_app_kubernetes_io: false
  lifecycle:
    # PodDisruptionBudget minimums for the two API deployments.
    disruption_budget:
      metadata:
        min_available: 0
      osapi:
        min_available: 0
    # Termination grace periods (seconds) per component.
    termination_grace_period:
      metadata:
        timeout: 30
      osapi:
        timeout: 30
    # Rolling-update strategy for the compute daemonset and all deployments.
    upgrades:
      daemonsets:
        compute:
          enabled: true
          max_unavailable: 1
          min_ready_seconds: 0
        pod_replacement_strategy: RollingUpdate
      deployments:
        pod_replacement_strategy: RollingUpdate
        revision_history: 3
        rolling_update:
          max_surge: 3
          max_unavailable: 1
  # Extra volume/volumeMount injection points per pod; all null here
  # (no user-supplied mounts).
  mounts:
    nova_api_metadata:
      init_container: null
      nova_api_metadata:
        volumeMounts: null
        volumes: null
    nova_api_osapi:
      init_container: null
      nova_api_osapi:
        volumeMounts: null
        volumes: null
    nova_bootstrap:
      init_container: null
      nova_bootstrap:
        volumeMounts: null
        volumes: null
    nova_compute:
      init_container: null
      nova_compute:
        volumeMounts: null
        volumes: null
    nova_compute_ironic:
      init_container: null
      nova_compute_ironic:
        volumeMounts: null
        volumes: null
    nova_conductor:
      init_container: null
      nova_conductor:
        volumeMounts: null
        volumes: null
    nova_db_sync:
      nova_db_sync:
        volumeMounts: null
        volumes: null
    nova_novncproxy:
      init_novncproxy: null
      nova_novncproxy:
        volumeMounts: null
        volumes: null
    nova_scheduler:
      init_container: null
      nova_scheduler:
        volumeMounts: null
        volumes: null
    nova_spiceproxy:
      init_spiceproxy: null
      nova_spiceproxy:
        volumeMounts: null
        volumes: null
    nova_tests:
      init_container: null
      nova_tests:
        volumeMounts: null
        volumes: null
  # Liveness/readiness (and for compute, startup) probe parameters per
  # component. Note 'rpc_retries'/'rpc_timeout' sit alongside the component
  # keys here — presumably consumed by the probe scripts; verify in the chart.
  probes:
    api-metadata:
      default:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 5
            periodSeconds: 10
            timeoutSeconds: 5
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 5
            periodSeconds: 10
            timeoutSeconds: 5
    api-osapi:
      default:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 5
            periodSeconds: 10
            timeoutSeconds: 5
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 5
            periodSeconds: 10
            timeoutSeconds: 5
    compute:
      default:
        liveness:
          enabled: true
          params:
            periodSeconds: 90
            timeoutSeconds: 70
        readiness:
          enabled: true
          params:
            periodSeconds: 90
            timeoutSeconds: 70
        # Startup probe tolerates up to 120 failures at 10s intervals
        # (~20 min) before the kubelet kills the container.
        startup:
          enabled: true
          params:
            failureThreshold: 120
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 70
    compute-spice-proxy:
      default:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 60
            timeoutSeconds: 15
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 60
            timeoutSeconds: 15
    conductor:
      default:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 90
            timeoutSeconds: 70
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 80
            periodSeconds: 90
            timeoutSeconds: 70
    novncproxy:
      default:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 60
            timeoutSeconds: 15
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 30
            periodSeconds: 60
            timeoutSeconds: 15
    rpc_retries: 2
    rpc_timeout: 60
    scheduler:
      default:
        liveness:
          enabled: true
          params:
            initialDelaySeconds: 120
            periodSeconds: 90
            timeoutSeconds: 70
        readiness:
          enabled: true
          params:
            initialDelaySeconds: 80
            periodSeconds: 90
            timeoutSeconds: 70
  # Replica counts per deployment (compute itself is a daemonset, so it is
  # absent here). All single-replica in this configuration.
  replicas:
    api_metadata: 1
    compute_ironic: 1
    conductor: 1
    novncproxy: 1
    osapi: 1
    scheduler: 1
    spiceproxy: 1
  # Container resource requests/limits. 'enabled: false' below presumably
  # means these values are defined but NOT applied to the pods — verify
  # against the chart's resource helper.
  resources:
    api:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    api_metadata:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    compute:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    compute_ironic:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    conductor:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    # Master switch for applying the requests/limits in this section.
    enabled: false
    jobs:
      archive_deleted_rows:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      bootstrap:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      cell_setup:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      db_drop:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      db_init:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      db_sync:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      image_repo_sync:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      ks_endpoints:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      ks_service:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      ks_user:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      rabbit_init:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      service_cleaner:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      storage_init:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
      tests:
        limits:
          cpu: 2000m
          memory: 1024Mi
        requests:
          cpu: 100m
          memory: 128Mi
    novncproxy:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    scheduler:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    spiceproxy:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
    ssh:
      limits:
        cpu: 2000m
        memory: 1024Mi
      requests:
        cpu: 100m
        memory: 128Mi
  # Pod/container securityContext settings. Most pods run as UID 42424
  # (the nova user in these images — TODO confirm) with non-escalating,
  # read-only-rootfs containers; compute and its ssh sidecar are privileged.
  security_context:
    archive_deleted_rows:
      container:
        nova_archive_deleted_rows:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_archive_deleted_rows_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
      pod:
        runAsUser: 42424
    bootstrap:
      container:
        bootstrap:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_wait_for_computes_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
      pod:
        runAsUser: 42424
    cell_setup:
      container:
        nova_cell_setup:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
      pod:
        runAsUser: 42424
    nova:
      container:
        ceph_perms:
          readOnlyRootFilesystem: true
          runAsUser: 0
        nova_api:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_api_metadata_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        # Compute container is privileged (required for hypervisor access).
        nova_compute:
          privileged: true
          readOnlyRootFilesystem: true
        nova_compute_init:
          readOnlyRootFilesystem: true
          runAsUser: 0
        nova_compute_spice_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_compute_ssh:
          privileged: true
          runAsUser: 0
        nova_compute_ssh_init:
          runAsUser: 0
        nova_compute_vnc_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_conductor:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_novncproxy:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_novncproxy_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        # NOTE(review): 'assests' looks like a typo for 'assets' (compare
        # nova_spiceproxy_init_assets below), but this key must match the
        # container name used by the chart templates — verify before renaming.
        nova_novncproxy_init_assests:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_osapi:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_scheduler:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_spiceproxy:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_spiceproxy_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_spiceproxy_init_assets:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        tungstenfabric_compute_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
      pod:
        runAsUser: 42424
    nova_cell_setup:
      container:
        nova_cell_setup:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_cell_setup_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
        nova_wait_for_computes_init:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
      pod:
        runAsUser: 42424
    service_cleaner:
      container:
        nova_service_cleaner:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
      pod:
        runAsUser: 42424
  # Optional master/control-plane tolerations; disabled, so nova pods do not
  # schedule onto tainted control-plane nodes via this mechanism.
  tolerations:
    nova:
      enabled: false
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
        operator: Exists
      - effect: NoSchedule
        key: node-role.kubernetes.io/control-plane
        operator: Exists
  # Use FQDN for the compute host name registration.
  use_fqdn:
    compute: true
  useHostNetwork:
    novncproxy: false
# Ceph RBD pool parameters (relevant to job_storage_init, which is disabled
# in manifests above).
rbd_pool:
  app_name: nova-vms
  chunk_size: 8
  crush_rule: replicated_rule
  replication: 3
# null presumably means the release group defaults to the Helm release name —
# verify against the chart's helpers.
release_group: null
# Names of the Kubernetes Secret objects the chart reads/creates for each
# credential class (identity, databases, messaging, registry, TLS).
secrets:
  identity:
    admin: nova-keystone-admin
    nova: nova-keystone-user
    test: nova-keystone-test
  oci_image_registry:
    nova: nova-oci-image-registry
  # Separate secrets for the main, API, and cell0 databases.
  oslo_db:
    admin: nova-db-admin
    nova: nova-db-user
  oslo_db_api:
    admin: nova-db-api-admin
    nova: nova-db-api-user
  oslo_db_cell0:
    admin: nova-db-cell0-admin
    nova: nova-db-cell0-user
  oslo_messaging:
    admin: nova-rabbitmq-admin
    nova: nova-rabbitmq-user
  # TLS certificate secret names per endpoint.
  tls:
    compute:
      osapi:
        internal: nova-tls-api
        public: nova-tls-public
    compute_metadata:
      metadata:
        internal: metadata-tls-metadata
        public: metadata-tls-public
    compute_novnc_proxy:
      novncproxy:
        internal: nova-novncproxy-tls-proxy
        public: nova-novncproxy-tls-public
      vencrypt:
        internal: nova-novncproxy-vencrypt
    compute_spice_proxy:
      spiceproxy:
        internal: nova-spiceproxy-tls-proxy
        public: nova-spiceproxy-tls-public
# Per-backend TLS enablement; all disabled in this configuration.
tls:
  identity: false
  oslo_db: false
  oslo_messaging: false
