all:
  children:
    tempest:
      hosts:
        controller: null
    zuul_unreachable:
      hosts: {}
  hosts:
    controller:
      ansible_connection: ssh
      ansible_host: 199.204.45.44
      ansible_port: 22
      ansible_python_interpreter: auto
      ansible_user: zuul
      configure_swap_size: 8192
      devstack_local_conf:
        post-config:
          $NEUTRON_CONF:
            DEFAULT:
              global_physnet_mtu: '{{ external_bridge_mtu }}'
          /etc/magnum/magnum.conf:
            cluster_template:
              kubernetes_allowed_network_drivers: calico,cilium
              kubernetes_default_network_driver: calico
            nova_client:
              api_version: '2.15'
          /etc/manila/manila.conf:
            generic:
              connect_share_server_to_tenant_network: true
              driver_handles_share_servers: true
      devstack_localrc:
        ADMIN_PASSWORD: secretadmin
        DATABASE_PASSWORD: secretdatabase
        DEBUG_LIBVIRT_COREDUMPS: true
        DISABLE_AMP_IMAGE_BUILD: true
        ENABLE_SYSCTL_MEM_TUNING: true
        ENABLE_SYSCTL_NET_TUNING: true
        ENABLE_ZSWAP: true
        ERROR_ON_CLONE: true
        FIXED_RANGE: 10.1.0.0/20
        FLOATING_RANGE: 172.24.5.0/24
        GIT_BASE: https://github.com
        HOST_IP: '{{ hostvars[''controller''][''nodepool''][''private_ipv4''] }}'
        IPV4_ADDRS_SAFE_TO_USE: 10.1.0.0/20
        LIBVIRT_TYPE: '{{ devstack_libvirt_type | default("qemu") }}'
        LOGFILE: /opt/stack/logs/devstacklog.txt
        LOG_COLOR: false
        MAGNUM_GUEST_IMAGE_URL: '{{ image_url }}'
        MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS: snapshot_support=True create_share_from_snapshot_support=True
        MANILA_ENABLED_BACKENDS: generic
        MANILA_USE_SERVICE_INSTANCE_PASSWORD: true
        NETWORK_GATEWAY: 10.1.0.1
        NOVA_LIBVIRT_TB_CACHE_SIZE: 128
        NOVA_VNC_ENABLED: true
        OCTAVIA_NODE: api
        OVN_DBS_LOG_LEVEL: dbg
        PUBLIC_BRIDGE_MTU: '{{ external_bridge_mtu }}'
        PUBLIC_NETWORK_GATEWAY: 172.24.5.1
        RABBIT_PASSWORD: secretrabbit
        SERVICE_HOST: '{{ hostvars[''controller''][''nodepool''][''private_ipv4'']
          }}'
        SERVICE_PASSWORD: secretservice
        SWIFT_HASH: '1234123412341234'
        SWIFT_REPLICAS: 1
        SWIFT_START_ALL_SERVICES: false
        VERBOSE: true
        VERBOSE_NO_TIMESTAMP: true
      devstack_plugins:
        barbican: https://github.com/openstack/barbican
        magnum: https://review.opendev.org/openstack/magnum
        magnum-cluster-api: https://github.com/vexxhost/magnum-cluster-api
        manila: https://github.com/openstack/manila
        octavia: https://github.com/openstack/octavia
        ovn-octavia-provider: https://github.com/openstack/ovn-octavia-provider
      devstack_services:
        base: false
        c-api: true
        c-bak: true
        c-sch: true
        c-vol: true
        dstat: false
        etcd3: true
        file_tracker: true
        g-api: true
        horizon: false
        key: true
        memory_tracker: true
        mysql: true
        n-api: true
        n-api-meta: true
        n-cond: true
        n-cpu: true
        n-novnc: true
        n-sch: true
        o-api: true
        o-da: true
        o-hk: true
        octavia: true
        openstack-cli-server: true
        ovn-controller: true
        ovn-northd: true
        ovs-vswitchd: true
        ovsdb-server: true
        placement-api: true
        q-ovn-agent: true
        q-svc: true
        rabbit: true
        s-account: false
        s-container: false
        s-object: false
        s-proxy: false
        tempest: false
        tls-proxy: true
      extensions_to_txt:
        auto: true
        conf: true
        localrc: true
        log: true
        stackenv: true
      image_url: https://github.com/vexxhost/capo-image-elements/releases/latest/download/ubuntu-22.04-{{
        kube_tag }}.qcow2
      kube_tag: v1.33.9
      network_driver: calico
      nodepool:
        az: nova
        cloud: public
        external_id: 9368bdd2-f956-4a7d-b44d-70ceef402076
        host_id: a14e37c14509a0e10156ccf8c706cd5613db7e363735e5577c330644
        interface_ip: 199.204.45.44
        label: ubuntu-noble-16
        node_properties: {}
        private_ipv4: 199.204.45.44
        private_ipv6: null
        provider: yul1
        public_ipv4: 199.204.45.44
        public_ipv6: 2604:e100:1:0:f816:3eff:fe1d:b355
        region: ca-ymq-1
        slot: null
      zuul_copy_output:
        /etc/ceph: logs
        /etc/glusterfs/glusterd.vol: logs
        /etc/libvirt: logs
        /etc/lvm: logs
        /etc/resolv.conf: logs
        /etc/sudoers: logs
        /etc/sudoers.d: logs
        /var/log/ceph: logs
        /var/log/glusterfs: logs
        /var/log/libvirt: logs
        /var/log/mysql: logs
        /var/log/openvswitch: logs
        /var/log/postgresql: logs
        /var/log/rabbitmq: logs
        /var/log/unbound.log: logs
        '{{ devstack_conf_dir }}/.localrc.auto': logs
        '{{ devstack_conf_dir }}/.stackenv': logs
        '{{ devstack_conf_dir }}/local.conf': logs
        '{{ devstack_conf_dir }}/localrc': logs
        '{{ devstack_full_log }}': logs
        '{{ devstack_log_dir }}/atop': logs
        '{{ devstack_log_dir }}/devstacklog.txt': logs
        '{{ devstack_log_dir }}/devstacklog.txt.summary': logs
        '{{ devstack_log_dir }}/dstat-csv.log': logs
        '{{ devstack_log_dir }}/qemu.coredump': logs
        '{{ devstack_log_dir }}/tcpdump.pcap': logs
        '{{ devstack_log_dir }}/worlddump-latest.txt': logs
        '{{ stage_dir }}/apache': logs
        '{{ stage_dir }}/apache_config': logs
        '{{ stage_dir }}/audit.log': logs
        '{{ stage_dir }}/core': logs
        '{{ stage_dir }}/deprecations.log': logs
        '{{ stage_dir }}/df.txt': logs
        '{{ stage_dir }}/dpkg-l.txt': logs
        '{{ stage_dir }}/etc': logs
        '{{ stage_dir }}/iptables.txt': logs
        '{{ stage_dir }}/listen53.txt': logs
        '{{ stage_dir }}/mount.txt': logs
        '{{ stage_dir }}/performance.json': logs
        '{{ stage_dir }}/pip2-freeze.txt': logs
        '{{ stage_dir }}/pip3-freeze.txt': logs
        '{{ stage_dir }}/rpm-qa.txt': logs
        '{{ stage_dir }}/services.txt': logs
        '{{ stage_dir }}/verify_tempest_conf.log': logs
      zuul_node:
        az: nova
        cloud: public
        external_id: 9368bdd2-f956-4a7d-b44d-70ceef402076
        host_id: a14e37c14509a0e10156ccf8c706cd5613db7e363735e5577c330644
        interface_ip: 199.204.45.44
        label: ubuntu-noble-16
        node_properties: {}
        private_ipv4: 199.204.45.44
        private_ipv6: null
        provider: yul1
        public_ipv4: 199.204.45.44
        public_ipv6: 2604:e100:1:0:f816:3eff:fe1d:b355
        region: ca-ymq-1
        slot: null
        uuid: null
  vars:
    configure_swap_size: 8192
    devstack_local_conf:
      post-config:
        $NEUTRON_CONF:
          DEFAULT:
            global_physnet_mtu: '{{ external_bridge_mtu }}'
        /etc/magnum/magnum.conf:
          cluster_template:
            kubernetes_allowed_network_drivers: calico,cilium
            kubernetes_default_network_driver: calico
          nova_client:
            api_version: '2.15'
        /etc/manila/manila.conf:
          generic:
            connect_share_server_to_tenant_network: true
            driver_handles_share_servers: true
    devstack_localrc:
      ADMIN_PASSWORD: secretadmin
      DATABASE_PASSWORD: secretdatabase
      DEBUG_LIBVIRT_COREDUMPS: true
      DISABLE_AMP_IMAGE_BUILD: true
      ENABLE_SYSCTL_MEM_TUNING: true
      ENABLE_SYSCTL_NET_TUNING: true
      ENABLE_ZSWAP: true
      ERROR_ON_CLONE: true
      FIXED_RANGE: 10.1.0.0/20
      FLOATING_RANGE: 172.24.5.0/24
      GIT_BASE: https://github.com
      HOST_IP: '{{ hostvars[''controller''][''nodepool''][''private_ipv4''] }}'
      IPV4_ADDRS_SAFE_TO_USE: 10.1.0.0/20
      LIBVIRT_TYPE: '{{ devstack_libvirt_type | default("qemu") }}'
      LOGFILE: /opt/stack/logs/devstacklog.txt
      LOG_COLOR: false
      MAGNUM_GUEST_IMAGE_URL: '{{ image_url }}'
      MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS: snapshot_support=True create_share_from_snapshot_support=True
      MANILA_ENABLED_BACKENDS: generic
      MANILA_USE_SERVICE_INSTANCE_PASSWORD: true
      NETWORK_GATEWAY: 10.1.0.1
      NOVA_LIBVIRT_TB_CACHE_SIZE: 128
      NOVA_VNC_ENABLED: true
      OCTAVIA_NODE: api
      OVN_DBS_LOG_LEVEL: dbg
      PUBLIC_BRIDGE_MTU: '{{ external_bridge_mtu }}'
      PUBLIC_NETWORK_GATEWAY: 172.24.5.1
      RABBIT_PASSWORD: secretrabbit
      SERVICE_HOST: '{{ hostvars[''controller''][''nodepool''][''private_ipv4''] }}'
      SERVICE_PASSWORD: secretservice
      SWIFT_HASH: '1234123412341234'
      SWIFT_REPLICAS: 1
      SWIFT_START_ALL_SERVICES: false
      VERBOSE: true
      VERBOSE_NO_TIMESTAMP: true
    devstack_plugins:
      barbican: https://github.com/openstack/barbican
      magnum: https://review.opendev.org/openstack/magnum
      magnum-cluster-api: https://github.com/vexxhost/magnum-cluster-api
      manila: https://github.com/openstack/manila
      octavia: https://github.com/openstack/octavia
      ovn-octavia-provider: https://github.com/openstack/ovn-octavia-provider
    devstack_services:
      base: false
      c-api: true
      c-bak: true
      c-sch: true
      c-vol: true
      dstat: false
      etcd3: true
      file_tracker: true
      g-api: true
      horizon: false
      key: true
      memory_tracker: true
      mysql: true
      n-api: true
      n-api-meta: true
      n-cond: true
      n-cpu: true
      n-novnc: true
      n-sch: true
      o-api: true
      o-da: true
      o-hk: true
      octavia: true
      openstack-cli-server: true
      ovn-controller: true
      ovn-northd: true
      ovs-vswitchd: true
      ovsdb-server: true
      placement-api: true
      q-ovn-agent: true
      q-svc: true
      rabbit: true
      s-account: false
      s-container: false
      s-object: false
      s-proxy: false
      tempest: false
      tls-proxy: true
    extensions_to_txt:
      auto: true
      conf: true
      localrc: true
      log: true
      stackenv: true
    image_url: https://github.com/vexxhost/capo-image-elements/releases/latest/download/ubuntu-22.04-{{
      kube_tag }}.qcow2
    kube_tag: v1.33.9
    network_driver: calico
    zuul:
      _inheritance_path:
      - '<Job base explicit: None implied: {MatchAny:{ImpliedBranchMatcher:main}}
        source: vexxhost/zuul-config/zuul.d/jobs.yaml@main#1>'
      - '<Job openstack-multinode-fips explicit: None implied: {MatchAny:{ImpliedBranchMatcher:main}}
        source: vexxhost/zuul-config/zuul.d/jobs.yaml@main#17>'
      - '<Job devstack-base explicit: None implied: {MatchAny:{ImpliedBranchMatcher:master}}
        source: openstack/devstack/.zuul.yaml@master#426>'
      - '<Job devstack-minimal explicit: None implied: {MatchAny:{ImpliedBranchMatcher:master}}
        source: openstack/devstack/.zuul.yaml@master#558>'
      - '<Job devstack explicit: None implied: {MatchAny:{ImpliedBranchMatcher:master}}
        source: openstack/devstack/.zuul.yaml@master#601>'
      - '<Job magnum-cluster-api-devstack explicit: None implied: {MatchAny:{ImpliedBranchMatcher:main}}
        source: vexxhost/magnum-cluster-api/zuul.d/jobs.yaml@main#1>'
      - '<Job magnum-cluster-api-hydrophone explicit: None implied: {MatchAny:{ImpliedBranchMatcher:main}}
        source: vexxhost/magnum-cluster-api/zuul.d/jobs.yaml@main#60>'
      - '<Job magnum-cluster-api-hydrophone-v1.33.9 explicit: None implied: {MatchAny:{ImpliedBranchMatcher:main}}
        source: vexxhost/magnum-cluster-api/zuul.d/jobs.yaml@main#72>'
      - '<Job magnum-cluster-api-hydrophone-v1.33.9-calico explicit: None implied:
        {MatchAny:{ImpliedBranchMatcher:main}} source: vexxhost/magnum-cluster-api/zuul.d/jobs.yaml@main#78>'
      - '<Job magnum-cluster-api-hydrophone-v1.33.9-calico explicit: None implied:
        None source: vexxhost/magnum-cluster-api/zuul.d/project.yaml@main#1>'
      ansible_version: '9'
      attempts: 1
      branch: main
      build: 441837f343c24f4ea51ca106ad55faaf
      build_refs:
      - branch: main
        change: '909'
        change_message: "fix: add autoscaler labels annotation to MachineDeployment
          for scale-from-zero support\n\nWhen a nodegroup has `min_node_count=0`,
          the cluster autoscaler evaluates scheduling using a synthetic template node.
          Without explicit label hints, this template node lacks the role and nodegroup
          labels, causing `NodeAffinity` predicate failures and preventing scale-up
          from zero.\n\n## Changes\n\n- **`resources.py`**: When autoscaling is enabled,
          adds the `capacity.cluster-autoscaler.kubernetes.io/labels` annotation to
          the `MachineDeployment`, informing the autoscaler of the labels that will
          be present on nodes in this group:\n\n```python\n\"capacity.cluster-autoscaler.kubernetes.io/labels\":
          (\n    f\"node-role.kubernetes.io/{node_group.role}=,\"\n    f\"node.cluster.x-k8s.io/nodegroup={node_group.name}\"\n),\n```\n\n-
          **`tests/unit/test_resources.py`**: Extends the existing `TestExistingMutateMachineDeployment`
          test to assert the new annotation is correctly set when autoscaling is enabled.\n\n<!--
          START COPILOT ORIGINAL PROMPT -->\n\n\n\n<details>\n\n<summary>Original
          prompt</summary>\n\n> \n> ----\n> \n> *This section details on the original
          issue you should resolve*\n> \n> <issue_title>bug: Nodegroups cannot scale
          up from 0 replicas (autoscaler)</issue_title>\n> <issue_description>Hi,\n>
          I found out a bug in the clusterAPI driver linked to nodegroups and autoscalers.\n>
          **When a nodegroup is defined and has a min_size=0 (specified in the labels),
          the autoscaler fails to schedule pods on the group and nodes fail to create.**\n>
          \n> Commands used to create cluster and nodegroup:\n> ```\n> openstack coe
          cluster create --cluster-template \"noble-latest\" --master-count 1 --node-count
          1 --labels kube_tag=v1.35.2,server_group_policies=affinity,octavia_provider=amphora,boot_volume_size=50,auto_scaling_enabled=true,min_node_count=1,max_node_count=3
          cluster-noble\n> openstack coe nodegroup create cluster-noble mini-nodes
          --node-count 1 --flavor m2.mini --labels auto_scaling_enabled=true,min_node_count=0,max_node_count=2
          --role mini-node\n> ```\n> \n> Versions used for the CAPI controller:\n>
          k3s: v1.33.9+k3s1\n> Clusterctl: v1.10.10\n> kubeadm: v1.10.10\n> openstack:
          v0.12.7\n> \n> Example of error on the autoscaler:\n> ```\n> predicate \"NodeAffinity\"
          didn't pass\n> nodeName: \"template-node-for-MachineDeployment/magnum-system/kube-v0ane-mini-nodes-kcvcq-...\"\n>
          ```\n> As it can be seen the autoscaler uses a template to see if it is
          possible to schedule pods on the specific type of node.\n> This is linked
          on how the autoscaler tries to create the node and how it compares the required
          resources on the template rather than with a real node.\n> In the template
          of the node it is not specified the role/nodegroup, therefore, whenever
          it's tried to select the node using the role or the nodegroup, the creation
          fails.\n> \n> *TEMPORARY FIX*\n> To fix the above I did the following:\n>
          ```\n> kubectl annotate machinedeployment -n magnum-system kube-v0ane-mini-nodes-kcvcq
          \\\n>   \"capacity.cluster-autoscaler.kubernetes.io/labels=node-role.kubernetes.io/mini-node=,node.cluster.x-k8s.io/nodegroup=mini-nodes\"
          \\\n>   --overwrite\n> ```\n> The fix works fine for the newly created nodegroup
          but it won't work for future nodegroups.\n> \n> *PERMANENT FIX*\n> Edit
          the block below in file \"/var/lib/kolla/venv/lib/python3.12/site-packages/magnum_cluster_api/resources.py\"
          of the magnum_conductor:\n> ```\n>         machine_deployment[\"replicas\"]
          = None\n>         machine_deployment[\"metadata\"][\"annotations\"] = {\n>
          \            AUTOSCALE_ANNOTATION_MIN: str(node_group.min_node_count),\n>
          \            AUTOSCALE_ANNOTATION_MAX: str(\n>                 utils.get_node_group_max_node_count(node_group)\n>
          \            ),\n>             \"capacity.cluster-autoscaler.kubernetes.io/memory\":
          f\"{math.ceil(flavor.ram / 1024)}G\",\n>             \"capacity.cluster-autoscaler.kubernetes.io/cpu\":
          str(flavor.vcpus),\n>             \"capacity.cluster-autoscaler.kubernetes.io/ephemeral-disk\":
          str(\n>                 boot_volume_size\n>             ),\n>         }\n>
          \    else:\n>         machine_deployment[\"replicas\"] = node_group.node_count\n>
          \        machine_deployment[\"metadata\"][\"annotations\"] = {}\n> ```\n>
          to:\n> ```\n>         machine_deployment[\"replicas\"] = None\n>         machine_deployment[\"metadata\"][\"annotations\"]
          = {\n>             AUTOSCALE_ANNOTATION_MIN: str(node_group.min_node_count),\n>
          \            AUTOSCALE_ANNOTATION_MAX: str(\n>                 utils.get_node_group_max_node_count(node_group)\n>
          \            ),\n>             \"capacity.cluster-autoscaler.kubernetes.io/memory\":
          f\"{math.ceil(flavor.ram / 1024)}G\",\n>             \"capacity.cluster-autoscaler.kubernetes.io/cpu\":
          str(flavor.vcpus),\n>             \"capacity.cluster-autoscaler.kubernetes.io/ephemeral-disk\":
          str(\n>                 boot_volume_size\n>             ),\n>             \"capacity.cluster-autoscaler.kubernetes.io/labels\":
          (\n>                 f\"node-role.kubernetes.io/{node_group.role}=,\"\n>
          \                f\"node.cluster.x-k8s.io/nodegroup={node_group.name}\"\n>
          \            ),\n>         }\n>     else:\n>         machine_deployment[\"replicas\"]
          = node_group.node_count\n>         machine_deployment[\"metadata\"][\"annotations\"]
          = {}\n> ```\n> With the above addition after restarting the container autoscaler
          works as intended and it is possible to create nodes with min_size=0.\n>
          \n> I would create a PR for this but I don't know if it was brought up before
          or not. :)</issue_description>\n> \n> ## Comments on the Issue (you are
          @copilot in this section)\n> \n> <comments>\n> </comments>\n> \n\n\n</details>\n\n\n\n<!--
          START COPILOT CODING AGENT SUFFIX -->\n\n- Fixes vexxhost/magnum-cluster-api#907\n\n<!--
          START COPILOT CODING AGENT TIPS -->\n---\n\n\U0001F512 GitHub Advanced Security
          automatically protects Copilot coding agent pull requests. You can protect
          all pull requests by enabling Advanced Security for your repositories. [Learn
          more about Advanced Security.](https://gh.io/cca-advanced-security)"
        change_url: https://github.com/vexxhost/magnum-cluster-api/pull/909
        commit_id: 068ce80eaa56c0b2a0663165e67328ffea18f2a0
        patchset: 068ce80eaa56c0b2a0663165e67328ffea18f2a0
        project:
          canonical_hostname: github.com
          canonical_name: github.com/vexxhost/magnum-cluster-api
          name: vexxhost/magnum-cluster-api
          short_name: magnum-cluster-api
          src_dir: src/github.com/vexxhost/magnum-cluster-api
        src_dir: src/github.com/vexxhost/magnum-cluster-api
        topic: null
      buildset: 234d5740330445fbaf1e5546861bbc28
      buildset_refs:
      - branch: main
        change: '909'
        change_message: "fix: add autoscaler labels annotation to MachineDeployment
          for scale-from-zero support\n\nWhen a nodegroup has `min_node_count=0`,
          the cluster autoscaler evaluates scheduling using a synthetic template node.
          Without explicit label hints, this template node lacks the role and nodegroup
          labels, causing `NodeAffinity` predicate failures and preventing scale-up
          from zero.\n\n## Changes\n\n- **`resources.py`**: When autoscaling is enabled,
          adds the `capacity.cluster-autoscaler.kubernetes.io/labels` annotation to
          the `MachineDeployment`, informing the autoscaler of the labels that will
          be present on nodes in this group:\n\n```python\n\"capacity.cluster-autoscaler.kubernetes.io/labels\":
          (\n    f\"node-role.kubernetes.io/{node_group.role}=,\"\n    f\"node.cluster.x-k8s.io/nodegroup={node_group.name}\"\n),\n```\n\n-
          **`tests/unit/test_resources.py`**: Extends the existing `TestExistingMutateMachineDeployment`
          test to assert the new annotation is correctly set when autoscaling is enabled.\n\n<!--
          START COPILOT ORIGINAL PROMPT -->\n\n\n\n<details>\n\n<summary>Original
          prompt</summary>\n\n> \n> ----\n> \n> *This section details on the original
          issue you should resolve*\n> \n> <issue_title>bug: Nodegroups cannot scale
          up from 0 replicas (autoscaler)</issue_title>\n> <issue_description>Hi,\n>
          I found out a bug in the clusterAPI driver linked to nodegroups and autoscalers.\n>
          **When a nodegroup is defined and has a min_size=0 (specified in the labels),
          the autoscaler fails to schedule pods on the group and nodes fail to create.**\n>
          \n> Commands used to create cluster and nodegroup:\n> ```\n> openstack coe
          cluster create --cluster-template \"noble-latest\" --master-count 1 --node-count
          1 --labels kube_tag=v1.35.2,server_group_policies=affinity,octavia_provider=amphora,boot_volume_size=50,auto_scaling_enabled=true,min_node_count=1,max_node_count=3
          cluster-noble\n> openstack coe nodegroup create cluster-noble mini-nodes
          --node-count 1 --flavor m2.mini --labels auto_scaling_enabled=true,min_node_count=0,max_node_count=2
          --role mini-node\n> ```\n> \n> Versions used for the CAPI controller:\n>
          k3s: v1.33.9+k3s1\n> Clusterctl: v1.10.10\n> kubeadm: v1.10.10\n> openstack:
          v0.12.7\n> \n> Example of error on the autoscaler:\n> ```\n> predicate \"NodeAffinity\"
          didn't pass\n> nodeName: \"template-node-for-MachineDeployment/magnum-system/kube-v0ane-mini-nodes-kcvcq-...\"\n>
          ```\n> As it can be seen the autoscaler uses a template to see if it is
          possible to schedule pods on the specific type of node.\n> This is linked
          on how the autoscaler tries to create the node and how it compares the required
          resources on the template rather than with a real node.\n> In the template
          of the node it is not specified the role/nodegroup, therefore, whenever
          it's tried to select the node using the role or the nodegroup, the creation
          fails.\n> \n> *TEMPORARY FIX*\n> To fix the above I did the following:\n>
          ```\n> kubectl annotate machinedeployment -n magnum-system kube-v0ane-mini-nodes-kcvcq
          \\\n>   \"capacity.cluster-autoscaler.kubernetes.io/labels=node-role.kubernetes.io/mini-node=,node.cluster.x-k8s.io/nodegroup=mini-nodes\"
          \\\n>   --overwrite\n> ```\n> The fix works fine for the newly created nodegroup
          but it won't work for future nodegroups.\n> \n> *PERMANENT FIX*\n> Edit
          the block below in file \"/var/lib/kolla/venv/lib/python3.12/site-packages/magnum_cluster_api/resources.py\"
          of the magnum_conductor:\n> ```\n>         machine_deployment[\"replicas\"]
          = None\n>         machine_deployment[\"metadata\"][\"annotations\"] = {\n>
          \            AUTOSCALE_ANNOTATION_MIN: str(node_group.min_node_count),\n>
          \            AUTOSCALE_ANNOTATION_MAX: str(\n>                 utils.get_node_group_max_node_count(node_group)\n>
          \            ),\n>             \"capacity.cluster-autoscaler.kubernetes.io/memory\":
          f\"{math.ceil(flavor.ram / 1024)}G\",\n>             \"capacity.cluster-autoscaler.kubernetes.io/cpu\":
          str(flavor.vcpus),\n>             \"capacity.cluster-autoscaler.kubernetes.io/ephemeral-disk\":
          str(\n>                 boot_volume_size\n>             ),\n>         }\n>
          \    else:\n>         machine_deployment[\"replicas\"] = node_group.node_count\n>
          \        machine_deployment[\"metadata\"][\"annotations\"] = {}\n> ```\n>
          to:\n> ```\n>         machine_deployment[\"replicas\"] = None\n>         machine_deployment[\"metadata\"][\"annotations\"]
          = {\n>             AUTOSCALE_ANNOTATION_MIN: str(node_group.min_node_count),\n>
          \            AUTOSCALE_ANNOTATION_MAX: str(\n>                 utils.get_node_group_max_node_count(node_group)\n>
          \            ),\n>             \"capacity.cluster-autoscaler.kubernetes.io/memory\":
          f\"{math.ceil(flavor.ram / 1024)}G\",\n>             \"capacity.cluster-autoscaler.kubernetes.io/cpu\":
          str(flavor.vcpus),\n>             \"capacity.cluster-autoscaler.kubernetes.io/ephemeral-disk\":
          str(\n>                 boot_volume_size\n>             ),\n>             \"capacity.cluster-autoscaler.kubernetes.io/labels\":
          (\n>                 f\"node-role.kubernetes.io/{node_group.role}=,\"\n>
          \                f\"node.cluster.x-k8s.io/nodegroup={node_group.name}\"\n>
          \            ),\n>         }\n>     else:\n>         machine_deployment[\"replicas\"]
          = node_group.node_count\n>         machine_deployment[\"metadata\"][\"annotations\"]
          = {}\n> ```\n> With the above addition after restarting the container autoscaler
          works as intended and it is possible to create nodes with min_size=0.\n>
          \n> I would create a PR for this but I don't know if it was brought up before
          or not. :)</issue_description>\n> \n> ## Comments on the Issue (you are
          @copilot in this section)\n> \n> <comments>\n> </comments>\n> \n\n\n</details>\n\n\n\n<!--
          START COPILOT CODING AGENT SUFFIX -->\n\n- Fixes vexxhost/magnum-cluster-api#907\n\n<!--
          START COPILOT CODING AGENT TIPS -->\n---\n\n\U0001F512 GitHub Advanced Security
          automatically protects Copilot coding agent pull requests. You can protect
          all pull requests by enabling Advanced Security for your repositories. [Learn
          more about Advanced Security.](https://gh.io/cca-advanced-security)"
        change_url: https://github.com/vexxhost/magnum-cluster-api/pull/909
        commit_id: 068ce80eaa56c0b2a0663165e67328ffea18f2a0
        patchset: 068ce80eaa56c0b2a0663165e67328ffea18f2a0
        project:
          canonical_hostname: github.com
          canonical_name: github.com/vexxhost/magnum-cluster-api
          name: vexxhost/magnum-cluster-api
          short_name: magnum-cluster-api
          src_dir: src/github.com/vexxhost/magnum-cluster-api
        src_dir: src/github.com/vexxhost/magnum-cluster-api
        topic: null
      change: '909'
      change_message: "fix: add autoscaler labels annotation to MachineDeployment
        for scale-from-zero support\n\nWhen a nodegroup has `min_node_count=0`, the
        cluster autoscaler evaluates scheduling using a synthetic template node. Without
        explicit label hints, this template node lacks the role and nodegroup labels,
        causing `NodeAffinity` predicate failures and preventing scale-up from zero.\n\n##
        Changes\n\n- **`resources.py`**: When autoscaling is enabled, adds the `capacity.cluster-autoscaler.kubernetes.io/labels`
        annotation to the `MachineDeployment`, informing the autoscaler of the labels
        that will be present on nodes in this group:\n\n```python\n\"capacity.cluster-autoscaler.kubernetes.io/labels\":
        (\n    f\"node-role.kubernetes.io/{node_group.role}=,\"\n    f\"node.cluster.x-k8s.io/nodegroup={node_group.name}\"\n),\n```\n\n-
        **`tests/unit/test_resources.py`**: Extends the existing `TestExistingMutateMachineDeployment`
        test to assert the new annotation is correctly set when autoscaling is enabled.\n\n<!--
        START COPILOT ORIGINAL PROMPT -->\n\n\n\n<details>\n\n<summary>Original prompt</summary>\n\n>
        \n> ----\n> \n> *This section details on the original issue you should resolve*\n>
        \n> <issue_title>bug: Nodegroups cannot scale up from 0 replicas (autoscaler)</issue_title>\n>
        <issue_description>Hi,\n> I found out a bug in the clusterAPI driver linked
        to nodegroups and autoscalers.\n> **When a nodegroup is defined and has a
        min_size=0 (specified in the labels), the autoscaler fails to schedule pods
        on the group and nodes fail to create.**\n> \n> Commands used to create cluster
        and nodegroup:\n> ```\n> openstack coe cluster create --cluster-template \"noble-latest\"
        --master-count 1 --node-count 1 --labels kube_tag=v1.35.2,server_group_policies=affinity,octavia_provider=amphora,boot_volume_size=50,auto_scaling_enabled=true,min_node_count=1,max_node_count=3
        cluster-noble\n> openstack coe nodegroup create cluster-noble mini-nodes --node-count
        1 --flavor m2.mini --labels auto_scaling_enabled=true,min_node_count=0,max_node_count=2
        --role mini-node\n> ```\n> \n> Versions used for the CAPI controller:\n> k3s:
        v1.33.9+k3s1\n> Clusterctl: v1.10.10\n> kubeadm: v1.10.10\n> openstack: v0.12.7\n>
        \n> Example of error on the autoscaler:\n> ```\n> predicate \"NodeAffinity\"
        didn't pass\n> nodeName: \"template-node-for-MachineDeployment/magnum-system/kube-v0ane-mini-nodes-kcvcq-...\"\n>
        ```\n> As it can be seen the autoscaler uses a template to see if it is possible
        to schedule pods on the specific type of node.\n> This is linked on how the
        autoscaler tries to create the node and how it compares the required resources
        on the template rather than with a real node.\n> In the template of the node
        it is not specified the role/nodegroup, therefore, whenever it's tried to
        select the node using the role or the nodegroup, the creation fails.\n> \n>
        *TEMPORARY FIX*\n> To fix the above I did the following:\n> ```\n> kubectl
        annotate machinedeployment -n magnum-system kube-v0ane-mini-nodes-kcvcq \\\n>
        \  \"capacity.cluster-autoscaler.kubernetes.io/labels=node-role.kubernetes.io/mini-node=,node.cluster.x-k8s.io/nodegroup=mini-nodes\"
        \\\n>   --overwrite\n> ```\n> The fix works fine for the newly created nodegroup
        but it won't work for future nodegroups.\n> \n> *PERMANENT FIX*\n> Edit the
        block below in file \"/var/lib/kolla/venv/lib/python3.12/site-packages/magnum_cluster_api/resources.py\"
        of the magnum_conductor:\n> ```\n>         machine_deployment[\"replicas\"]
        = None\n>         machine_deployment[\"metadata\"][\"annotations\"] = {\n>
        \            AUTOSCALE_ANNOTATION_MIN: str(node_group.min_node_count),\n>
        \            AUTOSCALE_ANNOTATION_MAX: str(\n>                 utils.get_node_group_max_node_count(node_group)\n>
        \            ),\n>             \"capacity.cluster-autoscaler.kubernetes.io/memory\":
        f\"{math.ceil(flavor.ram / 1024)}G\",\n>             \"capacity.cluster-autoscaler.kubernetes.io/cpu\":
        str(flavor.vcpus),\n>             \"capacity.cluster-autoscaler.kubernetes.io/ephemeral-disk\":
        str(\n>                 boot_volume_size\n>             ),\n>         }\n>
        \    else:\n>         machine_deployment[\"replicas\"] = node_group.node_count\n>
        \        machine_deployment[\"metadata\"][\"annotations\"] = {}\n> ```\n>
        to:\n> ```\n>         machine_deployment[\"replicas\"] = None\n>         machine_deployment[\"metadata\"][\"annotations\"]
        = {\n>             AUTOSCALE_ANNOTATION_MIN: str(node_group.min_node_count),\n>
        \            AUTOSCALE_ANNOTATION_MAX: str(\n>                 utils.get_node_group_max_node_count(node_group)\n>
        \            ),\n>             \"capacity.cluster-autoscaler.kubernetes.io/memory\":
        f\"{math.ceil(flavor.ram / 1024)}G\",\n>             \"capacity.cluster-autoscaler.kubernetes.io/cpu\":
        str(flavor.vcpus),\n>             \"capacity.cluster-autoscaler.kubernetes.io/ephemeral-disk\":
        str(\n>                 boot_volume_size\n>             ),\n>             \"capacity.cluster-autoscaler.kubernetes.io/labels\":
        (\n>                 f\"node-role.kubernetes.io/{node_group.role}=,\"\n>                 f\"node.cluster.x-k8s.io/nodegroup={node_group.name}\"\n>
        \            ),\n>         }\n>     else:\n>         machine_deployment[\"replicas\"]
        = node_group.node_count\n>         machine_deployment[\"metadata\"][\"annotations\"]
        = {}\n> ```\n> With the above addition after restarting the container autoscaler
        works as intended and it is possible to create nodes with min_size=0.\n> \n>
        I would create a PR for this but I don't know if it was brought up before
        or not. :)</issue_description>\n> \n> ## Comments on the Issue (you are @copilot
        in this section)\n> \n> <comments>\n> </comments>\n> \n\n\n</details>\n\n\n\n<!--
        START COPILOT CODING AGENT SUFFIX -->\n\n- Fixes vexxhost/magnum-cluster-api#907\n\n<!--
        START COPILOT CODING AGENT TIPS -->\n---\n\n\U0001F512 GitHub Advanced Security
        automatically protects Copilot coding agent pull requests. You can protect
        all pull requests by enabling Advanced Security for your repositories. [Learn
        more about Advanced Security.](https://gh.io/cca-advanced-security)"
      change_url: https://github.com/vexxhost/magnum-cluster-api/pull/909
      child_jobs: []
      commit_id: 068ce80eaa56c0b2a0663165e67328ffea18f2a0
      event_id: 4a097be0-287b-11f1-9ee6-1b394afcaa02
      executor:
        hostname: 0a8996d2b663
        inventory_file: /var/lib/zuul/builds/441837f343c24f4ea51ca106ad55faaf/ansible/inventory.yaml
        log_root: /var/lib/zuul/builds/441837f343c24f4ea51ca106ad55faaf/work/logs
        result_data_file: /var/lib/zuul/builds/441837f343c24f4ea51ca106ad55faaf/work/results.json
        src_root: /var/lib/zuul/builds/441837f343c24f4ea51ca106ad55faaf/work/src
        work_root: /var/lib/zuul/builds/441837f343c24f4ea51ca106ad55faaf/work
      include_vars: []
      items:
      - branch: main
        change: '909'
        change_message: "fix: add autoscaler labels annotation to MachineDeployment
          for scale-from-zero support\n\nWhen a nodegroup has `min_node_count=0`,
          the cluster autoscaler evaluates scheduling using a synthetic template node.
          Without explicit label hints, this template node lacks the role and nodegroup
          labels, causing `NodeAffinity` predicate failures and preventing scale-up
          from zero.\n\n## Changes\n\n- **`resources.py`**: When autoscaling is enabled,
          adds the `capacity.cluster-autoscaler.kubernetes.io/labels` annotation to
          the `MachineDeployment`, informing the autoscaler of the labels that will
          be present on nodes in this group:\n\n```python\n\"capacity.cluster-autoscaler.kubernetes.io/labels\":
          (\n    f\"node-role.kubernetes.io/{node_group.role}=,\"\n    f\"node.cluster.x-k8s.io/nodegroup={node_group.name}\"\n),\n```\n\n-
          **`tests/unit/test_resources.py`**: Extends the existing `TestExistingMutateMachineDeployment`
          test to assert the new annotation is correctly set when autoscaling is enabled.\n\n<!--
          START COPILOT ORIGINAL PROMPT -->\n\n\n\n<details>\n\n<summary>Original
          prompt</summary>\n\n> \n> ----\n> \n> *This section details on the original
          issue you should resolve*\n> \n> <issue_title>bug: Nodegroups cannot scale
          up from 0 replicas (autoscaler)</issue_title>\n> <issue_description>Hi,\n>
          I found out a bug in the clusterAPI driver linked to nodegroups and autoscalers.\n>
          **When a nodegroup is defined and has a min_size=0 (specified in the labels),
          the autoscaler fails to schedule pods on the group and nodes fail to create.**\n>
          \n> Commands used to create cluster and nodegroup:\n> ```\n> openstack coe
          cluster create --cluster-template \"noble-latest\" --master-count 1 --node-count
          1 --labels kube_tag=v1.35.2,server_group_policies=affinity,octavia_provider=amphora,boot_volume_size=50,auto_scaling_enabled=true,min_node_count=1,max_node_count=3
          cluster-noble\n> openstack coe nodegroup create cluster-noble mini-nodes
          --node-count 1 --flavor m2.mini --labels auto_scaling_enabled=true,min_node_count=0,max_node_count=2
          --role mini-node\n> ```\n> \n> Versions used for the CAPI controller:\n>
          k3s: v1.33.9+k3s1\n> Clusterctl: v1.10.10\n> kubeadm: v1.10.10\n> openstack:
          v0.12.7\n> \n> Example of error on the autoscaler:\n> ```\n> predicate \"NodeAffinity\"
          didn't pass\n> nodeName: \"template-node-for-MachineDeployment/magnum-system/kube-v0ane-mini-nodes-kcvcq-...\"\n>
          ```\n> As it can be seen the autoscaler uses a template to see if it is
          possible to schedule pods on the specific type of node.\n> This is linked
          on how the autoscaler tries to create the node and how it compares the required
          resources on the template rather than with a real node.\n> In the template
          of the node it is not specified the role/nodegroup, therefore, whenever
          it's tried to select the node using the role or the nodegroup, the creation
          fails.\n> \n> *TEMPORARY FIX*\n> To fix the above I did the following:\n>
          ```\n> kubectl annotate machinedeployment -n magnum-system kube-v0ane-mini-nodes-kcvcq
          \\\n>   \"capacity.cluster-autoscaler.kubernetes.io/labels=node-role.kubernetes.io/mini-node=,node.cluster.x-k8s.io/nodegroup=mini-nodes\"
          \\\n>   --overwrite\n> ```\n> The fix works fine for the newly created nodegroup
          but it won't work for future nodegroups.\n> \n> *PERMANENT FIX*\n> Edit
          the block below in file \"/var/lib/kolla/venv/lib/python3.12/site-packages/magnum_cluster_api/resources.py\"
          of the magnum_conductor:\n> ```\n>         machine_deployment[\"replicas\"]
          = None\n>         machine_deployment[\"metadata\"][\"annotations\"] = {\n>
          \            AUTOSCALE_ANNOTATION_MIN: str(node_group.min_node_count),\n>
          \            AUTOSCALE_ANNOTATION_MAX: str(\n>                 utils.get_node_group_max_node_count(node_group)\n>
          \            ),\n>             \"capacity.cluster-autoscaler.kubernetes.io/memory\":
          f\"{math.ceil(flavor.ram / 1024)}G\",\n>             \"capacity.cluster-autoscaler.kubernetes.io/cpu\":
          str(flavor.vcpus),\n>             \"capacity.cluster-autoscaler.kubernetes.io/ephemeral-disk\":
          str(\n>                 boot_volume_size\n>             ),\n>         }\n>
          \    else:\n>         machine_deployment[\"replicas\"] = node_group.node_count\n>
          \        machine_deployment[\"metadata\"][\"annotations\"] = {}\n> ```\n>
          to:\n> ```\n>         machine_deployment[\"replicas\"] = None\n>         machine_deployment[\"metadata\"][\"annotations\"]
          = {\n>             AUTOSCALE_ANNOTATION_MIN: str(node_group.min_node_count),\n>
          \            AUTOSCALE_ANNOTATION_MAX: str(\n>                 utils.get_node_group_max_node_count(node_group)\n>
          \            ),\n>             \"capacity.cluster-autoscaler.kubernetes.io/memory\":
          f\"{math.ceil(flavor.ram / 1024)}G\",\n>             \"capacity.cluster-autoscaler.kubernetes.io/cpu\":
          str(flavor.vcpus),\n>             \"capacity.cluster-autoscaler.kubernetes.io/ephemeral-disk\":
          str(\n>                 boot_volume_size\n>             ),\n>             \"capacity.cluster-autoscaler.kubernetes.io/labels\":
          (\n>                 f\"node-role.kubernetes.io/{node_group.role}=,\"\n>
          \                f\"node.cluster.x-k8s.io/nodegroup={node_group.name}\"\n>
          \            ),\n>         }\n>     else:\n>         machine_deployment[\"replicas\"]
          = node_group.node_count\n>         machine_deployment[\"metadata\"][\"annotations\"]
          = {}\n> ```\n> With the above addition after restarting the container autoscaler
          works as intended and it is possible to create nodes with min_size=0.\n>
          \n> I would create a PR for this but I don't know if it was brought up before
          or not. :)</issue_description>\n> \n> ## Comments on the Issue (you are
          @copilot in this section)\n> \n> <comments>\n> </comments>\n> \n\n\n</details>\n\n\n\n<!--
          START COPILOT CODING AGENT SUFFIX -->\n\n- Fixes vexxhost/magnum-cluster-api#907\n\n<!--
          START COPILOT CODING AGENT TIPS -->\n---\n\n\U0001F512 GitHub Advanced Security
          automatically protects Copilot coding agent pull requests. You can protect
          all pull requests by enabling Advanced Security for your repositories. [Learn
          more about Advanced Security.](https://gh.io/cca-advanced-security)"
        change_url: https://github.com/vexxhost/magnum-cluster-api/pull/909
        commit_id: 068ce80eaa56c0b2a0663165e67328ffea18f2a0
        patchset: 068ce80eaa56c0b2a0663165e67328ffea18f2a0
        project:
          canonical_hostname: github.com
          canonical_name: github.com/vexxhost/magnum-cluster-api
          name: vexxhost/magnum-cluster-api
          short_name: magnum-cluster-api
          src_dir: src/github.com/vexxhost/magnum-cluster-api
        topic: null
      job: magnum-cluster-api-hydrophone-v1.33.9-calico
      jobtags: []
      max_attempts: 3
      message: Zml4OiBhZGQgYXV0b3NjYWxlciBsYWJlbHMgYW5ub3RhdGlvbiB0byBNYWNoaW5lRGVwbG95bWVudCBmb3Igc2NhbGUtZnJvbS16ZXJvIHN1cHBvcnQKCldoZW4gYSBub2RlZ3JvdXAgaGFzIGBtaW5fbm9kZV9jb3VudD0wYCwgdGhlIGNsdXN0ZXIgYXV0b3NjYWxlciBldmFsdWF0ZXMgc2NoZWR1bGluZyB1c2luZyBhIHN5bnRoZXRpYyB0ZW1wbGF0ZSBub2RlLiBXaXRob3V0IGV4cGxpY2l0IGxhYmVsIGhpbnRzLCB0aGlzIHRlbXBsYXRlIG5vZGUgbGFja3MgdGhlIHJvbGUgYW5kIG5vZGVncm91cCBsYWJlbHMsIGNhdXNpbmcgYE5vZGVBZmZpbml0eWAgcHJlZGljYXRlIGZhaWx1cmVzIGFuZCBwcmV2ZW50aW5nIHNjYWxlLXVwIGZyb20gemVyby4KCiMjIENoYW5nZXMKCi0gKipgcmVzb3VyY2VzLnB5YCoqOiBXaGVuIGF1dG9zY2FsaW5nIGlzIGVuYWJsZWQsIGFkZHMgdGhlIGBjYXBhY2l0eS5jbHVzdGVyLWF1dG9zY2FsZXIua3ViZXJuZXRlcy5pby9sYWJlbHNgIGFubm90YXRpb24gdG8gdGhlIGBNYWNoaW5lRGVwbG95bWVudGAsIGluZm9ybWluZyB0aGUgYXV0b3NjYWxlciBvZiB0aGUgbGFiZWxzIHRoYXQgd2lsbCBiZSBwcmVzZW50IG9uIG5vZGVzIGluIHRoaXMgZ3JvdXA6CgpgYGBweXRob24KImNhcGFjaXR5LmNsdXN0ZXItYXV0b3NjYWxlci5rdWJlcm5ldGVzLmlvL2xhYmVscyI6ICgKICAgIGYibm9kZS1yb2xlLmt1YmVybmV0ZXMuaW8ve25vZGVfZ3JvdXAucm9sZX09LCIKICAgIGYibm9kZS5jbHVzdGVyLngtazhzLmlvL25vZGVncm91cD17bm9kZV9ncm91cC5uYW1lfSIKKSwKYGBgCgotICoqYHRlc3RzL3VuaXQvdGVzdF9yZXNvdXJjZXMucHlgKio6IEV4dGVuZHMgdGhlIGV4aXN0aW5nIGBUZXN0RXhpc3RpbmdNdXRhdGVNYWNoaW5lRGVwbG95bWVudGAgdGVzdCB0byBhc3NlcnQgdGhlIG5ldyBhbm5vdGF0aW9uIGlzIGNvcnJlY3RseSBzZXQgd2hlbiBhdXRvc2NhbGluZyBpcyBlbmFibGVkLgoKPCEtLSBTVEFSVCBDT1BJTE9UIE9SSUdJTkFMIFBST01QVCAtLT4KCgoKPGRldGFpbHM+Cgo8c3VtbWFyeT5PcmlnaW5hbCBwcm9tcHQ8L3N1bW1hcnk+Cgo+IAo+IC0tLS0KPiAKPiAqVGhpcyBzZWN0aW9uIGRldGFpbHMgb24gdGhlIG9yaWdpbmFsIGlzc3VlIHlvdSBzaG91bGQgcmVzb2x2ZSoKPiAKPiA8aXNzdWVfdGl0bGU+YnVnOiBOb2RlZ3JvdXBzIGNhbm5vdCBzY2FsZSB1cCBmcm9tIDAgcmVwbGljYXMgKGF1dG9zY2FsZXIpPC9pc3N1ZV90aXRsZT4KPiA8aXNzdWVfZGVzY3JpcHRpb24+SGksCj4gSSBmb3VuZCBvdXQgYSBidWcgaW4gdGhlIGNsdXN0ZXJBUEkgZHJpdmVyIGxpbmtlZCB0byBub2RlZ3JvdXBzIGFuZCBhdXRvc2NhbGVycy4KPiAqKldoZW4gYSBub2RlZ3JvdXAgaXMgZGVmaW5lZCBhbmQgaGFzIGEgbWluX3NpemU9MCAoc3BlY2lmaWVkIGluIHRoZSBsYWJlbHMpLCB0aGUgYXV0b3NjYWxlciBmYWlscyB0byBzY2hlZHVsZSBwb2RzIG9uIHRoZSBncm91cCBhbmQgbm9kZXMgZmFpb
CB0byBjcmVhdGUuKioKPiAKPiBDb21tYW5kcyB1c2VkIHRvIGNyZWF0ZSBjbHVzdGVyIGFuZCBub2RlZ3JvdXA6Cj4gYGBgCj4gb3BlbnN0YWNrIGNvZSBjbHVzdGVyIGNyZWF0ZSAtLWNsdXN0ZXItdGVtcGxhdGUgIm5vYmxlLWxhdGVzdCIgLS1tYXN0ZXItY291bnQgMSAtLW5vZGUtY291bnQgMSAtLWxhYmVscyBrdWJlX3RhZz12MS4zNS4yLHNlcnZlcl9ncm91cF9wb2xpY2llcz1hZmZpbml0eSxvY3RhdmlhX3Byb3ZpZGVyPWFtcGhvcmEsYm9vdF92b2x1bWVfc2l6ZT01MCxhdXRvX3NjYWxpbmdfZW5hYmxlZD10cnVlLG1pbl9ub2RlX2NvdW50PTEsbWF4X25vZGVfY291bnQ9MyBjbHVzdGVyLW5vYmxlCj4gb3BlbnN0YWNrIGNvZSBub2RlZ3JvdXAgY3JlYXRlIGNsdXN0ZXItbm9ibGUgbWluaS1ub2RlcyAtLW5vZGUtY291bnQgMSAtLWZsYXZvciBtMi5taW5pIC0tbGFiZWxzIGF1dG9fc2NhbGluZ19lbmFibGVkPXRydWUsbWluX25vZGVfY291bnQ9MCxtYXhfbm9kZV9jb3VudD0yIC0tcm9sZSBtaW5pLW5vZGUKPiBgYGAKPiAKPiBWZXJzaW9ucyB1c2VkIGZvciB0aGUgQ0FQSSBjb250cm9sbGVyOgo+IGszczogdjEuMzMuOStrM3MxCj4gQ2x1c3RlcmN0bDogdjEuMTAuMTAKPiBrdWJlYWRtOiB2MS4xMC4xMAo+IG9wZW5zdGFjazogdjAuMTIuNwo+IAo+IEV4YW1wbGUgb2YgZXJyb3Igb24gdGhlIGF1dG9zY2FsZXI6Cj4gYGBgCj4gcHJlZGljYXRlICJOb2RlQWZmaW5pdHkiIGRpZG4ndCBwYXNzCj4gbm9kZU5hbWU6ICJ0ZW1wbGF0ZS1ub2RlLWZvci1NYWNoaW5lRGVwbG95bWVudC9tYWdudW0tc3lzdGVtL2t1YmUtdjBhbmUtbWluaS1ub2Rlcy1rY3ZjcS0uLi4iCj4gYGBgCj4gQXMgaXQgY2FuIGJlIHNlZW4gdGhlIGF1dG9zY2FsZXIgdXNlcyBhIHRlbXBsYXRlIHRvIHNlZSBpZiBpdCBpcyBwb3NzaWJsZSB0byBzY2hlZHVsZSBwb2RzIG9uIHRoZSBzcGVjaWZpYyB0eXBlIG9mIG5vZGUuCj4gVGhpcyBpcyBsaW5rZWQgb24gaG93IHRoZSBhdXRvc2NhbGVyIHRyaWVzIHRvIGNyZWF0ZSB0aGUgbm9kZSBhbmQgaG93IGl0IGNvbXBhcmVzIHRoZSByZXF1aXJlZCByZXNvdXJjZXMgb24gdGhlIHRlbXBsYXRlIHJhdGhlciB0aGFuIHdpdGggYSByZWFsIG5vZGUuCj4gSW4gdGhlIHRlbXBsYXRlIG9mIHRoZSBub2RlIGl0IGlzIG5vdCBzcGVjaWZpZWQgdGhlIHJvbGUvbm9kZWdyb3VwLCB0aGVyZWZvcmUsIHdoZW5ldmVyIGl0J3MgdHJpZWQgdG8gc2VsZWN0IHRoZSBub2RlIHVzaW5nIHRoZSByb2xlIG9yIHRoZSBub2RlZ3JvdXAsIHRoZSBjcmVhdGlvbiBmYWlscy4KPiAKPiAqVEVNUE9SQVJZIEZJWCoKPiBUbyBmaXggdGhlIGFib3ZlIEkgZGlkIHRoZSBmb2xsb3dpbmc6Cj4gYGBgCj4ga3ViZWN0bCBhbm5vdGF0ZSBtYWNoaW5lZGVwbG95bWVudCAtbiBtYWdudW0tc3lzdGVtIGt1YmUtdjBhbmUtbWluaS1ub2Rlcy1rY3ZjcSBcCj4gICAiY2FwYWNpdHkuY2x1c3Rlci1hdXRvc2NhbGVyLmt1YmVybmV0ZXMuaW8vbGFiZWxzPW5vZ
GUtcm9sZS5rdWJlcm5ldGVzLmlvL21pbmktbm9kZT0sbm9kZS5jbHVzdGVyLngtazhzLmlvL25vZGVncm91cD1taW5pLW5vZGVzIiBcCj4gICAtLW92ZXJ3cml0ZQo+IGBgYAo+IFRoZSBmaXggd29ya3MgZmluZSBmb3IgdGhlIG5ld2x5IGNyZWF0ZWQgbm9kZWdyb3VwIGJ1dCBpdCB3b24ndCB3b3JrIGZvciBmdXR1cmUgbm9kZWdyb3Vwcy4KPiAKPiAqUEVSTUFORU5UIEZJWCoKPiBFZGl0IHRoZSBibG9jayBiZWxvdyBpbiBmaWxlICIvdmFyL2xpYi9rb2xsYS92ZW52L2xpYi9weXRob24zLjEyL3NpdGUtcGFja2FnZXMvbWFnbnVtX2NsdXN0ZXJfYXBpL3Jlc291cmNlcy5weSIgb2YgdGhlIG1hZ251bV9jb25kdWN0b3I6Cj4gYGBgCj4gICAgICAgICBtYWNoaW5lX2RlcGxveW1lbnRbInJlcGxpY2FzIl0gPSBOb25lCj4gICAgICAgICBtYWNoaW5lX2RlcGxveW1lbnRbIm1ldGFkYXRhIl1bImFubm90YXRpb25zIl0gPSB7Cj4gICAgICAgICAgICAgQVVUT1NDQUxFX0FOTk9UQVRJT05fTUlOOiBzdHIobm9kZV9ncm91cC5taW5fbm9kZV9jb3VudCksCj4gICAgICAgICAgICAgQVVUT1NDQUxFX0FOTk9UQVRJT05fTUFYOiBzdHIoCj4gICAgICAgICAgICAgICAgIHV0aWxzLmdldF9ub2RlX2dyb3VwX21heF9ub2RlX2NvdW50KG5vZGVfZ3JvdXApCj4gICAgICAgICAgICAgKSwKPiAgICAgICAgICAgICAiY2FwYWNpdHkuY2x1c3Rlci1hdXRvc2NhbGVyLmt1YmVybmV0ZXMuaW8vbWVtb3J5IjogZiJ7bWF0aC5jZWlsKGZsYXZvci5yYW0gLyAxMDI0KX1HIiwKPiAgICAgICAgICAgICAiY2FwYWNpdHkuY2x1c3Rlci1hdXRvc2NhbGVyLmt1YmVybmV0ZXMuaW8vY3B1Ijogc3RyKGZsYXZvci52Y3B1cyksCj4gICAgICAgICAgICAgImNhcGFjaXR5LmNsdXN0ZXItYXV0b3NjYWxlci5rdWJlcm5ldGVzLmlvL2VwaGVtZXJhbC1kaXNrIjogc3RyKAo+ICAgICAgICAgICAgICAgICBib290X3ZvbHVtZV9zaXplCj4gICAgICAgICAgICAgKSwKPiAgICAgICAgIH0KPiAgICAgZWxzZToKPiAgICAgICAgIG1hY2hpbmVfZGVwbG95bWVudFsicmVwbGljYXMiXSA9IG5vZGVfZ3JvdXAubm9kZV9jb3VudAo+ICAgICAgICAgbWFjaGluZV9kZXBsb3ltZW50WyJtZXRhZGF0YSJdWyJhbm5vdGF0aW9ucyJdID0ge30KPiBgYGAKPiB0bzoKPiBgYGAKPiAgICAgICAgIG1hY2hpbmVfZGVwbG95bWVudFsicmVwbGljYXMiXSA9IE5vbmUKPiAgICAgICAgIG1hY2hpbmVfZGVwbG95bWVudFsibWV0YWRhdGEiXVsiYW5ub3RhdGlvbnMiXSA9IHsKPiAgICAgICAgICAgICBBVVRPU0NBTEVfQU5OT1RBVElPTl9NSU46IHN0cihub2RlX2dyb3VwLm1pbl9ub2RlX2NvdW50KSwKPiAgICAgICAgICAgICBBVVRPU0NBTEVfQU5OT1RBVElPTl9NQVg6IHN0cigKPiAgICAgICAgICAgICAgICAgdXRpbHMuZ2V0X25vZGVfZ3JvdXBfbWF4X25vZGVfY291bnQobm9kZV9ncm91cCkKPiAgICAgICAgICAgICApLAo+ICAgICAgICAgICAgICJjYXBhY2l0eS5jbHVzdGVyLWF1dG9zY2FsZ
XIua3ViZXJuZXRlcy5pby9tZW1vcnkiOiBmInttYXRoLmNlaWwoZmxhdm9yLnJhbSAvIDEwMjQpfUciLAo+ICAgICAgICAgICAgICJjYXBhY2l0eS5jbHVzdGVyLWF1dG9zY2FsZXIua3ViZXJuZXRlcy5pby9jcHUiOiBzdHIoZmxhdm9yLnZjcHVzKSwKPiAgICAgICAgICAgICAiY2FwYWNpdHkuY2x1c3Rlci1hdXRvc2NhbGVyLmt1YmVybmV0ZXMuaW8vZXBoZW1lcmFsLWRpc2siOiBzdHIoCj4gICAgICAgICAgICAgICAgIGJvb3Rfdm9sdW1lX3NpemUKPiAgICAgICAgICAgICApLAo+ICAgICAgICAgICAgICJjYXBhY2l0eS5jbHVzdGVyLWF1dG9zY2FsZXIua3ViZXJuZXRlcy5pby9sYWJlbHMiOiAoCj4gICAgICAgICAgICAgICAgIGYibm9kZS1yb2xlLmt1YmVybmV0ZXMuaW8ve25vZGVfZ3JvdXAucm9sZX09LCIKPiAgICAgICAgICAgICAgICAgZiJub2RlLmNsdXN0ZXIueC1rOHMuaW8vbm9kZWdyb3VwPXtub2RlX2dyb3VwLm5hbWV9Igo+ICAgICAgICAgICAgICksCj4gICAgICAgICB9Cj4gICAgIGVsc2U6Cj4gICAgICAgICBtYWNoaW5lX2RlcGxveW1lbnRbInJlcGxpY2FzIl0gPSBub2RlX2dyb3VwLm5vZGVfY291bnQKPiAgICAgICAgIG1hY2hpbmVfZGVwbG95bWVudFsibWV0YWRhdGEiXVsiYW5ub3RhdGlvbnMiXSA9IHt9Cj4gYGBgCj4gV2l0aCB0aGUgYWJvdmUgYWRkaXRpb24gYWZ0ZXIgcmVzdGFydGluZyB0aGUgY29udGFpbmVyIGF1dG9zY2FsZXIgd29ya3MgYXMgaW50ZW5kZWQgYW5kIGl0IGlzIHBvc3NpYmxlIHRvIGNyZWF0ZSBub2RlcyB3aXRoIG1pbl9zaXplPTAuCj4gCj4gSSB3b3VsZCBjcmVhdGUgYSBQUiBmb3IgdGhpcyBidXQgSSBkb24ndCBrbm93IGlmIGl0IHdhcyBicm91Z2h0IHVwIGJlZm9yZSBvciBub3QuIDopPC9pc3N1ZV9kZXNjcmlwdGlvbj4KPiAKPiAjIyBDb21tZW50cyBvbiB0aGUgSXNzdWUgKHlvdSBhcmUgQGNvcGlsb3QgaW4gdGhpcyBzZWN0aW9uKQo+IAo+IDxjb21tZW50cz4KPiA8L2NvbW1lbnRzPgo+IAoKCjwvZGV0YWlscz4KCgoKPCEtLSBTVEFSVCBDT1BJTE9UIENPRElORyBBR0VOVCBTVUZGSVggLS0+CgotIEZpeGVzIHZleHhob3N0L21hZ251bS1jbHVzdGVyLWFwaSM5MDcKCjwhLS0gU1RBUlQgQ09QSUxPVCBDT0RJTkcgQUdFTlQgVElQUyAtLT4KLS0tCgrwn5SSIEdpdEh1YiBBZHZhbmNlZCBTZWN1cml0eSBhdXRvbWF0aWNhbGx5IHByb3RlY3RzIENvcGlsb3QgY29kaW5nIGFnZW50IHB1bGwgcmVxdWVzdHMuIFlvdSBjYW4gcHJvdGVjdCBhbGwgcHVsbCByZXF1ZXN0cyBieSBlbmFibGluZyBBZHZhbmNlZCBTZWN1cml0eSBmb3IgeW91ciByZXBvc2l0b3JpZXMuIFtMZWFybiBtb3JlIGFib3V0IEFkdmFuY2VkIFNlY3VyaXR5Ll0oaHR0cHM6Ly9naC5pby9jY2EtYWR2YW5jZWQtc2VjdXJpdHkp
      override_checkout: master
      patchset: 068ce80eaa56c0b2a0663165e67328ffea18f2a0
      pipeline: check
      playbook_context:
        playbook_projects:
          trusted/project_0/github.com/vexxhost/zuul-config:
            canonical_name: github.com/vexxhost/zuul-config
            checkout: main
            commit: 9052b5a7781b3346e4cffd452a54448cbff54d8b
          trusted/project_1/opendev.org/zuul/zuul-jobs:
            canonical_name: opendev.org/zuul/zuul-jobs
            checkout: master
            commit: c75fe6ef19c05b98349573c971950c51bbf24758
          trusted/project_2/github.com/vexxhost/zuul-jobs:
            canonical_name: github.com/vexxhost/zuul-jobs
            checkout: main
            commit: a6e68243e02ef030ce5e75f8b67630880c475f33
          untrusted/project_0/opendev.org/openstack/devstack:
            canonical_name: opendev.org/openstack/devstack
            checkout: master
            commit: 416d27e89e0c1891921fee2a692086eb8fcd0307
          untrusted/project_1/opendev.org/openstack/openstack-zuul-jobs:
            canonical_name: opendev.org/openstack/openstack-zuul-jobs
            checkout: master
            commit: 3d5175f90e389f3240a8400a792abaaeb51bee3b
          untrusted/project_2/github.com/vexxhost/zuul-config:
            canonical_name: github.com/vexxhost/zuul-config
            checkout: main
            commit: 9052b5a7781b3346e4cffd452a54448cbff54d8b
          untrusted/project_3/opendev.org/zuul/zuul-jobs:
            canonical_name: opendev.org/zuul/zuul-jobs
            checkout: master
            commit: c75fe6ef19c05b98349573c971950c51bbf24758
          untrusted/project_4/github.com/vexxhost/zuul-jobs:
            canonical_name: github.com/vexxhost/zuul-jobs
            checkout: main
            commit: a6e68243e02ef030ce5e75f8b67630880c475f33
          untrusted/project_5/github.com/vexxhost/magnum-cluster-api:
            canonical_name: github.com/vexxhost/magnum-cluster-api
            checkout: main
            commit: 068ce80eaa56c0b2a0663165e67328ffea18f2a0
        playbooks:
        - path: untrusted/project_5/github.com/vexxhost/magnum-cluster-api/zuul.d/playbooks/hydrophone/run.yml
          roles:
          - checkout: master
            checkout_description: job override ref
            link_name: ansible/playbook_0/role_1/devstack
            link_target: untrusted/project_0/opendev.org/openstack/devstack
            role_path: ansible/playbook_0/role_1/devstack/roles
          - checkout: master
            checkout_description: job override ref
            link_name: ansible/playbook_0/role_2/openstack-zuul-jobs
            link_target: untrusted/project_1/opendev.org/openstack/openstack-zuul-jobs
            role_path: ansible/playbook_0/role_2/openstack-zuul-jobs/roles
          - checkout: master
            checkout_description: job override ref
            link_name: ansible/playbook_0/role_4/zuul-jobs
            link_target: untrusted/project_3/opendev.org/zuul/zuul-jobs
            role_path: ansible/playbook_0/role_4/zuul-jobs/roles
          - checkout: main
            checkout_description: zuul branch
            link_name: ansible/playbook_0/role_5/zuul-jobs
            link_target: untrusted/project_4/github.com/vexxhost/zuul-jobs
            role_path: ansible/playbook_0/role_5/zuul-jobs/roles
        post_playbooks:
        - path: untrusted/project_5/github.com/vexxhost/magnum-cluster-api/zuul.d/playbooks/hydrophone/post.yml
          roles:
          - checkout: master
            checkout_description: job override ref
            link_name: ansible/post_playbook_0/role_1/devstack
            link_target: untrusted/project_0/opendev.org/openstack/devstack
            role_path: ansible/post_playbook_0/role_1/devstack/roles
          - checkout: master
            checkout_description: job override ref
            link_name: ansible/post_playbook_0/role_2/openstack-zuul-jobs
            link_target: untrusted/project_1/opendev.org/openstack/openstack-zuul-jobs
            role_path: ansible/post_playbook_0/role_2/openstack-zuul-jobs/roles
          - checkout: master
            checkout_description: job override ref
            link_name: ansible/post_playbook_0/role_4/zuul-jobs
            link_target: untrusted/project_3/opendev.org/zuul/zuul-jobs
            role_path: ansible/post_playbook_0/role_4/zuul-jobs/roles
          - checkout: main
            checkout_description: zuul branch
            link_name: ansible/post_playbook_0/role_5/zuul-jobs
            link_target: untrusted/project_4/github.com/vexxhost/zuul-jobs
            role_path: ansible/post_playbook_0/role_5/zuul-jobs/roles
        - path: untrusted/project_0/opendev.org/openstack/devstack/playbooks/post.yaml
          roles:
          - checkout: master
            checkout_description: playbook branch
            link_name: ansible/post_playbook_1/role_0/devstack
            link_target: untrusted/project_0/opendev.org/openstack/devstack
            role_path: ansible/post_playbook_1/role_0/devstack/roles
          - checkout: master
            checkout_description: job override ref
            link_name: ansible/post_playbook_1/role_1/openstack-zuul-jobs
            link_target: untrusted/project_1/opendev.org/openstack/openstack-zuul-jobs
            role_path: ansible/post_playbook_1/role_1/openstack-zuul-jobs/roles
          - checkout: master
            checkout_description: job override ref
            link_name: ansible/post_playbook_1/role_3/zuul-jobs
            link_target: untrusted/project_3/opendev.org/zuul/zuul-jobs
            role_path: ansible/post_playbook_1/role_3/zuul-jobs/roles
          - checkout: main
            checkout_description: zuul branch
            link_name: ansible/post_playbook_1/role_4/zuul-jobs
            link_target: untrusted/project_4/github.com/vexxhost/zuul-jobs
            role_path: ansible/post_playbook_1/role_4/zuul-jobs/roles
        - path: trusted/project_0/github.com/vexxhost/zuul-config/playbooks/base/post.yaml
          roles:
          - checkout: master
            checkout_description: job override ref
            link_name: ansible/post_playbook_2/role_1/zuul-jobs
            link_target: trusted/project_1/opendev.org/zuul/zuul-jobs
            role_path: ansible/post_playbook_2/role_1/zuul-jobs/roles
          - checkout: main
            checkout_description: zuul branch
            link_name: ansible/post_playbook_2/role_2/zuul-jobs
            link_target: trusted/project_2/github.com/vexxhost/zuul-jobs
            role_path: ansible/post_playbook_2/role_2/zuul-jobs/roles
        - path: trusted/project_0/github.com/vexxhost/zuul-config/playbooks/base/post-logs.yaml
          roles:
          - checkout: master
            checkout_description: job override ref
            link_name: ansible/post_playbook_3/role_1/zuul-jobs
            link_target: trusted/project_1/opendev.org/zuul/zuul-jobs
            role_path: ansible/post_playbook_3/role_1/zuul-jobs/roles
          - checkout: main
            checkout_description: zuul branch
            link_name: ansible/post_playbook_3/role_2/zuul-jobs
            link_target: trusted/project_2/github.com/vexxhost/zuul-jobs
            role_path: ansible/post_playbook_3/role_2/zuul-jobs/roles
        pre_playbooks:
        - path: trusted/project_0/github.com/vexxhost/zuul-config/playbooks/base/pre.yaml
          roles:
          - checkout: master
            checkout_description: job override ref
            link_name: ansible/pre_playbook_0/role_1/zuul-jobs
            link_target: trusted/project_1/opendev.org/zuul/zuul-jobs
            role_path: ansible/pre_playbook_0/role_1/zuul-jobs/roles
          - checkout: main
            checkout_description: zuul branch
            link_name: ansible/pre_playbook_0/role_2/zuul-jobs
            link_target: trusted/project_2/github.com/vexxhost/zuul-jobs
            role_path: ansible/pre_playbook_0/role_2/zuul-jobs/roles
        - path: untrusted/project_0/opendev.org/openstack/devstack/playbooks/pre.yaml
          roles:
          - checkout: master
            checkout_description: playbook branch
            link_name: ansible/pre_playbook_1/role_0/devstack
            link_target: untrusted/project_0/opendev.org/openstack/devstack
            role_path: ansible/pre_playbook_1/role_0/devstack/roles
          - checkout: master
            checkout_description: job override ref
            link_name: ansible/pre_playbook_1/role_1/openstack-zuul-jobs
            link_target: untrusted/project_1/opendev.org/openstack/openstack-zuul-jobs
            role_path: ansible/pre_playbook_1/role_1/openstack-zuul-jobs/roles
          - checkout: master
            checkout_description: job override ref
            link_name: ansible/pre_playbook_1/role_3/zuul-jobs
            link_target: untrusted/project_3/opendev.org/zuul/zuul-jobs
            role_path: ansible/pre_playbook_1/role_3/zuul-jobs/roles
          - checkout: main
            checkout_description: zuul branch
            link_name: ansible/pre_playbook_1/role_4/zuul-jobs
            link_target: untrusted/project_4/github.com/vexxhost/zuul-jobs
            role_path: ansible/pre_playbook_1/role_4/zuul-jobs/roles
        - path: untrusted/project_5/github.com/vexxhost/magnum-cluster-api/zuul.d/playbooks/hydrophone/pre.yml
          roles:
          - checkout: master
            checkout_description: job override ref
            link_name: ansible/pre_playbook_2/role_1/devstack
            link_target: untrusted/project_0/opendev.org/openstack/devstack
            role_path: ansible/pre_playbook_2/role_1/devstack/roles
          - checkout: master
            checkout_description: job override ref
            link_name: ansible/pre_playbook_2/role_2/openstack-zuul-jobs
            link_target: untrusted/project_1/opendev.org/openstack/openstack-zuul-jobs
            role_path: ansible/pre_playbook_2/role_2/openstack-zuul-jobs/roles
          - checkout: master
            checkout_description: job override ref
            link_name: ansible/pre_playbook_2/role_4/zuul-jobs
            link_target: untrusted/project_3/opendev.org/zuul/zuul-jobs
            role_path: ansible/pre_playbook_2/role_4/zuul-jobs/roles
          - checkout: main
            checkout_description: zuul branch
            link_name: ansible/pre_playbook_2/role_5/zuul-jobs
            link_target: untrusted/project_4/github.com/vexxhost/zuul-jobs
            role_path: ansible/pre_playbook_2/role_5/zuul-jobs/roles
      post_review: false
      post_timeout: null
      pre_timeout: null
      project:
        canonical_hostname: github.com
        canonical_name: github.com/vexxhost/magnum-cluster-api
        name: vexxhost/magnum-cluster-api
        short_name: magnum-cluster-api
        src_dir: src/github.com/vexxhost/magnum-cluster-api
      projects:
        github.com/novnc/novnc:
          canonical_hostname: github.com
          canonical_name: github.com/novnc/novnc
          checkout: master
          checkout_description: job override ref
          commit: 8e1ebdffba02e651c399dacef841f8941f6ad6e4
          name: novnc/novnc
          required: true
          short_name: novnc
          src_dir: src/github.com/novnc/novnc
        github.com/vexxhost/magnum-cluster-api:
          canonical_hostname: github.com
          canonical_name: github.com/vexxhost/magnum-cluster-api
          checkout: main
          checkout_description: zuul branch
          commit: 068ce80eaa56c0b2a0663165e67328ffea18f2a0
          name: vexxhost/magnum-cluster-api
          required: false
          short_name: magnum-cluster-api
          src_dir: src/github.com/vexxhost/magnum-cluster-api
        opendev.org/openstack/barbican:
          canonical_hostname: opendev.org
          canonical_name: opendev.org/openstack/barbican
          checkout: master
          checkout_description: job override ref
          commit: f8a331a40eb21e6c8f37e07794d57aa98b120af9
          name: openstack/barbican
          required: true
          short_name: barbican
          src_dir: src/opendev.org/openstack/barbican
        opendev.org/openstack/cinder:
          canonical_hostname: opendev.org
          canonical_name: opendev.org/openstack/cinder
          checkout: master
          checkout_description: job override ref
          commit: c805ec75c136e7938e33aa02db3d2ddda5d9f38d
          name: openstack/cinder
          required: true
          short_name: cinder
          src_dir: src/opendev.org/openstack/cinder
        opendev.org/openstack/devstack:
          canonical_hostname: opendev.org
          canonical_name: opendev.org/openstack/devstack
          checkout: master
          checkout_description: job override ref
          commit: 416d27e89e0c1891921fee2a692086eb8fcd0307
          name: openstack/devstack
          required: true
          short_name: devstack
          src_dir: src/opendev.org/openstack/devstack
        opendev.org/openstack/glance:
          canonical_hostname: opendev.org
          canonical_name: opendev.org/openstack/glance
          checkout: master
          checkout_description: job override ref
          commit: 8c43caaefd8820e3e45b3c95b60bc6e1c668c55b
          name: openstack/glance
          required: true
          short_name: glance
          src_dir: src/opendev.org/openstack/glance
        opendev.org/openstack/keystone:
          canonical_hostname: opendev.org
          canonical_name: opendev.org/openstack/keystone
          checkout: master
          checkout_description: job override ref
          commit: 1e13c5f205bed9b7a2ebe5a437c1cc80b7072ab0
          name: openstack/keystone
          required: true
          short_name: keystone
          src_dir: src/opendev.org/openstack/keystone
        opendev.org/openstack/magnum:
          canonical_hostname: opendev.org
          canonical_name: opendev.org/openstack/magnum
          checkout: master
          checkout_description: job override ref
          commit: 75ca7d532cd6894ac79a4633bfa19842547d372c
          name: openstack/magnum
          required: true
          short_name: magnum
          src_dir: src/opendev.org/openstack/magnum
        opendev.org/openstack/manila:
          canonical_hostname: opendev.org
          canonical_name: opendev.org/openstack/manila
          checkout: master
          checkout_description: job override ref
          commit: 9b2f3dd900797a70dcb4b8948f67b1a1be8dec2f
          name: openstack/manila
          required: true
          short_name: manila
          src_dir: src/opendev.org/openstack/manila
        opendev.org/openstack/neutron:
          canonical_hostname: opendev.org
          canonical_name: opendev.org/openstack/neutron
          checkout: master
          checkout_description: job override ref
          commit: 25c08272ca5fd33ba88549e078318409fc619cf8
          name: openstack/neutron
          required: true
          short_name: neutron
          src_dir: src/opendev.org/openstack/neutron
        opendev.org/openstack/nova:
          canonical_hostname: opendev.org
          canonical_name: opendev.org/openstack/nova
          checkout: master
          checkout_description: job override ref
          commit: 3f4b947614fffbdd09c8105eb60f20aebe9e5d50
          name: openstack/nova
          required: true
          short_name: nova
          src_dir: src/opendev.org/openstack/nova
        opendev.org/openstack/octavia:
          canonical_hostname: opendev.org
          canonical_name: opendev.org/openstack/octavia
          checkout: master
          checkout_description: job override ref
          commit: b2e084e8734255a5a7027fd2b8c7ae6831ce8f2f
          name: openstack/octavia
          required: true
          short_name: octavia
          src_dir: src/opendev.org/openstack/octavia
        opendev.org/openstack/os-test-images:
          canonical_hostname: opendev.org
          canonical_name: opendev.org/openstack/os-test-images
          checkout: master
          checkout_description: job override ref
          commit: 5d0367e03788764f41da8effffa14e3eac513201
          name: openstack/os-test-images
          required: true
          short_name: os-test-images
          src_dir: src/opendev.org/openstack/os-test-images
        opendev.org/openstack/ovn-octavia-provider:
          canonical_hostname: opendev.org
          canonical_name: opendev.org/openstack/ovn-octavia-provider
          checkout: master
          checkout_description: job override ref
          commit: 83b1d9f26be4f4bcd941a5b339a8858ef250e24f
          name: openstack/ovn-octavia-provider
          required: true
          short_name: ovn-octavia-provider
          src_dir: src/opendev.org/openstack/ovn-octavia-provider
        opendev.org/openstack/placement:
          canonical_hostname: opendev.org
          canonical_name: opendev.org/openstack/placement
          checkout: master
          checkout_description: job override ref
          commit: e3db398fba279721121892323e6260c6932797c1
          name: openstack/placement
          required: true
          short_name: placement
          src_dir: src/opendev.org/openstack/placement
        opendev.org/openstack/python-magnumclient:
          canonical_hostname: opendev.org
          canonical_name: opendev.org/openstack/python-magnumclient
          checkout: master
          checkout_description: job override ref
          commit: bfc9dbc2aa9a113c12e591a87f774a6d986a981f
          name: openstack/python-magnumclient
          required: true
          short_name: python-magnumclient
          src_dir: src/opendev.org/openstack/python-magnumclient
        opendev.org/openstack/requirements:
          canonical_hostname: opendev.org
          canonical_name: opendev.org/openstack/requirements
          checkout: master
          checkout_description: job override ref
          commit: 2897d874a900d28bbeb8ed59a78193477c76296c
          name: openstack/requirements
          required: true
          short_name: requirements
          src_dir: src/opendev.org/openstack/requirements
        opendev.org/openstack/swift:
          canonical_hostname: opendev.org
          canonical_name: opendev.org/openstack/swift
          checkout: master
          checkout_description: job override ref
          commit: 020312b8ad30bcb7838198db72ce6166f9833724
          name: openstack/swift
          required: true
          short_name: swift
          src_dir: src/opendev.org/openstack/swift
      ref: refs/pull/909/head
      resources: {}
      tenant: oss
      timeout: 7200
      topic: null
      voting: true
    zuul_copy_output:
      /etc/ceph: logs
      /etc/glusterfs/glusterd.vol: logs
      /etc/libvirt: logs
      /etc/lvm: logs
      /etc/resolv.conf: logs
      /etc/sudoers: logs
      /etc/sudoers.d: logs
      /var/log/ceph: logs
      /var/log/glusterfs: logs
      /var/log/libvirt: logs
      /var/log/mysql: logs
      /var/log/openvswitch: logs
      /var/log/postgresql: logs
      /var/log/rabbitmq: logs
      /var/log/unbound.log: logs
      '{{ devstack_conf_dir }}/.localrc.auto': logs
      '{{ devstack_conf_dir }}/.stackenv': logs
      '{{ devstack_conf_dir }}/local.conf': logs
      '{{ devstack_conf_dir }}/localrc': logs
      '{{ devstack_full_log }}': logs
      '{{ devstack_log_dir }}/atop': logs
      '{{ devstack_log_dir }}/devstacklog.txt': logs
      '{{ devstack_log_dir }}/devstacklog.txt.summary': logs
      '{{ devstack_log_dir }}/dstat-csv.log': logs
      '{{ devstack_log_dir }}/qemu.coredump': logs
      '{{ devstack_log_dir }}/tcpdump.pcap': logs
      '{{ devstack_log_dir }}/worlddump-latest.txt': logs
      '{{ stage_dir }}/apache': logs
      '{{ stage_dir }}/apache_config': logs
      '{{ stage_dir }}/audit.log': logs
      '{{ stage_dir }}/core': logs
      '{{ stage_dir }}/deprecations.log': logs
      '{{ stage_dir }}/df.txt': logs
      '{{ stage_dir }}/dpkg-l.txt': logs
      '{{ stage_dir }}/etc': logs
      '{{ stage_dir }}/iptables.txt': logs
      '{{ stage_dir }}/listen53.txt': logs
      '{{ stage_dir }}/mount.txt': logs
      '{{ stage_dir }}/performance.json': logs
      '{{ stage_dir }}/pip2-freeze.txt': logs
      '{{ stage_dir }}/pip3-freeze.txt': logs
      '{{ stage_dir }}/rpm-qa.txt': logs
      '{{ stage_dir }}/services.txt': logs
      '{{ stage_dir }}/verify_tempest_conf.log': logs
