all:
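  # Inventory generated by Zuul for the atmosphere-molecule-csi-rbd Molecule job:
  # a single Ubuntu Jammy node ("instance") populates every group.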
  children:
    cephs:
      hosts:
        instance: null
    computes:
      hosts:
        instance: null
    controllers:
      hosts:
        instance: null
    zuul_unreachable:
      hosts: {}
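  # Connection settings and deployment variables for the single test node.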
  hosts:
    instance:
      ansible_connection: ssh
      ansible_host: 199.204.45.3
      ansible_port: 22
      ansible_python_interpreter: auto
      ansible_user: zuul
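      # Ceph options for a one-node cluster: allow size-1 pools, place replicas
      # at the OSD level (chooseleaf type 0), and keep insecure global_id
      # reclaim disabled.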
      ceph_conf_overrides:
      - option: mon allow pool size one
        section: global
        value: true
      - option: osd crush chooseleaf type
        section: global
        value: 0
      - option: auth allow insecure global id reclaim
        section: mon
        value: false
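      # ceph-csi RBD provisioner scaled to one replica; csi_driver below selects RBD.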
      ceph_csi_rbd_helm_values:
        provisioner:
          replicaCount: 1
      ceph_fsid: 4837cbf8-4f90-4300-b3f6-726c9b9f89b4
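      # OSD data devices, likely LVM volume group/logical volume paths templated
      # on the short hostname (e.g. /dev/ceph-instance-osd0/data).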
      ceph_osd_devices:
      - /dev/ceph-{{ inventory_hostname_short }}-osd0/data
      - /dev/ceph-{{ inventory_hostname_short }}-osd1/data
      - /dev/ceph-{{ inventory_hostname_short }}-osd2/data
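      # Cilium Helm overrides: one operator replica for the single node, with the
      # cluster IPv4 CIDR set below.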
      cilium_helm_values:
        operator:
          replicas: 1
      cilium_ipv4_cidr: 172.24.0.0/16
      csi_driver: rbd
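      # kube-vip virtual IP; the interface and Kubernetes hostname come from the
      # node's default-route IPv4 facts.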
      kube_vip_address: 172.17.0.100
      kube_vip_interface: '{{ ansible_facts[''default_ipv4''].interface }}'
      kubernetes_hostname: '{{ ansible_facts[''default_ipv4''].address }}'
      molecule_scenario: csi
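      # Node metadata injected by Zuul/Nodepool for the provisioned instance.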
      nodepool:
        az: nova
        cloud: public
        external_id: 1b65194e-15eb-4af9-8686-338a4ddad1ce
        host_id: 46a3119b92b21b95fc3b5fbffd17c6ebf9fffaf9848b0a50b2d8d56a
        interface_ip: 199.204.45.3
        label: ubuntu-jammy
        node_properties: {}
        private_ipv4: 199.204.45.3
        private_ipv6: null
        provider: yul1
        public_ipv4: 199.204.45.3
        public_ipv6: 2604:e100:1:0:f816:3eff:fe0a:c21c
        region: ca-ymq-1
        slot: null
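      # zuul_node mirrors the nodepool metadata above and adds the node uuid.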
      zuul_node:
        az: nova
        cloud: public
        external_id: 1b65194e-15eb-4af9-8686-338a4ddad1ce
        host_id: 46a3119b92b21b95fc3b5fbffd17c6ebf9fffaf9848b0a50b2d8d56a
        interface_ip: 199.204.45.3
        label: ubuntu-jammy
        node_properties: {}
        private_ipv4: 199.204.45.3
        private_ipv6: null
        provider: yul1
        public_ipv4: 199.204.45.3
        public_ipv6: 2604:e100:1:0:f816:3eff:fe0a:c21c
        region: ca-ymq-1
        slot: null
        uuid: null
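  # Group-wide vars: the same deployment settings as the host entry above, plus
  # the Zuul job context under `zuul`.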
  vars:
    ceph_conf_overrides:
    - option: mon allow pool size one
      section: global
      value: true
    - option: osd crush chooseleaf type
      section: global
      value: 0
    - option: auth allow insecure global id reclaim
      section: mon
      value: false
    ceph_csi_rbd_helm_values:
      provisioner:
        replicaCount: 1
    ceph_fsid: 4837cbf8-4f90-4300-b3f6-726c9b9f89b4
    ceph_osd_devices:
    - /dev/ceph-{{ inventory_hostname_short }}-osd0/data
    - /dev/ceph-{{ inventory_hostname_short }}-osd1/data
    - /dev/ceph-{{ inventory_hostname_short }}-osd2/data
    cilium_helm_values:
      operator:
        replicas: 1
    cilium_ipv4_cidr: 172.24.0.0/16
    csi_driver: rbd
    kube_vip_address: 172.17.0.100
    kube_vip_interface: '{{ ansible_facts[''default_ipv4''].interface }}'
    kubernetes_hostname: '{{ ansible_facts[''default_ipv4''].address }}'
    molecule_scenario: csi
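    # Zuul job context: job inheritance path, change and build metadata, executor
    # paths, and the playbook checkouts used for this build.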
    zuul:
      _inheritance_path:
      - '<Job base explicit: None implied: {MatchAny:{ImpliedBranchMatcher:main}}
        source: zuul-config/zuul.d/jobs.yaml@main#1>'
      - '<Job molecule explicit: None implied: {MatchAny:{ImpliedBranchMatcher:main}}
        source: vexxhost/zuul-jobs/zuul.d/ansible-jobs.yaml@main#1>'
      - '<Job atmosphere-molecule explicit: None implied: {MatchAny:{ImpliedBranchMatcher:main}}
        source: vexxhost/atmosphere/.zuul.yaml@main#17>'
      - '<Job atmosphere-molecule-csi explicit: None implied: {MatchAny:{ImpliedBranchMatcher:main}}
        source: vexxhost/atmosphere/.zuul.yaml@main#53>'
      - '<Job atmosphere-molecule-csi-rbd explicit: None implied: {MatchAny:{ImpliedBranchMatcher:main}}
        source: vexxhost/atmosphere/.zuul.yaml@main#66>'
      - '<Job atmosphere-molecule-csi-rbd explicit: None implied: None source: vexxhost/atmosphere/.zuul.yaml@main#72>'
      ansible_version: '9'
      attempts: 1
      branch: main
      build: d768832f837a440abb0edd6c987b1f69
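      # Change(s) built by this job; the full PR description is captured verbatim
      # in change_message.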
      build_refs:
      - branch: main
        change: '3581'
        change_message: "[WIP] Fix false positive in AlertmanagerClusterCrashlooping
          alert\n\n- [x] Update `roles/kube_prometheus_stack/vars/main.yml` to remove
          `endpoint` from the labeldrop regex\n- [x] Verify the change doesn't break
          existing functionality\n- [x] Add release note for this change\n- [ ] Final
          verification and testing\n\n<!-- START COPILOT ORIGINAL PROMPT -->\n\n\n\n<details>\n\n<summary>Original
          prompt</summary>\n\n\n----\n\n*This section details on the original issue
          you should resolve*\n\n<issue_title>AlertmanagerClusterCrashlooping false
          positive caused by ServiceMonitor labeldrop merging process_start_time_seconds
          from multiple endpoints</issue_title>\n<issue_description>## Summary\n\nThe
          `AlertmanagerClusterCrashlooping` alert is permanently firing as a false
          positive across **all environments**. The root cause is the ServiceMonitor
          relabeling configuration in `roles/kube_prometheus_stack/vars/main.yml`
          that drops the `endpoint` label, causing `process_start_time_seconds` from
          two different processes to be merged into a single time series.\n\n## Root
          Cause Analysis\n\n### The Alert\n\nThe `AlertmanagerClusterCrashlooping`
          alert fires when:\n\n```promql\nchanges(process_start_time_seconds{job=\"kube-prometheus-stack-alertmanager\",namespace=\"monitoring\"}[10m])
          > 4\n```\n\nThis was showing `changes() = 39` (approximately 40 scrapes
          per 10-minute window minus 1), despite the alertmanager pod having **0 restarts**
          and being stable for months.\n\n### The Problem\n\nThe alertmanager ServiceMonitor
          scrapes **two endpoints**:\n\n1. `http-web` (port 9093) \u2014 the alertmanager
          process itself\n2. `reloader-web` (port 8080) \u2014 the config-reloader
          sidecar\n\nBoth processes export `process_start_time_seconds`, but with
          **different values** because they are different PIDs with different start
          times:\n\n- alertmanager (9093): `process_start_time_seconds = 1.76520548151e+09`
          (timestamp ending in .51)\n- config-reloader (8080): `process_start_time_seconds
          = 1.76520548159e+09` (timestamp ending in .59)\n\nThe `&relabelings_instance_to_pod_name`
          YAML anchor in `roles/kube_prometheus_stack/vars/main.yml` applies this
          relabeling to both endpoints:\n\n```yaml\n- action: labeldrop\n  regex:
          ^(container|endpoint|namespace|pod|node|service)$\n```\n\nBecause `endpoint`
          is dropped, both scrape targets produce identical label sets:\n\n```\n{instance=\"alertmanager-kube-prometheus-stack-alertmanager-0\",
          job=\"kube-prometheus-stack-alertmanager\"}\n```\n\nPrometheus interleaves
          samples from both targets into a **single time series**, alternating between
          the two different `process_start_time_seconds` values on every scrape. The
          `changes()` function counts every sample as a change because the value alternates,
          resulting in `changes() \u2248 number_of_scrapes - 1`.\n\n### Why This Affects
          All Environments\n\nThe same `&relabelings_instance_to_pod_name` YAML anchor
          is used by all ServiceMonitors defined in `roles/kube_prometheus_stack/vars/main.yml`,
          including alertmanager, prometheus, grafana, coreDns, kube-state-metrics,
          prometheusOperator, and all additionalServiceMonitors. Any ServiceMonitor
          that scrapes multiple ports/endpoints from the same pod will have this same
          label collision issue.\n\n## Verification\n\nConfirmed on ext-corvex (untouched
          environment) by querying each endpoint individually:\n\n```\n# alertmanager
          on port 9093\nprocess_start_time_seconds = 1.76520548151e+09\n\n# config-reloader
          on port 8080\nprocess_start_time_seconds = 1.76520548159e+09\n```\n\nThese
          two values alternate in the merged series, causing `changes()` to equal
          the number of interleaved scrapes minus 1.\n\n## Fix\n\n### Verified Fix\n\nPatched
          the alertmanager ServiceMonitor on customer env to remove `endpoint` from
          the `labeldrop` regex:\n\n```yaml\n# Before:\nregex: ^(container|endpoint|namespace|pod|node|service)$\n#
          After:\nregex: ^(container|namespace|pod|node|service)$\n```\n\nResult after
          applying the fix:\n- **Old merged series** (no `endpoint` label): `changes=25`
          (decaying as the 10m window clears old samples)\n- **New `http-web` endpoint
          series**: `changes=0` \u2705\n- **New `reloader-web` endpoint series**:
          `changes=0` \u2705\n\n### Required Code Change\n\nIn `roles/kube_prometheus_stack/vars/main.yml`,
          update the `&relabelings_instance_to_pod_name` anchor:\n\n```yaml\n# Before:\n-
          action: labeldrop\n  regex: ^(container|endpoint|namespace|pod|node|service)$\n\n#
          After:\n- action: labeldrop\n  regex: ^(container|namespace|pod|node|service)$\n```\n\n###
          Impact Assessment\n\nRemoving `endpoint` from the labeldrop means the `endpoint`
          label will now be preserved on all metrics scraped through these ServiceMonitors.
          This is the standard Prometheus behavior \u2014 the `endpoint` label distinguishes
          which port/path a metric was scraped from.\n\nServices affected by this
          change (all use the same YAML anchor):\n- alertmanager\n- prometheus (also
          has a reloader-web endpoint \u2014 same issue likely applies)\n- grafana\n-
          coreDns\n- kube-state-metrics\n- prometheusOperator\n- keycloak\n- memcached\n-
          openstack-database-exporter\n- percona-xtradb-pxc\n- rabbitmq\n- valkey\n\nDashboard
          queries and alert rules that reference these metrics may need to be checked
          to ensure they handle the additional `endpoint` label correctly (e.g., aggregations
          should still work since `sum by (instance)` will aggregate across endpoints).\n\n##
          Additional Context\n\n- The `prometheus/client_golang` library reads `/proc/[pid]/stat`
          on every scrape via `procfs.StartTime()` to export `process_start_time_seconds`,
          so the value is al...\n\n</details>\n\n\n\n<!-- START COPILOT CODING AGENT
          SUFFIX -->\n\n- Fixes vexxhost/atmosphere#3580\n\n<!-- START COPILOT CODING
          AGENT TIPS -->\n---\n\n\U0001F4A1 You can make Copilot smarter by setting
          up custom instructions, customizing its development environment and configuring
          Model Context Protocol (MCP) servers. Learn more [Copilot coding agent tips](https://gh.io/copilot-coding-agent-tips)
          in the docs."
        change_url: https://github.com/vexxhost/atmosphere/pull/3581
        commit_id: 1d3ea2df76cca23faa16615fd9f227c05c2819ae
        patchset: 1d3ea2df76cca23faa16615fd9f227c05c2819ae
        project:
          canonical_hostname: github.com
          canonical_name: github.com/vexxhost/atmosphere
          name: vexxhost/atmosphere
          short_name: atmosphere
        src_dir: src/github.com/vexxhost/atmosphere
        topic: null
      buildset: 353c6584e29340cd8f29cc2b94ee4f40
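      # All refs in the buildset; with a single change this repeats the entry above.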
      buildset_refs:
      - branch: main
        change: '3581'
        change_message: "[WIP] Fix false positive in AlertmanagerClusterCrashlooping
          alert\n\n- [x] Update `roles/kube_prometheus_stack/vars/main.yml` to remove
          `endpoint` from the labeldrop regex\n- [x] Verify the change doesn't break
          existing functionality\n- [x] Add release note for this change\n- [ ] Final
          verification and testing\n\n<!-- START COPILOT ORIGINAL PROMPT -->\n\n\n\n<details>\n\n<summary>Original
          prompt</summary>\n\n\n----\n\n*This section details on the original issue
          you should resolve*\n\n<issue_title>AlertmanagerClusterCrashlooping false
          positive caused by ServiceMonitor labeldrop merging process_start_time_seconds
          from multiple endpoints</issue_title>\n<issue_description>## Summary\n\nThe
          `AlertmanagerClusterCrashlooping` alert is permanently firing as a false
          positive across **all environments**. The root cause is the ServiceMonitor
          relabeling configuration in `roles/kube_prometheus_stack/vars/main.yml`
          that drops the `endpoint` label, causing `process_start_time_seconds` from
          two different processes to be merged into a single time series.\n\n## Root
          Cause Analysis\n\n### The Alert\n\nThe `AlertmanagerClusterCrashlooping`
          alert fires when:\n\n```promql\nchanges(process_start_time_seconds{job=\"kube-prometheus-stack-alertmanager\",namespace=\"monitoring\"}[10m])
          > 4\n```\n\nThis was showing `changes() = 39` (approximately 40 scrapes
          per 10-minute window minus 1), despite the alertmanager pod having **0 restarts**
          and being stable for months.\n\n### The Problem\n\nThe alertmanager ServiceMonitor
          scrapes **two endpoints**:\n\n1. `http-web` (port 9093) \u2014 the alertmanager
          process itself\n2. `reloader-web` (port 8080) \u2014 the config-reloader
          sidecar\n\nBoth processes export `process_start_time_seconds`, but with
          **different values** because they are different PIDs with different start
          times:\n\n- alertmanager (9093): `process_start_time_seconds = 1.76520548151e+09`
          (timestamp ending in .51)\n- config-reloader (8080): `process_start_time_seconds
          = 1.76520548159e+09` (timestamp ending in .59)\n\nThe `&relabelings_instance_to_pod_name`
          YAML anchor in `roles/kube_prometheus_stack/vars/main.yml` applies this
          relabeling to both endpoints:\n\n```yaml\n- action: labeldrop\n  regex:
          ^(container|endpoint|namespace|pod|node|service)$\n```\n\nBecause `endpoint`
          is dropped, both scrape targets produce identical label sets:\n\n```\n{instance=\"alertmanager-kube-prometheus-stack-alertmanager-0\",
          job=\"kube-prometheus-stack-alertmanager\"}\n```\n\nPrometheus interleaves
          samples from both targets into a **single time series**, alternating between
          the two different `process_start_time_seconds` values on every scrape. The
          `changes()` function counts every sample as a change because the value alternates,
          resulting in `changes() \u2248 number_of_scrapes - 1`.\n\n### Why This Affects
          All Environments\n\nThe same `&relabelings_instance_to_pod_name` YAML anchor
          is used by all ServiceMonitors defined in `roles/kube_prometheus_stack/vars/main.yml`,
          including alertmanager, prometheus, grafana, coreDns, kube-state-metrics,
          prometheusOperator, and all additionalServiceMonitors. Any ServiceMonitor
          that scrapes multiple ports/endpoints from the same pod will have this same
          label collision issue.\n\n## Verification\n\nConfirmed on ext-corvex (untouched
          environment) by querying each endpoint individually:\n\n```\n# alertmanager
          on port 9093\nprocess_start_time_seconds = 1.76520548151e+09\n\n# config-reloader
          on port 8080\nprocess_start_time_seconds = 1.76520548159e+09\n```\n\nThese
          two values alternate in the merged series, causing `changes()` to equal
          the number of interleaved scrapes minus 1.\n\n## Fix\n\n### Verified Fix\n\nPatched
          the alertmanager ServiceMonitor on customer env to remove `endpoint` from
          the `labeldrop` regex:\n\n```yaml\n# Before:\nregex: ^(container|endpoint|namespace|pod|node|service)$\n#
          After:\nregex: ^(container|namespace|pod|node|service)$\n```\n\nResult after
          applying the fix:\n- **Old merged series** (no `endpoint` label): `changes=25`
          (decaying as the 10m window clears old samples)\n- **New `http-web` endpoint
          series**: `changes=0` \u2705\n- **New `reloader-web` endpoint series**:
          `changes=0` \u2705\n\n### Required Code Change\n\nIn `roles/kube_prometheus_stack/vars/main.yml`,
          update the `&relabelings_instance_to_pod_name` anchor:\n\n```yaml\n# Before:\n-
          action: labeldrop\n  regex: ^(container|endpoint|namespace|pod|node|service)$\n\n#
          After:\n- action: labeldrop\n  regex: ^(container|namespace|pod|node|service)$\n```\n\n###
          Impact Assessment\n\nRemoving `endpoint` from the labeldrop means the `endpoint`
          label will now be preserved on all metrics scraped through these ServiceMonitors.
          This is the standard Prometheus behavior \u2014 the `endpoint` label distinguishes
          which port/path a metric was scraped from.\n\nServices affected by this
          change (all use the same YAML anchor):\n- alertmanager\n- prometheus (also
          has a reloader-web endpoint \u2014 same issue likely applies)\n- grafana\n-
          coreDns\n- kube-state-metrics\n- prometheusOperator\n- keycloak\n- memcached\n-
          openstack-database-exporter\n- percona-xtradb-pxc\n- rabbitmq\n- valkey\n\nDashboard
          queries and alert rules that reference these metrics may need to be checked
          to ensure they handle the additional `endpoint` label correctly (e.g., aggregations
          should still work since `sum by (instance)` will aggregate across endpoints).\n\n##
          Additional Context\n\n- The `prometheus/client_golang` library reads `/proc/[pid]/stat`
          on every scrape via `procfs.StartTime()` to export `process_start_time_seconds`,
          so the value is al...\n\n</details>\n\n\n\n<!-- START COPILOT CODING AGENT
          SUFFIX -->\n\n- Fixes vexxhost/atmosphere#3580\n\n<!-- START COPILOT CODING
          AGENT TIPS -->\n---\n\n\U0001F4A1 You can make Copilot smarter by setting
          up custom instructions, customizing its development environment and configuring
          Model Context Protocol (MCP) servers. Learn more [Copilot coding agent tips](https://gh.io/copilot-coding-agent-tips)
          in the docs."
        change_url: https://github.com/vexxhost/atmosphere/pull/3581
        commit_id: 1d3ea2df76cca23faa16615fd9f227c05c2819ae
        patchset: 1d3ea2df76cca23faa16615fd9f227c05c2819ae
        project:
          canonical_hostname: github.com
          canonical_name: github.com/vexxhost/atmosphere
          name: vexxhost/atmosphere
          short_name: atmosphere
        src_dir: src/github.com/vexxhost/atmosphere
        topic: null
      change: '3581'
      change_message: "[WIP] Fix false positive in AlertmanagerClusterCrashlooping
        alert\n\n- [x] Update `roles/kube_prometheus_stack/vars/main.yml` to remove
        `endpoint` from the labeldrop regex\n- [x] Verify the change doesn't break
        existing functionality\n- [x] Add release note for this change\n- [ ] Final
        verification and testing\n\n<!-- START COPILOT ORIGINAL PROMPT -->\n\n\n\n<details>\n\n<summary>Original
        prompt</summary>\n\n\n----\n\n*This section details on the original issue
        you should resolve*\n\n<issue_title>AlertmanagerClusterCrashlooping false
        positive caused by ServiceMonitor labeldrop merging process_start_time_seconds
        from multiple endpoints</issue_title>\n<issue_description>## Summary\n\nThe
        `AlertmanagerClusterCrashlooping` alert is permanently firing as a false positive
        across **all environments**. The root cause is the ServiceMonitor relabeling
        configuration in `roles/kube_prometheus_stack/vars/main.yml` that drops the
        `endpoint` label, causing `process_start_time_seconds` from two different
        processes to be merged into a single time series.\n\n## Root Cause Analysis\n\n###
        The Alert\n\nThe `AlertmanagerClusterCrashlooping` alert fires when:\n\n```promql\nchanges(process_start_time_seconds{job=\"kube-prometheus-stack-alertmanager\",namespace=\"monitoring\"}[10m])
        > 4\n```\n\nThis was showing `changes() = 39` (approximately 40 scrapes per
        10-minute window minus 1), despite the alertmanager pod having **0 restarts**
        and being stable for months.\n\n### The Problem\n\nThe alertmanager ServiceMonitor
        scrapes **two endpoints**:\n\n1. `http-web` (port 9093) \u2014 the alertmanager
        process itself\n2. `reloader-web` (port 8080) \u2014 the config-reloader sidecar\n\nBoth
        processes export `process_start_time_seconds`, but with **different values**
        because they are different PIDs with different start times:\n\n- alertmanager
        (9093): `process_start_time_seconds = 1.76520548151e+09` (timestamp ending
        in .51)\n- config-reloader (8080): `process_start_time_seconds = 1.76520548159e+09`
        (timestamp ending in .59)\n\nThe `&relabelings_instance_to_pod_name` YAML
        anchor in `roles/kube_prometheus_stack/vars/main.yml` applies this relabeling
        to both endpoints:\n\n```yaml\n- action: labeldrop\n  regex: ^(container|endpoint|namespace|pod|node|service)$\n```\n\nBecause
        `endpoint` is dropped, both scrape targets produce identical label sets:\n\n```\n{instance=\"alertmanager-kube-prometheus-stack-alertmanager-0\",
        job=\"kube-prometheus-stack-alertmanager\"}\n```\n\nPrometheus interleaves
        samples from both targets into a **single time series**, alternating between
        the two different `process_start_time_seconds` values on every scrape. The
        `changes()` function counts every sample as a change because the value alternates,
        resulting in `changes() \u2248 number_of_scrapes - 1`.\n\n### Why This Affects
        All Environments\n\nThe same `&relabelings_instance_to_pod_name` YAML anchor
        is used by all ServiceMonitors defined in `roles/kube_prometheus_stack/vars/main.yml`,
        including alertmanager, prometheus, grafana, coreDns, kube-state-metrics,
        prometheusOperator, and all additionalServiceMonitors. Any ServiceMonitor
        that scrapes multiple ports/endpoints from the same pod will have this same
        label collision issue.\n\n## Verification\n\nConfirmed on ext-corvex (untouched
        environment) by querying each endpoint individually:\n\n```\n# alertmanager
        on port 9093\nprocess_start_time_seconds = 1.76520548151e+09\n\n# config-reloader
        on port 8080\nprocess_start_time_seconds = 1.76520548159e+09\n```\n\nThese
        two values alternate in the merged series, causing `changes()` to equal the
        number of interleaved scrapes minus 1.\n\n## Fix\n\n### Verified Fix\n\nPatched
        the alertmanager ServiceMonitor on customer env to remove `endpoint` from
        the `labeldrop` regex:\n\n```yaml\n# Before:\nregex: ^(container|endpoint|namespace|pod|node|service)$\n#
        After:\nregex: ^(container|namespace|pod|node|service)$\n```\n\nResult after
        applying the fix:\n- **Old merged series** (no `endpoint` label): `changes=25`
        (decaying as the 10m window clears old samples)\n- **New `http-web` endpoint
        series**: `changes=0` \u2705\n- **New `reloader-web` endpoint series**: `changes=0`
        \u2705\n\n### Required Code Change\n\nIn `roles/kube_prometheus_stack/vars/main.yml`,
        update the `&relabelings_instance_to_pod_name` anchor:\n\n```yaml\n# Before:\n-
        action: labeldrop\n  regex: ^(container|endpoint|namespace|pod|node|service)$\n\n#
        After:\n- action: labeldrop\n  regex: ^(container|namespace|pod|node|service)$\n```\n\n###
        Impact Assessment\n\nRemoving `endpoint` from the labeldrop means the `endpoint`
        label will now be preserved on all metrics scraped through these ServiceMonitors.
        This is the standard Prometheus behavior \u2014 the `endpoint` label distinguishes
        which port/path a metric was scraped from.\n\nServices affected by this change
        (all use the same YAML anchor):\n- alertmanager\n- prometheus (also has a
        reloader-web endpoint \u2014 same issue likely applies)\n- grafana\n- coreDns\n-
        kube-state-metrics\n- prometheusOperator\n- keycloak\n- memcached\n- openstack-database-exporter\n-
        percona-xtradb-pxc\n- rabbitmq\n- valkey\n\nDashboard queries and alert rules
        that reference these metrics may need to be checked to ensure they handle
        the additional `endpoint` label correctly (e.g., aggregations should still
        work since `sum by (instance)` will aggregate across endpoints).\n\n## Additional
        Context\n\n- The `prometheus/client_golang` library reads `/proc/[pid]/stat`
        on every scrape via `procfs.StartTime()` to export `process_start_time_seconds`,
        so the value is al...\n\n</details>\n\n\n\n<!-- START COPILOT CODING AGENT
        SUFFIX -->\n\n- Fixes vexxhost/atmosphere#3580\n\n<!-- START COPILOT CODING
        AGENT TIPS -->\n---\n\n\U0001F4A1 You can make Copilot smarter by setting
        up custom instructions, customizing its development environment and configuring
        Model Context Protocol (MCP) servers. Learn more [Copilot coding agent tips](https://gh.io/copilot-coding-agent-tips)
        in the docs."
      change_url: https://github.com/vexxhost/atmosphere/pull/3581
      child_jobs: []
      commit_id: 1d3ea2df76cca23faa16615fd9f227c05c2819ae
      event_id: 34ff3ad0-055f-11f1-8d15-a82b0f0ae16d
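      # Paths on the Zuul executor holding this build's inventory, logs, sources,
      # and result data.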
      executor:
        hostname: 3a2793d2bd32
        inventory_file: /var/lib/zuul/builds/d768832f837a440abb0edd6c987b1f69/ansible/inventory.yaml
        log_root: /var/lib/zuul/builds/d768832f837a440abb0edd6c987b1f69/work/logs
        result_data_file: /var/lib/zuul/builds/d768832f837a440abb0edd6c987b1f69/work/results.json
        src_root: /var/lib/zuul/builds/d768832f837a440abb0edd6c987b1f69/work/src
        work_root: /var/lib/zuul/builds/d768832f837a440abb0edd6c987b1f69/work
      include_vars: []
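      # Items being tested together with this change; here only the change itself.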
      items:
      - branch: main
        change: '3581'
        change_message: "[WIP] Fix false positive in AlertmanagerClusterCrashlooping
          alert\n\n- [x] Update `roles/kube_prometheus_stack/vars/main.yml` to remove
          `endpoint` from the labeldrop regex\n- [x] Verify the change doesn't break
          existing functionality\n- [x] Add release note for this change\n- [ ] Final
          verification and testing\n\n<!-- START COPILOT ORIGINAL PROMPT -->\n\n\n\n<details>\n\n<summary>Original
          prompt</summary>\n\n\n----\n\n*This section details on the original issue
          you should resolve*\n\n<issue_title>AlertmanagerClusterCrashlooping false
          positive caused by ServiceMonitor labeldrop merging process_start_time_seconds
          from multiple endpoints</issue_title>\n<issue_description>## Summary\n\nThe
          `AlertmanagerClusterCrashlooping` alert is permanently firing as a false
          positive across **all environments**. The root cause is the ServiceMonitor
          relabeling configuration in `roles/kube_prometheus_stack/vars/main.yml`
          that drops the `endpoint` label, causing `process_start_time_seconds` from
          two different processes to be merged into a single time series.\n\n## Root
          Cause Analysis\n\n### The Alert\n\nThe `AlertmanagerClusterCrashlooping`
          alert fires when:\n\n```promql\nchanges(process_start_time_seconds{job=\"kube-prometheus-stack-alertmanager\",namespace=\"monitoring\"}[10m])
          > 4\n```\n\nThis was showing `changes() = 39` (approximately 40 scrapes
          per 10-minute window minus 1), despite the alertmanager pod having **0 restarts**
          and being stable for months.\n\n### The Problem\n\nThe alertmanager ServiceMonitor
          scrapes **two endpoints**:\n\n1. `http-web` (port 9093) \u2014 the alertmanager
          process itself\n2. `reloader-web` (port 8080) \u2014 the config-reloader
          sidecar\n\nBoth processes export `process_start_time_seconds`, but with
          **different values** because they are different PIDs with different start
          times:\n\n- alertmanager (9093): `process_start_time_seconds = 1.76520548151e+09`
          (timestamp ending in .51)\n- config-reloader (8080): `process_start_time_seconds
          = 1.76520548159e+09` (timestamp ending in .59)\n\nThe `&relabelings_instance_to_pod_name`
          YAML anchor in `roles/kube_prometheus_stack/vars/main.yml` applies this
          relabeling to both endpoints:\n\n```yaml\n- action: labeldrop\n  regex:
          ^(container|endpoint|namespace|pod|node|service)$\n```\n\nBecause `endpoint`
          is dropped, both scrape targets produce identical label sets:\n\n```\n{instance=\"alertmanager-kube-prometheus-stack-alertmanager-0\",
          job=\"kube-prometheus-stack-alertmanager\"}\n```\n\nPrometheus interleaves
          samples from both targets into a **single time series**, alternating between
          the two different `process_start_time_seconds` values on every scrape. The
          `changes()` function counts every sample as a change because the value alternates,
          resulting in `changes() \u2248 number_of_scrapes - 1`.\n\n### Why This Affects
          All Environments\n\nThe same `&relabelings_instance_to_pod_name` YAML anchor
          is used by all ServiceMonitors defined in `roles/kube_prometheus_stack/vars/main.yml`,
          including alertmanager, prometheus, grafana, coreDns, kube-state-metrics,
          prometheusOperator, and all additionalServiceMonitors. Any ServiceMonitor
          that scrapes multiple ports/endpoints from the same pod will have this same
          label collision issue.\n\n## Verification\n\nConfirmed on ext-corvex (untouched
          environment) by querying each endpoint individually:\n\n```\n# alertmanager
          on port 9093\nprocess_start_time_seconds = 1.76520548151e+09\n\n# config-reloader
          on port 8080\nprocess_start_time_seconds = 1.76520548159e+09\n```\n\nThese
          two values alternate in the merged series, causing `changes()` to equal
          the number of interleaved scrapes minus 1.\n\n## Fix\n\n### Verified Fix\n\nPatched
          the alertmanager ServiceMonitor on customer env to remove `endpoint` from
          the `labeldrop` regex:\n\n```yaml\n# Before:\nregex: ^(container|endpoint|namespace|pod|node|service)$\n#
          After:\nregex: ^(container|namespace|pod|node|service)$\n```\n\nResult after
          applying the fix:\n- **Old merged series** (no `endpoint` label): `changes=25`
          (decaying as the 10m window clears old samples)\n- **New `http-web` endpoint
          series**: `changes=0` \u2705\n- **New `reloader-web` endpoint series**:
          `changes=0` \u2705\n\n### Required Code Change\n\nIn `roles/kube_prometheus_stack/vars/main.yml`,
          update the `&relabelings_instance_to_pod_name` anchor:\n\n```yaml\n# Before:\n-
          action: labeldrop\n  regex: ^(container|endpoint|namespace|pod|node|service)$\n\n#
          After:\n- action: labeldrop\n  regex: ^(container|namespace|pod|node|service)$\n```\n\n###
          Impact Assessment\n\nRemoving `endpoint` from the labeldrop means the `endpoint`
          label will now be preserved on all metrics scraped through these ServiceMonitors.
          This is the standard Prometheus behavior \u2014 the `endpoint` label distinguishes
          which port/path a metric was scraped from.\n\nServices affected by this
          change (all use the same YAML anchor):\n- alertmanager\n- prometheus (also
          has a reloader-web endpoint \u2014 same issue likely applies)\n- grafana\n-
          coreDns\n- kube-state-metrics\n- prometheusOperator\n- keycloak\n- memcached\n-
          openstack-database-exporter\n- percona-xtradb-pxc\n- rabbitmq\n- valkey\n\nDashboard
          queries and alert rules that reference these metrics may need to be checked
          to ensure they handle the additional `endpoint` label correctly (e.g., aggregations
          should still work since `sum by (instance)` will aggregate across endpoints).\n\n##
          Additional Context\n\n- The `prometheus/client_golang` library reads `/proc/[pid]/stat`
          on every scrape via `procfs.StartTime()` to export `process_start_time_seconds`,
          so the value is al...\n\n</details>\n\n\n\n<!-- START COPILOT CODING AGENT
          SUFFIX -->\n\n- Fixes vexxhost/atmosphere#3580\n\n<!-- START COPILOT CODING
          AGENT TIPS -->\n---\n\n\U0001F4A1 You can make Copilot smarter by setting
          up custom instructions, customizing its development environment and configuring
          Model Context Protocol (MCP) servers. Learn more [Copilot coding agent tips](https://gh.io/copilot-coding-agent-tips)
          in the docs."
        change_url: https://github.com/vexxhost/atmosphere/pull/3581
        commit_id: 1d3ea2df76cca23faa16615fd9f227c05c2819ae
        patchset: 1d3ea2df76cca23faa16615fd9f227c05c2819ae
        project:
          canonical_hostname: github.com
          canonical_name: github.com/vexxhost/atmosphere
          name: vexxhost/atmosphere
          short_name: atmosphere
          src_dir: src/github.com/vexxhost/atmosphere
        topic: null
      job: atmosphere-molecule-csi-rbd
      jobtags: []
      max_attempts: 3
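      # Base64-encoded commit message of the change under test.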
      message: W1dJUF0gRml4IGZhbHNlIHBvc2l0aXZlIGluIEFsZXJ0bWFuYWdlckNsdXN0ZXJDcmFzaGxvb3BpbmcgYWxlcnQKCi0gW3hdIFVwZGF0ZSBgcm9sZXMva3ViZV9wcm9tZXRoZXVzX3N0YWNrL3ZhcnMvbWFpbi55bWxgIHRvIHJlbW92ZSBgZW5kcG9pbnRgIGZyb20gdGhlIGxhYmVsZHJvcCByZWdleAotIFt4XSBWZXJpZnkgdGhlIGNoYW5nZSBkb2Vzbid0IGJyZWFrIGV4aXN0aW5nIGZ1bmN0aW9uYWxpdHkKLSBbeF0gQWRkIHJlbGVhc2Ugbm90ZSBmb3IgdGhpcyBjaGFuZ2UKLSBbIF0gRmluYWwgdmVyaWZpY2F0aW9uIGFuZCB0ZXN0aW5nCgo8IS0tIFNUQVJUIENPUElMT1QgT1JJR0lOQUwgUFJPTVBUIC0tPgoKCgo8ZGV0YWlscz4KCjxzdW1tYXJ5Pk9yaWdpbmFsIHByb21wdDwvc3VtbWFyeT4KCgotLS0tCgoqVGhpcyBzZWN0aW9uIGRldGFpbHMgb24gdGhlIG9yaWdpbmFsIGlzc3VlIHlvdSBzaG91bGQgcmVzb2x2ZSoKCjxpc3N1ZV90aXRsZT5BbGVydG1hbmFnZXJDbHVzdGVyQ3Jhc2hsb29waW5nIGZhbHNlIHBvc2l0aXZlIGNhdXNlZCBieSBTZXJ2aWNlTW9uaXRvciBsYWJlbGRyb3AgbWVyZ2luZyBwcm9jZXNzX3N0YXJ0X3RpbWVfc2Vjb25kcyBmcm9tIG11bHRpcGxlIGVuZHBvaW50czwvaXNzdWVfdGl0bGU+Cjxpc3N1ZV9kZXNjcmlwdGlvbj4jIyBTdW1tYXJ5CgpUaGUgYEFsZXJ0bWFuYWdlckNsdXN0ZXJDcmFzaGxvb3BpbmdgIGFsZXJ0IGlzIHBlcm1hbmVudGx5IGZpcmluZyBhcyBhIGZhbHNlIHBvc2l0aXZlIGFjcm9zcyAqKmFsbCBlbnZpcm9ubWVudHMqKi4gVGhlIHJvb3QgY2F1c2UgaXMgdGhlIFNlcnZpY2VNb25pdG9yIHJlbGFiZWxpbmcgY29uZmlndXJhdGlvbiBpbiBgcm9sZXMva3ViZV9wcm9tZXRoZXVzX3N0YWNrL3ZhcnMvbWFpbi55bWxgIHRoYXQgZHJvcHMgdGhlIGBlbmRwb2ludGAgbGFiZWwsIGNhdXNpbmcgYHByb2Nlc3Nfc3RhcnRfdGltZV9zZWNvbmRzYCBmcm9tIHR3byBkaWZmZXJlbnQgcHJvY2Vzc2VzIHRvIGJlIG1lcmdlZCBpbnRvIGEgc2luZ2xlIHRpbWUgc2VyaWVzLgoKIyMgUm9vdCBDYXVzZSBBbmFseXNpcwoKIyMjIFRoZSBBbGVydAoKVGhlIGBBbGVydG1hbmFnZXJDbHVzdGVyQ3Jhc2hsb29waW5nYCBhbGVydCBmaXJlcyB3aGVuOgoKYGBgcHJvbXFsCmNoYW5nZXMocHJvY2Vzc19zdGFydF90aW1lX3NlY29uZHN7am9iPSJrdWJlLXByb21ldGhldXMtc3RhY2stYWxlcnRtYW5hZ2VyIixuYW1lc3BhY2U9Im1vbml0b3JpbmcifVsxMG1dKSA+IDQKYGBgCgpUaGlzIHdhcyBzaG93aW5nIGBjaGFuZ2VzKCkgPSAzOWAgKGFwcHJveGltYXRlbHkgNDAgc2NyYXBlcyBwZXIgMTAtbWludXRlIHdpbmRvdyBtaW51cyAxKSwgZGVzcGl0ZSB0aGUgYWxlcnRtYW5hZ2VyIHBvZCBoYXZpbmcgKiowIHJlc3RhcnRzKiogYW5kIGJlaW5nIHN0YWJsZSBmb3IgbW9udGhzLgoKIyMjIFRoZSBQcm9ibGVtCgpUaGUgYWxlcnRtYW5hZ2VyIFNlcnZpY2VNb25pdG9yIHNjcmFwZXMgKip0d28gZW5kcG9pbnRzKio6CgoxLiBgaHR0cC13ZWJgIChwb3J0IDkwOTMpIOKAlCB0aGUgYWxlcnRtYW5hZ2VyIHByb2Nlc3MgaXRzZWxmCjIuIGByZWxvYWRlci13ZWJgIChwb3J0IDgwODApIOKAlCB0aGUgY29uZmlnLXJlbG9hZGVyIHNpZGVjYXIKCkJvdGggcHJvY2Vzc2VzIGV4cG9ydCBgcHJvY2Vzc19zdGFydF90aW1lX3NlY29uZHNgLCBidXQgd2l0aCAqKmRpZmZlcmVudCB2YWx1ZXMqKiBiZWNhdXNlIHRoZXkgYXJlIGRpZmZlcmVudCBQSURzIHdpdGggZGlmZmVyZW50IHN0YXJ0IHRpbWVzOgoKLSBhbGVydG1hbmFnZXIgKDkwOTMpOiBgcHJvY2Vzc19zdGFydF90aW1lX3NlY29uZHMgPSAxLjc2NTIwNTQ4MTUxZSswOWAgKHRpbWVzdGFtcCBlbmRpbmcgaW4gLjUxKQotIGNvbmZpZy1yZWxvYWRlciAoODA4MCk6IGBwcm9jZXNzX3N0YXJ0X3RpbWVfc2Vjb25kcyA9IDEuNzY1MjA1NDgxNTllKzA5YCAodGltZXN0YW1wIGVuZGluZyBpbiAuNTkpCgpUaGUgYCZyZWxhYmVsaW5nc19pbnN0YW5jZV90b19wb2RfbmFtZWAgWUFNTCBhbmNob3IgaW4gYHJvbGVzL2t1YmVfcHJvbWV0aGV1c19zdGFjay92YXJzL21haW4ueW1sYCBhcHBsaWVzIHRoaXMgcmVsYWJlbGluZyB0byBib3RoIGVuZHBvaW50czoKCmBgYHlhbWwKLSBhY3Rpb246IGxhYmVsZHJvcAogIHJlZ2V4OiBeKGNvbnRhaW5lcnxlbmRwb2ludHxuYW1lc3BhY2V8cG9kfG5vZGV8c2VydmljZSkkCmBgYAoKQmVjYXVzZSBgZW5kcG9pbnRgIGlzIGRyb3BwZWQsIGJvdGggc2NyYXBlIHRhcmdldHMgcHJvZHVjZSBpZGVudGljYWwgbGFiZWwgc2V0czoKCmBgYAp7aW5zdGFuY2U9ImFsZXJ0bWFuYWdlci1rdWJlLXByb21ldGhldXMtc3RhY2stYWxlcnRtYW5hZ2VyLTAiLCBqb2I9Imt1YmUtcHJvbWV0aGV1cy1zdGFjay1hbGVydG1hbmFnZXIifQpgYGAKClByb21ldGhldXMgaW50ZXJsZWF2ZXMgc2FtcGxlcyBmcm9tIGJvdGggdGFyZ2V0cyBpbnRvIGEgKipzaW5nbGUgdGltZSBzZXJpZXMqKiwgYWx0ZXJuYXRpbmcgYmV0d2VlbiB0aGUgdHdvIGRpZmZlcmVudCBgcHJvY2Vzc19zdGFydF90aW1lX3NlY29uZHNgIHZhbHVlcyBvbiBldmVyeSBzY3JhcGUuIFRoZSBgY2hhbmdlcygpYCBmdW5jdGlvbiBjb3VudHMgZXZlcnkgc2FtcGxlIGFzIGEgY2hhbmdlIGJlY2F1c2UgdGhlIHZhbHVlIGFsdGVybmF0
ZXMsIHJlc3VsdGluZyBpbiBgY2hhbmdlcygpIOKJiCBudW1iZXJfb2Zfc2NyYXBlcyAtIDFgLgoKIyMjIFdoeSBUaGlzIEFmZmVjdHMgQWxsIEVudmlyb25tZW50cwoKVGhlIHNhbWUgYCZyZWxhYmVsaW5nc19pbnN0YW5jZV90b19wb2RfbmFtZWAgWUFNTCBhbmNob3IgaXMgdXNlZCBieSBhbGwgU2VydmljZU1vbml0b3JzIGRlZmluZWQgaW4gYHJvbGVzL2t1YmVfcHJvbWV0aGV1c19zdGFjay92YXJzL21haW4ueW1sYCwgaW5jbHVkaW5nIGFsZXJ0bWFuYWdlciwgcHJvbWV0aGV1cywgZ3JhZmFuYSwgY29yZURucywga3ViZS1zdGF0ZS1tZXRyaWNzLCBwcm9tZXRoZXVzT3BlcmF0b3IsIGFuZCBhbGwgYWRkaXRpb25hbFNlcnZpY2VNb25pdG9ycy4gQW55IFNlcnZpY2VNb25pdG9yIHRoYXQgc2NyYXBlcyBtdWx0aXBsZSBwb3J0cy9lbmRwb2ludHMgZnJvbSB0aGUgc2FtZSBwb2Qgd2lsbCBoYXZlIHRoaXMgc2FtZSBsYWJlbCBjb2xsaXNpb24gaXNzdWUuCgojIyBWZXJpZmljYXRpb24KCkNvbmZpcm1lZCBvbiBleHQtY29ydmV4ICh1bnRvdWNoZWQgZW52aXJvbm1lbnQpIGJ5IHF1ZXJ5aW5nIGVhY2ggZW5kcG9pbnQgaW5kaXZpZHVhbGx5OgoKYGBgCiMgYWxlcnRtYW5hZ2VyIG9uIHBvcnQgOTA5Mwpwcm9jZXNzX3N0YXJ0X3RpbWVfc2Vjb25kcyA9IDEuNzY1MjA1NDgxNTFlKzA5CgojIGNvbmZpZy1yZWxvYWRlciBvbiBwb3J0IDgwODAKcHJvY2Vzc19zdGFydF90aW1lX3NlY29uZHMgPSAxLjc2NTIwNTQ4MTU5ZSswOQpgYGAKClRoZXNlIHR3byB2YWx1ZXMgYWx0ZXJuYXRlIGluIHRoZSBtZXJnZWQgc2VyaWVzLCBjYXVzaW5nIGBjaGFuZ2VzKClgIHRvIGVxdWFsIHRoZSBudW1iZXIgb2YgaW50ZXJsZWF2ZWQgc2NyYXBlcyBtaW51cyAxLgoKIyMgRml4CgojIyMgVmVyaWZpZWQgRml4CgpQYXRjaGVkIHRoZSBhbGVydG1hbmFnZXIgU2VydmljZU1vbml0b3Igb24gY3VzdG9tZXIgZW52IHRvIHJlbW92ZSBgZW5kcG9pbnRgIGZyb20gdGhlIGBsYWJlbGRyb3BgIHJlZ2V4OgoKYGBgeWFtbAojIEJlZm9yZToKcmVnZXg6IF4oY29udGFpbmVyfGVuZHBvaW50fG5hbWVzcGFjZXxwb2R8bm9kZXxzZXJ2aWNlKSQKIyBBZnRlcjoKcmVnZXg6IF4oY29udGFpbmVyfG5hbWVzcGFjZXxwb2R8bm9kZXxzZXJ2aWNlKSQKYGBgCgpSZXN1bHQgYWZ0ZXIgYXBwbHlpbmcgdGhlIGZpeDoKLSAqKk9sZCBtZXJnZWQgc2VyaWVzKiogKG5vIGBlbmRwb2ludGAgbGFiZWwpOiBgY2hhbmdlcz0yNWAgKGRlY2F5aW5nIGFzIHRoZSAxMG0gd2luZG93IGNsZWFycyBvbGQgc2FtcGxlcykKLSAqKk5ldyBgaHR0cC13ZWJgIGVuZHBvaW50IHNlcmllcyoqOiBgY2hhbmdlcz0wYCDinIUKLSAqKk5ldyBgcmVsb2FkZXItd2ViYCBlbmRwb2ludCBzZXJpZXMqKjogYGNoYW5nZXM9MGAg4pyFCgojIyMgUmVxdWlyZWQgQ29kZSBDaGFuZ2UKCkluIGByb2xlcy9rdWJlX3Byb21ldGhldXNfc3RhY2svdmFycy9tYWluLnltbGAsIHVwZGF0ZSB0aGUgYCZyZWxhYmVsaW5nc19pbnN0YW5jZV90b19wb2RfbmFtZWAgYW5jaG9yOgoKYGBgeWFtbAojIEJlZm9yZToKLSBhY3Rpb246IGxhYmVsZHJvcAogIHJlZ2V4OiBeKGNvbnRhaW5lcnxlbmRwb2ludHxuYW1lc3BhY2V8cG9kfG5vZGV8c2VydmljZSkkCgojIEFmdGVyOgotIGFjdGlvbjogbGFiZWxkcm9wCiAgcmVnZXg6IF4oY29udGFpbmVyfG5hbWVzcGFjZXxwb2R8bm9kZXxzZXJ2aWNlKSQKYGBgCgojIyMgSW1wYWN0IEFzc2Vzc21lbnQKClJlbW92aW5nIGBlbmRwb2ludGAgZnJvbSB0aGUgbGFiZWxkcm9wIG1lYW5zIHRoZSBgZW5kcG9pbnRgIGxhYmVsIHdpbGwgbm93IGJlIHByZXNlcnZlZCBvbiBhbGwgbWV0cmljcyBzY3JhcGVkIHRocm91Z2ggdGhlc2UgU2VydmljZU1vbml0b3JzLiBUaGlzIGlzIHRoZSBzdGFuZGFyZCBQcm9tZXRoZXVzIGJlaGF2aW9yIOKAlCB0aGUgYGVuZHBvaW50YCBsYWJlbCBkaXN0aW5ndWlzaGVzIHdoaWNoIHBvcnQvcGF0aCBhIG1ldHJpYyB3YXMgc2NyYXBlZCBmcm9tLgoKU2VydmljZXMgYWZmZWN0ZWQgYnkgdGhpcyBjaGFuZ2UgKGFsbCB1c2UgdGhlIHNhbWUgWUFNTCBhbmNob3IpOgotIGFsZXJ0bWFuYWdlcgotIHByb21ldGhldXMgKGFsc28gaGFzIGEgcmVsb2FkZXItd2ViIGVuZHBvaW50IOKAlCBzYW1lIGlzc3VlIGxpa2VseSBhcHBsaWVzKQotIGdyYWZhbmEKLSBjb3JlRG5zCi0ga3ViZS1zdGF0ZS1tZXRyaWNzCi0gcHJvbWV0aGV1c09wZXJhdG9yCi0ga2V5Y2xvYWsKLSBtZW1jYWNoZWQKLSBvcGVuc3RhY2stZGF0YWJhc2UtZXhwb3J0ZXIKLSBwZXJjb25hLXh0cmFkYi1weGMKLSByYWJiaXRtcQotIHZhbGtleQoKRGFzaGJvYXJkIHF1ZXJpZXMgYW5kIGFsZXJ0IHJ1bGVzIHRoYXQgcmVmZXJlbmNlIHRoZXNlIG1ldHJpY3MgbWF5IG5lZWQgdG8gYmUgY2hlY2tlZCB0byBlbnN1cmUgdGhleSBoYW5kbGUgdGhlIGFkZGl0aW9uYWwgYGVuZHBvaW50YCBsYWJlbCBjb3JyZWN0bHkgKGUuZy4sIGFnZ3JlZ2F0aW9ucyBzaG91bGQgc3RpbGwgd29yayBzaW5jZSBgc3VtIGJ5IChpbnN0YW5jZSlgIHdpbGwgYWdncmVnYXRlIGFjcm9zcyBlbmRwb2ludHMpLgoKIyMgQWRkaXRpb25hbCBDb250ZXh0CgotIFRoZSBgcHJvbWV0aGV1cy9jbGllbnRfZ29sYW5nYCBsaWJyYXJ5IHJlYWRzIGAvcHJvYy9bcGlkXS9zdGF0YCBvbiBldmVyeSBzY3J
hcGUgdmlhIGBwcm9jZnMuU3RhcnRUaW1lKClgIHRvIGV4cG9ydCBgcHJvY2Vzc19zdGFydF90aW1lX3NlY29uZHNgLCBzbyB0aGUgdmFsdWUgaXMgYWwuLi4KCjwvZGV0YWlscz4KCgoKPCEtLSBTVEFSVCBDT1BJTE9UIENPRElORyBBR0VOVCBTVUZGSVggLS0+CgotIEZpeGVzIHZleHhob3N0L2F0bW9zcGhlcmUjMzU4MAoKPCEtLSBTVEFSVCBDT1BJTE9UIENPRElORyBBR0VOVCBUSVBTIC0tPgotLS0KCvCfkqEgWW91IGNhbiBtYWtlIENvcGlsb3Qgc21hcnRlciBieSBzZXR0aW5nIHVwIGN1c3RvbSBpbnN0cnVjdGlvbnMsIGN1c3RvbWl6aW5nIGl0cyBkZXZlbG9wbWVudCBlbnZpcm9ubWVudCBhbmQgY29uZmlndXJpbmcgTW9kZWwgQ29udGV4dCBQcm90b2NvbCAoTUNQKSBzZXJ2ZXJzLiBMZWFybiBtb3JlIFtDb3BpbG90IGNvZGluZyBhZ2VudCB0aXBzXShodHRwczovL2doLmlvL2NvcGlsb3QtY29kaW5nLWFnZW50LXRpcHMpIGluIHRoZSBkb2NzLg==
      patchset: 1d3ea2df76cca23faa16615fd9f227c05c2819ae
      pipeline: check
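      # Repository checkouts and playbooks prepared for the job's trusted and
      # untrusted execution contexts.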
      playbook_context:
        playbook_projects:
          trusted/project_0/vexxhost.dev/zuul-config:
            canonical_name: vexxhost.dev/zuul-config
            checkout: main
            commit: 9052b5a7781b3346e4cffd452a54448cbff54d8b
          trusted/project_1/opendev.org/zuul/zuul-jobs:
            canonical_name: opendev.org/zuul/zuul-jobs
            checkout: master
            commit: d73b78cc624f363c6b7fcfe833f2db4571e4e979
          trusted/project_2/github.com/vexxhost/zuul-jobs:
            canonical_name: github.com/vexxhost/zuul-jobs
            checkout: main
            commit: a6e68243e02ef030ce5e75f8b67630880c475f33
          untrusted/project_0/github.com/vexxhost/zuul-jobs:
            canonical_name: github.com/vexxhost/zuul-jobs
            checkout: main
            commit: a6e68243e02ef030ce5e75f8b67630880c475f33
          untrusted/project_1/vexxhost.dev/zuul-config:
            canonical_name: vexxhost.dev/zuul-config
            checkout: main
            commit: 9052b5a7781b3346e4cffd452a54448cbff54d8b
          untrusted/project_2/opendev.org/zuul/zuul-jobs:
            canonical_name: opendev.org/zuul/zuul-jobs
            checkout: master
            commit: d73b78cc624f363c6b7fcfe833f2db4571e4e979
          untrusted/project_3/github.com/vexxhost/atmosphere:
            canonical_name: github.com/vexxhost/atmosphere
            checkout: main
            commit: 1d3ea2df76cca23faa16615fd9f227c05c2819ae
          untrusted/project_4/opendev.org/openstack/openstack-helm:
            canonical_name: opendev.org/openstack/openstack-helm
            checkout: master
            commit: 3a57ef7049b4b76a5a29f8331975931464a14d51
        playbooks:
        - path: untrusted/project_0/github.com/vexxhost/zuul-jobs/playbooks/molecule/run.yaml
          roles:
          - checkout: master
            checkout_description: project default branch
            link_name: ansible/playbook_0/role_1/zuul-jobs
            link_target: untrusted/project_2/opendev.org/zuul/zuul-jobs
            role_path: ansible/playbook_0/role_1/zuul-jobs/roles
          - checkout: main
            checkout_description: playbook branch
            link_name: ansible/playbook_0/role_2/zuul-jobs
            link_target: untrusted/project_0/github.com/vexxhost/zuul-jobs
            role_path: ansible/playbook_0/role_2/zuul-jobs/roles
      post_review: false
      post_timeout: null
      pre_timeout: null
      project:
        canonical_hostname: github.com
        canonical_name: github.com/vexxhost/atmosphere
        name: vexxhost/atmosphere
        short_name: atmosphere
        src_dir: src/github.com/vexxhost/atmosphere
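      # Repositories prepared for this job and where each is checked out on the node.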
      projects:
        github.com/vexxhost/atmosphere:
          canonical_hostname: github.com
          canonical_name: github.com/vexxhost/atmosphere
          checkout: main
          checkout_description: zuul branch
          commit: 1d3ea2df76cca23faa16615fd9f227c05c2819ae
          name: vexxhost/atmosphere
          required: false
          short_name: atmosphere
          src_dir: src/github.com/vexxhost/atmosphere
      ref: refs/pull/3581/head
      resources: {}
      tenant: oss
      timeout: 1800
      topic: null
      voting: true
