---
all:
  children:
    cephs:
      hosts:
        instance: null
    computes:
      hosts:
        instance: null
    controllers:
      hosts:
        instance: null
    zuul_unreachable:
      hosts: {}
  hosts:
    instance:
      ansible_connection: ssh
      ansible_host: 199.204.45.240
      ansible_port: 22
      ansible_python_interpreter: auto
      ansible_user: zuul
      ceph_conf_overrides:
      - option: mon allow pool size one
        section: global
        value: true
      - option: osd crush chooseleaf type
        section: global
        value: 0
      - option: auth allow insecure global id reclaim
        section: mon
        value: false
      ceph_csi_rbd_helm_values:
        provisioner:
          replicaCount: 1
      ceph_fsid: 4837cbf8-4f90-4300-b3f6-726c9b9f89b4
      ceph_osd_devices:
      - '/dev/ceph-{{ inventory_hostname_short }}-osd0/data'
      - '/dev/ceph-{{ inventory_hostname_short }}-osd1/data'
      - '/dev/ceph-{{ inventory_hostname_short }}-osd2/data'
      cilium_helm_values:
        operator:
          replicas: 1
      cilium_ipv4_cidr: 172.24.0.0/16
      csi_driver: rbd
      kube_vip_address: 172.17.0.100
      kube_vip_interface: '{{ ansible_facts[''default_ipv4''].interface }}'
      kubernetes_hostname: '{{ ansible_facts[''default_ipv4''].address }}'
      molecule_scenario: csi
      nodepool:
        az: nova
        cloud: public
        external_id: 42ece4e8-ca32-4b08-a836-b2436820dc95
        host_id: a14e37c14509a0e10156ccf8c706cd5613db7e363735e5577c330644
        interface_ip: 199.204.45.240
        label: ubuntu-jammy
        node_properties: {}
        private_ipv4: 199.204.45.240
        private_ipv6: null
        provider: yul1
        public_ipv4: 199.204.45.240
        public_ipv6: 2604:e100:1:0:f816:3eff:fe15:c7fb
        region: ca-ymq-1
        slot: null
      zuul_node:
        az: nova
        cloud: public
        external_id: 42ece4e8-ca32-4b08-a836-b2436820dc95
        host_id: a14e37c14509a0e10156ccf8c706cd5613db7e363735e5577c330644
        interface_ip: 199.204.45.240
        label: ubuntu-jammy
        node_properties: {}
        private_ipv4: 199.204.45.240
        private_ipv6: null
        provider: yul1
        public_ipv4: 199.204.45.240
        public_ipv6: 2604:e100:1:0:f816:3eff:fe15:c7fb
        region: ca-ymq-1
        slot: null
        uuid: null
  vars:
    ceph_conf_overrides:
    - option: mon allow pool size one
      section: global
      value: true
    - option: osd crush chooseleaf type
      section: global
      value: 0
    - option: auth allow insecure global id reclaim
      section: mon
      value: false
    ceph_csi_rbd_helm_values:
      provisioner:
        replicaCount: 1
    ceph_fsid: 4837cbf8-4f90-4300-b3f6-726c9b9f89b4
    ceph_osd_devices:
    - '/dev/ceph-{{ inventory_hostname_short }}-osd0/data'
    - '/dev/ceph-{{ inventory_hostname_short }}-osd1/data'
    - '/dev/ceph-{{ inventory_hostname_short }}-osd2/data'
    cilium_helm_values:
      operator:
        replicas: 1
    cilium_ipv4_cidr: 172.24.0.0/16
    csi_driver: rbd
    kube_vip_address: 172.17.0.100
    kube_vip_interface: '{{ ansible_facts[''default_ipv4''].interface }}'
    kubernetes_hostname: '{{ ansible_facts[''default_ipv4''].address }}'
    molecule_scenario: csi
    zuul:
      _inheritance_path:
      - '<Job base explicit: None implied: {MatchAny:{ImpliedBranchMatcher:main}}
        source: zuul-config/zuul.d/jobs.yaml@main#1>'
      - '<Job molecule explicit: None implied: {MatchAny:{ImpliedBranchMatcher:main}}
        source: vexxhost/zuul-jobs/zuul.d/ansible-jobs.yaml@main#1>'
      - '<Job atmosphere-molecule explicit: None implied: {MatchAny:{ImpliedBranchMatcher:main}}
        source: vexxhost/atmosphere/.zuul.yaml@main#17>'
      - '<Job atmosphere-molecule-csi explicit: None implied: {MatchAny:{ImpliedBranchMatcher:main}}
        source: vexxhost/atmosphere/.zuul.yaml@main#53>'
      - '<Job atmosphere-molecule-csi-rbd explicit: None implied: {MatchAny:{ImpliedBranchMatcher:main}}
        source: vexxhost/atmosphere/.zuul.yaml@main#66>'
      - '<Job atmosphere-molecule-csi-rbd explicit: None implied: None source: vexxhost/atmosphere/.zuul.yaml@main#72>'
      ansible_version: '9'
      attempts: 1
      branch: main
      build: b4abcffe32f5454dbee5eeaad9154da9
      build_refs:
      - branch: main
        change: '3581'
        change_message: "Fix AlertmanagerClusterCrashlooping false positive by preserving
          endpoint label\n\n- [x] Update `roles/kube_prometheus_stack/vars/main.yml`
          to remove `endpoint` from the labeldrop regex\n- [x] Verify the change doesn't
          break existing functionality\n- [x] Add release note for this change\n-
          [x] Code review passed with no issues\n- [x] Security scan (CodeQL) completed
          with no vulnerabilities\n- [x] Fix vale linting error by adding \"Alertmanager\"
          to vocabulary\n- [x] All changes complete and verified\n\n<!-- START COPILOT
          ORIGINAL PROMPT -->\n\n\n\n<details>\n\n<summary>Original prompt</summary>\n\n\n----\n\n*This
          section details on the original issue you should resolve*\n\n<issue_title>AlertmanagerClusterCrashlooping
          false positive caused by ServiceMonitor labeldrop merging process_start_time_seconds
          from multiple endpoints</issue_title>\n<issue_description>## Summary\n\nThe
          `AlertmanagerClusterCrashlooping` alert is permanently firing as a false
          positive across **all environments**. The root cause is the ServiceMonitor
          relabeling configuration in `roles/kube_prometheus_stack/vars/main.yml`
          that drops the `endpoint` label, causing `process_start_time_seconds` from
          two different processes to be merged into a single time series.\n\n## Root
          Cause Analysis\n\n### The Alert\n\nThe `AlertmanagerClusterCrashlooping`
          alert fires when:\n\n```promql\nchanges(process_start_time_seconds{job=\"kube-prometheus-stack-alertmanager\",namespace=\"monitoring\"}[10m])
          > 4\n```\n\nThis was showing `changes() = 39` (approximately 40 scrapes
          per 10-minute window minus 1), despite the alertmanager pod having **0 restarts**
          and being stable for months.\n\n### The Problem\n\nThe alertmanager ServiceMonitor
          scrapes **two endpoints**:\n\n1. `http-web` (port 9093) \u2014 the alertmanager
          process itself\n2. `reloader-web` (port 8080) \u2014 the config-reloader
          sidecar\n\nBoth processes export `process_start_time_seconds`, but with
          **different values** because they are different PIDs with different start
          times:\n\n- alertmanager (9093): `process_start_time_seconds = 1.76520548151e+09`
          (timestamp ending in .51)\n- config-reloader (8080): `process_start_time_seconds
          = 1.76520548159e+09` (timestamp ending in .59)\n\nThe `&relabelings_instance_to_pod_name`
          YAML anchor in `roles/kube_prometheus_stack/vars/main.yml` applies this
          relabeling to both endpoints:\n\n```yaml\n- action: labeldrop\n  regex:
          ^(container|endpoint|namespace|pod|node|service)$\n```\n\nBecause `endpoint`
          is dropped, both scrape targets produce identical label sets:\n\n```\n{instance=\"alertmanager-kube-prometheus-stack-alertmanager-0\",
          job=\"kube-prometheus-stack-alertmanager\"}\n```\n\nPrometheus interleaves
          samples from both targets into a **single time series**, alternating between
          the two different `process_start_time_seconds` values on every scrape. The
          `changes()` function counts every sample as a change because the value alternates,
          resulting in `changes() \u2248 number_of_scrapes - 1`.\n\n### Why This Affects
          All Environments\n\nThe same `&relabelings_instance_to_pod_name` YAML anchor
          is used by all ServiceMonitors defined in `roles/kube_prometheus_stack/vars/main.yml`,
          including alertmanager, prometheus, grafana, coreDns, kube-state-metrics,
          prometheusOperator, and all additionalServiceMonitors. Any ServiceMonitor
          that scrapes multiple ports/endpoints from the same pod will have this same
          label collision issue.\n\n## Verification\n\nConfirmed on ext-corvex (untouched
          environment) by querying each endpoint individually:\n\n```\n# alertmanager
          on port 9093\nprocess_start_time_seconds = 1.76520548151e+09\n\n# config-reloader
          on port 8080\nprocess_start_time_seconds = 1.76520548159e+09\n```\n\nThese
          two values alternate in the merged series, causing `changes()` to equal
          the number of interleaved scrapes minus 1.\n\n## Fix\n\n### Verified Fix\n\nPatched
          the alertmanager ServiceMonitor on customer env to remove `endpoint` from
          the `labeldrop` regex:\n\n```yaml\n# Before:\nregex: ^(container|endpoint|namespace|pod|node|service)$\n#
          After:\nregex: ^(container|namespace|pod|node|service)$\n```\n\nResult after
          applying the fix:\n- **Old merged series** (no `endpoint` label): `changes=25`
          (decaying as the 10m window clears old samples)\n- **New `http-web` endpoint
          series**: `changes=0` \u2705\n- **New `reloader-web` endpoint series**:
          `changes=0` \u2705\n\n### Required Code Change\n\nIn `roles/kube_prometheus_stack/vars/main.yml`,
          update the `&relabelings_instance_to_pod_name` anchor:\n\n```yaml\n# Before:\n-
          action: labeldrop\n  regex: ^(container|endpoint|namespace|pod|node|service)$\n\n#
          After:\n- action: labeldrop\n  regex: ^(container|namespace|pod|node|service)$\n```\n\n###
          Impact Assessment\n\nRemoving `endpoint` from the labeldrop means the `endpoint`
          label will now be preserved on all metrics scraped through these ServiceMonitors.
          This is the standard Prometheus behavior \u2014 the `endpoint` label distinguishes
          which port/path a metric was scraped from.\n\nServices affected by this
          change (all use the same YAML anchor):\n- alertmanager\n- prometheus (also
          has a reloader-web endpoint \u2014 same issue likely applies)\n- grafana\n-
          coreDns\n- kube-state-metrics\n- prometheusOperator\n- keycloak\n- memcached\n-
          openstack-database-exporter\n- percona-xtradb-pxc\n- rabbitmq\n- valkey\n\nDashboard
          queries and alert rules that reference these metrics may need to be checked
          to ensure they handle the additional `endpoint` label correctly (e.g., aggregations
          should still work since `sum by (instance)` will aggregate across endpoints).\n\n##
          Additional Context\n\n- The `prometheus/client_golang` library reads `/proc/[pid]/stat`
          on every scrape via `procfs.StartTime()` to export `process_start_time_seconds`,
          so the value is al...\n\n</details>\n\n\n\n<!-- START COPILOT CODING AGENT
          SUFFIX -->\n\n- Fixes vexxhost/atmosphere#3580\n\n<!-- START COPILOT CODING
          AGENT TIPS -->\n---\n\n\U0001F4A1 You can make Copilot smarter by setting
          up custom instructions, customizing its development environment and configuring
          Model Context Protocol (MCP) servers. Learn more [Copilot coding agent tips](https://gh.io/copilot-coding-agent-tips)
          in the docs."
        change_url: https://github.com/vexxhost/atmosphere/pull/3581
        commit_id: 978d67e419e7a88ff3701d69c758cc7b1030014c
        patchset: 978d67e419e7a88ff3701d69c758cc7b1030014c
        project:
          canonical_hostname: github.com
          canonical_name: github.com/vexxhost/atmosphere
          name: vexxhost/atmosphere
          short_name: atmosphere
        src_dir: src/github.com/vexxhost/atmosphere
        topic: null
      buildset: 435a5bca309744798b201b7717850a72
      buildset_refs:
      - branch: main
        change: '3581'
        change_message: "Fix AlertmanagerClusterCrashlooping false positive by preserving
          endpoint label\n\n- [x] Update `roles/kube_prometheus_stack/vars/main.yml`
          to remove `endpoint` from the labeldrop regex\n- [x] Verify the change doesn't
          break existing functionality\n- [x] Add release note for this change\n-
          [x] Code review passed with no issues\n- [x] Security scan (CodeQL) completed
          with no vulnerabilities\n- [x] Fix vale linting error by adding \"Alertmanager\"
          to vocabulary\n- [x] All changes complete and verified\n\n<!-- START COPILOT
          ORIGINAL PROMPT -->\n\n\n\n<details>\n\n<summary>Original prompt</summary>\n\n\n----\n\n*This
          section details on the original issue you should resolve*\n\n<issue_title>AlertmanagerClusterCrashlooping
          false positive caused by ServiceMonitor labeldrop merging process_start_time_seconds
          from multiple endpoints</issue_title>\n<issue_description>## Summary\n\nThe
          `AlertmanagerClusterCrashlooping` alert is permanently firing as a false
          positive across **all environments**. The root cause is the ServiceMonitor
          relabeling configuration in `roles/kube_prometheus_stack/vars/main.yml`
          that drops the `endpoint` label, causing `process_start_time_seconds` from
          two different processes to be merged into a single time series.\n\n## Root
          Cause Analysis\n\n### The Alert\n\nThe `AlertmanagerClusterCrashlooping`
          alert fires when:\n\n```promql\nchanges(process_start_time_seconds{job=\"kube-prometheus-stack-alertmanager\",namespace=\"monitoring\"}[10m])
          > 4\n```\n\nThis was showing `changes() = 39` (approximately 40 scrapes
          per 10-minute window minus 1), despite the alertmanager pod having **0 restarts**
          and being stable for months.\n\n### The Problem\n\nThe alertmanager ServiceMonitor
          scrapes **two endpoints**:\n\n1. `http-web` (port 9093) \u2014 the alertmanager
          process itself\n2. `reloader-web` (port 8080) \u2014 the config-reloader
          sidecar\n\nBoth processes export `process_start_time_seconds`, but with
          **different values** because they are different PIDs with different start
          times:\n\n- alertmanager (9093): `process_start_time_seconds = 1.76520548151e+09`
          (timestamp ending in .51)\n- config-reloader (8080): `process_start_time_seconds
          = 1.76520548159e+09` (timestamp ending in .59)\n\nThe `&relabelings_instance_to_pod_name`
          YAML anchor in `roles/kube_prometheus_stack/vars/main.yml` applies this
          relabeling to both endpoints:\n\n```yaml\n- action: labeldrop\n  regex:
          ^(container|endpoint|namespace|pod|node|service)$\n```\n\nBecause `endpoint`
          is dropped, both scrape targets produce identical label sets:\n\n```\n{instance=\"alertmanager-kube-prometheus-stack-alertmanager-0\",
          job=\"kube-prometheus-stack-alertmanager\"}\n```\n\nPrometheus interleaves
          samples from both targets into a **single time series**, alternating between
          the two different `process_start_time_seconds` values on every scrape. The
          `changes()` function counts every sample as a change because the value alternates,
          resulting in `changes() \u2248 number_of_scrapes - 1`.\n\n### Why This Affects
          All Environments\n\nThe same `&relabelings_instance_to_pod_name` YAML anchor
          is used by all ServiceMonitors defined in `roles/kube_prometheus_stack/vars/main.yml`,
          including alertmanager, prometheus, grafana, coreDns, kube-state-metrics,
          prometheusOperator, and all additionalServiceMonitors. Any ServiceMonitor
          that scrapes multiple ports/endpoints from the same pod will have this same
          label collision issue.\n\n## Verification\n\nConfirmed on ext-corvex (untouched
          environment) by querying each endpoint individually:\n\n```\n# alertmanager
          on port 9093\nprocess_start_time_seconds = 1.76520548151e+09\n\n# config-reloader
          on port 8080\nprocess_start_time_seconds = 1.76520548159e+09\n```\n\nThese
          two values alternate in the merged series, causing `changes()` to equal
          the number of interleaved scrapes minus 1.\n\n## Fix\n\n### Verified Fix\n\nPatched
          the alertmanager ServiceMonitor on customer env to remove `endpoint` from
          the `labeldrop` regex:\n\n```yaml\n# Before:\nregex: ^(container|endpoint|namespace|pod|node|service)$\n#
          After:\nregex: ^(container|namespace|pod|node|service)$\n```\n\nResult after
          applying the fix:\n- **Old merged series** (no `endpoint` label): `changes=25`
          (decaying as the 10m window clears old samples)\n- **New `http-web` endpoint
          series**: `changes=0` \u2705\n- **New `reloader-web` endpoint series**:
          `changes=0` \u2705\n\n### Required Code Change\n\nIn `roles/kube_prometheus_stack/vars/main.yml`,
          update the `&relabelings_instance_to_pod_name` anchor:\n\n```yaml\n# Before:\n-
          action: labeldrop\n  regex: ^(container|endpoint|namespace|pod|node|service)$\n\n#
          After:\n- action: labeldrop\n  regex: ^(container|namespace|pod|node|service)$\n```\n\n###
          Impact Assessment\n\nRemoving `endpoint` from the labeldrop means the `endpoint`
          label will now be preserved on all metrics scraped through these ServiceMonitors.
          This is the standard Prometheus behavior \u2014 the `endpoint` label distinguishes
          which port/path a metric was scraped from.\n\nServices affected by this
          change (all use the same YAML anchor):\n- alertmanager\n- prometheus (also
          has a reloader-web endpoint \u2014 same issue likely applies)\n- grafana\n-
          coreDns\n- kube-state-metrics\n- prometheusOperator\n- keycloak\n- memcached\n-
          openstack-database-exporter\n- percona-xtradb-pxc\n- rabbitmq\n- valkey\n\nDashboard
          queries and alert rules that reference these metrics may need to be checked
          to ensure they handle the additional `endpoint` label correctly (e.g., aggregations
          should still work since `sum by (instance)` will aggregate across endpoints).\n\n##
          Additional Context\n\n- The `prometheus/client_golang` library reads `/proc/[pid]/stat`
          on every scrape via `procfs.StartTime()` to export `process_start_time_seconds`,
          so the value is al...\n\n</details>\n\n\n\n<!-- START COPILOT CODING AGENT
          SUFFIX -->\n\n- Fixes vexxhost/atmosphere#3580\n\n<!-- START COPILOT CODING
          AGENT TIPS -->\n---\n\n\U0001F4A1 You can make Copilot smarter by setting
          up custom instructions, customizing its development environment and configuring
          Model Context Protocol (MCP) servers. Learn more [Copilot coding agent tips](https://gh.io/copilot-coding-agent-tips)
          in the docs."
        change_url: https://github.com/vexxhost/atmosphere/pull/3581
        commit_id: 978d67e419e7a88ff3701d69c758cc7b1030014c
        patchset: 978d67e419e7a88ff3701d69c758cc7b1030014c
        project:
          canonical_hostname: github.com
          canonical_name: github.com/vexxhost/atmosphere
          name: vexxhost/atmosphere
          short_name: atmosphere
        src_dir: src/github.com/vexxhost/atmosphere
        topic: null
      change: '3581'
      change_message: "Fix AlertmanagerClusterCrashlooping false positive by preserving
        endpoint label\n\n- [x] Update `roles/kube_prometheus_stack/vars/main.yml`
        to remove `endpoint` from the labeldrop regex\n- [x] Verify the change doesn't
        break existing functionality\n- [x] Add release note for this change\n- [x]
        Code review passed with no issues\n- [x] Security scan (CodeQL) completed
        with no vulnerabilities\n- [x] Fix vale linting error by adding \"Alertmanager\"
        to vocabulary\n- [x] All changes complete and verified\n\n<!-- START COPILOT
        ORIGINAL PROMPT -->\n\n\n\n<details>\n\n<summary>Original prompt</summary>\n\n\n----\n\n*This
        section details on the original issue you should resolve*\n\n<issue_title>AlertmanagerClusterCrashlooping
        false positive caused by ServiceMonitor labeldrop merging process_start_time_seconds
        from multiple endpoints</issue_title>\n<issue_description>## Summary\n\nThe
        `AlertmanagerClusterCrashlooping` alert is permanently firing as a false positive
        across **all environments**. The root cause is the ServiceMonitor relabeling
        configuration in `roles/kube_prometheus_stack/vars/main.yml` that drops the
        `endpoint` label, causing `process_start_time_seconds` from two different
        processes to be merged into a single time series.\n\n## Root Cause Analysis\n\n###
        The Alert\n\nThe `AlertmanagerClusterCrashlooping` alert fires when:\n\n```promql\nchanges(process_start_time_seconds{job=\"kube-prometheus-stack-alertmanager\",namespace=\"monitoring\"}[10m])
        > 4\n```\n\nThis was showing `changes() = 39` (approximately 40 scrapes per
        10-minute window minus 1), despite the alertmanager pod having **0 restarts**
        and being stable for months.\n\n### The Problem\n\nThe alertmanager ServiceMonitor
        scrapes **two endpoints**:\n\n1. `http-web` (port 9093) \u2014 the alertmanager
        process itself\n2. `reloader-web` (port 8080) \u2014 the config-reloader sidecar\n\nBoth
        processes export `process_start_time_seconds`, but with **different values**
        because they are different PIDs with different start times:\n\n- alertmanager
        (9093): `process_start_time_seconds = 1.76520548151e+09` (timestamp ending
        in .51)\n- config-reloader (8080): `process_start_time_seconds = 1.76520548159e+09`
        (timestamp ending in .59)\n\nThe `&relabelings_instance_to_pod_name` YAML
        anchor in `roles/kube_prometheus_stack/vars/main.yml` applies this relabeling
        to both endpoints:\n\n```yaml\n- action: labeldrop\n  regex: ^(container|endpoint|namespace|pod|node|service)$\n```\n\nBecause
        `endpoint` is dropped, both scrape targets produce identical label sets:\n\n```\n{instance=\"alertmanager-kube-prometheus-stack-alertmanager-0\",
        job=\"kube-prometheus-stack-alertmanager\"}\n```\n\nPrometheus interleaves
        samples from both targets into a **single time series**, alternating between
        the two different `process_start_time_seconds` values on every scrape. The
        `changes()` function counts every sample as a change because the value alternates,
        resulting in `changes() \u2248 number_of_scrapes - 1`.\n\n### Why This Affects
        All Environments\n\nThe same `&relabelings_instance_to_pod_name` YAML anchor
        is used by all ServiceMonitors defined in `roles/kube_prometheus_stack/vars/main.yml`,
        including alertmanager, prometheus, grafana, coreDns, kube-state-metrics,
        prometheusOperator, and all additionalServiceMonitors. Any ServiceMonitor
        that scrapes multiple ports/endpoints from the same pod will have this same
        label collision issue.\n\n## Verification\n\nConfirmed on ext-corvex (untouched
        environment) by querying each endpoint individually:\n\n```\n# alertmanager
        on port 9093\nprocess_start_time_seconds = 1.76520548151e+09\n\n# config-reloader
        on port 8080\nprocess_start_time_seconds = 1.76520548159e+09\n```\n\nThese
        two values alternate in the merged series, causing `changes()` to equal the
        number of interleaved scrapes minus 1.\n\n## Fix\n\n### Verified Fix\n\nPatched
        the alertmanager ServiceMonitor on customer env to remove `endpoint` from
        the `labeldrop` regex:\n\n```yaml\n# Before:\nregex: ^(container|endpoint|namespace|pod|node|service)$\n#
        After:\nregex: ^(container|namespace|pod|node|service)$\n```\n\nResult after
        applying the fix:\n- **Old merged series** (no `endpoint` label): `changes=25`
        (decaying as the 10m window clears old samples)\n- **New `http-web` endpoint
        series**: `changes=0` \u2705\n- **New `reloader-web` endpoint series**: `changes=0`
        \u2705\n\n### Required Code Change\n\nIn `roles/kube_prometheus_stack/vars/main.yml`,
        update the `&relabelings_instance_to_pod_name` anchor:\n\n```yaml\n# Before:\n-
        action: labeldrop\n  regex: ^(container|endpoint|namespace|pod|node|service)$\n\n#
        After:\n- action: labeldrop\n  regex: ^(container|namespace|pod|node|service)$\n```\n\n###
        Impact Assessment\n\nRemoving `endpoint` from the labeldrop means the `endpoint`
        label will now be preserved on all metrics scraped through these ServiceMonitors.
        This is the standard Prometheus behavior \u2014 the `endpoint` label distinguishes
        which port/path a metric was scraped from.\n\nServices affected by this change
        (all use the same YAML anchor):\n- alertmanager\n- prometheus (also has a
        reloader-web endpoint \u2014 same issue likely applies)\n- grafana\n- coreDns\n-
        kube-state-metrics\n- prometheusOperator\n- keycloak\n- memcached\n- openstack-database-exporter\n-
        percona-xtradb-pxc\n- rabbitmq\n- valkey\n\nDashboard queries and alert rules
        that reference these metrics may need to be checked to ensure they handle
        the additional `endpoint` label correctly (e.g., aggregations should still
        work since `sum by (instance)` will aggregate across endpoints).\n\n## Additional
        Context\n\n- The `prometheus/client_golang` library reads `/proc/[pid]/stat`
        on every scrape via `procfs.StartTime()` to export `process_start_time_seconds`,
        so the value is al...\n\n</details>\n\n\n\n<!-- START COPILOT CODING AGENT
        SUFFIX -->\n\n- Fixes vexxhost/atmosphere#3580\n\n<!-- START COPILOT CODING
        AGENT TIPS -->\n---\n\n\U0001F4A1 You can make Copilot smarter by setting
        up custom instructions, customizing its development environment and configuring
        Model Context Protocol (MCP) servers. Learn more [Copilot coding agent tips](https://gh.io/copilot-coding-agent-tips)
        in the docs."
      change_url: https://github.com/vexxhost/atmosphere/pull/3581
      child_jobs: []
      commit_id: 978d67e419e7a88ff3701d69c758cc7b1030014c
      event_id: 3778a270-0563-11f1-94d4-2d6ae42d347f
      executor:
        hostname: 3a2793d2bd32
        inventory_file: /var/lib/zuul/builds/b4abcffe32f5454dbee5eeaad9154da9/ansible/inventory.yaml
        log_root: /var/lib/zuul/builds/b4abcffe32f5454dbee5eeaad9154da9/work/logs
        result_data_file: /var/lib/zuul/builds/b4abcffe32f5454dbee5eeaad9154da9/work/results.json
        src_root: /var/lib/zuul/builds/b4abcffe32f5454dbee5eeaad9154da9/work/src
        work_root: /var/lib/zuul/builds/b4abcffe32f5454dbee5eeaad9154da9/work
      include_vars: []
      items:
      - branch: main
        change: '3581'
        change_message: "Fix AlertmanagerClusterCrashlooping false positive by preserving
          endpoint label\n\n- [x] Update `roles/kube_prometheus_stack/vars/main.yml`
          to remove `endpoint` from the labeldrop regex\n- [x] Verify the change doesn't
          break existing functionality\n- [x] Add release note for this change\n-
          [x] Code review passed with no issues\n- [x] Security scan (CodeQL) completed
          with no vulnerabilities\n- [x] Fix vale linting error by adding \"Alertmanager\"
          to vocabulary\n- [x] All changes complete and verified\n\n<!-- START COPILOT
          ORIGINAL PROMPT -->\n\n\n\n<details>\n\n<summary>Original prompt</summary>\n\n\n----\n\n*This
          section details on the original issue you should resolve*\n\n<issue_title>AlertmanagerClusterCrashlooping
          false positive caused by ServiceMonitor labeldrop merging process_start_time_seconds
          from multiple endpoints</issue_title>\n<issue_description>## Summary\n\nThe
          `AlertmanagerClusterCrashlooping` alert is permanently firing as a false
          positive across **all environments**. The root cause is the ServiceMonitor
          relabeling configuration in `roles/kube_prometheus_stack/vars/main.yml`
          that drops the `endpoint` label, causing `process_start_time_seconds` from
          two different processes to be merged into a single time series.\n\n## Root
          Cause Analysis\n\n### The Alert\n\nThe `AlertmanagerClusterCrashlooping`
          alert fires when:\n\n```promql\nchanges(process_start_time_seconds{job=\"kube-prometheus-stack-alertmanager\",namespace=\"monitoring\"}[10m])
          > 4\n```\n\nThis was showing `changes() = 39` (approximately 40 scrapes
          per 10-minute window minus 1), despite the alertmanager pod having **0 restarts**
          and being stable for months.\n\n### The Problem\n\nThe alertmanager ServiceMonitor
          scrapes **two endpoints**:\n\n1. `http-web` (port 9093) \u2014 the alertmanager
          process itself\n2. `reloader-web` (port 8080) \u2014 the config-reloader
          sidecar\n\nBoth processes export `process_start_time_seconds`, but with
          **different values** because they are different PIDs with different start
          times:\n\n- alertmanager (9093): `process_start_time_seconds = 1.76520548151e+09`
          (timestamp ending in .51)\n- config-reloader (8080): `process_start_time_seconds
          = 1.76520548159e+09` (timestamp ending in .59)\n\nThe `&relabelings_instance_to_pod_name`
          YAML anchor in `roles/kube_prometheus_stack/vars/main.yml` applies this
          relabeling to both endpoints:\n\n```yaml\n- action: labeldrop\n  regex:
          ^(container|endpoint|namespace|pod|node|service)$\n```\n\nBecause `endpoint`
          is dropped, both scrape targets produce identical label sets:\n\n```\n{instance=\"alertmanager-kube-prometheus-stack-alertmanager-0\",
          job=\"kube-prometheus-stack-alertmanager\"}\n```\n\nPrometheus interleaves
          samples from both targets into a **single time series**, alternating between
          the two different `process_start_time_seconds` values on every scrape. The
          `changes()` function counts every sample as a change because the value alternates,
          resulting in `changes() \u2248 number_of_scrapes - 1`.\n\n### Why This Affects
          All Environments\n\nThe same `&relabelings_instance_to_pod_name` YAML anchor
          is used by all ServiceMonitors defined in `roles/kube_prometheus_stack/vars/main.yml`,
          including alertmanager, prometheus, grafana, coreDns, kube-state-metrics,
          prometheusOperator, and all additionalServiceMonitors. Any ServiceMonitor
          that scrapes multiple ports/endpoints from the same pod will have this same
          label collision issue.\n\n## Verification\n\nConfirmed on ext-corvex (untouched
          environment) by querying each endpoint individually:\n\n```\n# alertmanager
          on port 9093\nprocess_start_time_seconds = 1.76520548151e+09\n\n# config-reloader
          on port 8080\nprocess_start_time_seconds = 1.76520548159e+09\n```\n\nThese
          two values alternate in the merged series, causing `changes()` to equal
          the number of interleaved scrapes minus 1.\n\n## Fix\n\n### Verified Fix\n\nPatched
          the alertmanager ServiceMonitor on customer env to remove `endpoint` from
          the `labeldrop` regex:\n\n```yaml\n# Before:\nregex: ^(container|endpoint|namespace|pod|node|service)$\n#
          After:\nregex: ^(container|namespace|pod|node|service)$\n```\n\nResult after
          applying the fix:\n- **Old merged series** (no `endpoint` label): `changes=25`
          (decaying as the 10m window clears old samples)\n- **New `http-web` endpoint
          series**: `changes=0` \u2705\n- **New `reloader-web` endpoint series**:
          `changes=0` \u2705\n\n### Required Code Change\n\nIn `roles/kube_prometheus_stack/vars/main.yml`,
          update the `&relabelings_instance_to_pod_name` anchor:\n\n```yaml\n# Before:\n-
          action: labeldrop\n  regex: ^(container|endpoint|namespace|pod|node|service)$\n\n#
          After:\n- action: labeldrop\n  regex: ^(container|namespace|pod|node|service)$\n```\n\n###
          Impact Assessment\n\nRemoving `endpoint` from the labeldrop means the `endpoint`
          label will now be preserved on all metrics scraped through these ServiceMonitors.
          This is the standard Prometheus behavior \u2014 the `endpoint` label distinguishes
          which port/path a metric was scraped from.\n\nServices affected by this
          change (all use the same YAML anchor):\n- alertmanager\n- prometheus (also
          has a reloader-web endpoint \u2014 same issue likely applies)\n- grafana\n-
          coreDns\n- kube-state-metrics\n- prometheusOperator\n- keycloak\n- memcached\n-
          openstack-database-exporter\n- percona-xtradb-pxc\n- rabbitmq\n- valkey\n\nDashboard
          queries and alert rules that reference these metrics may need to be checked
          to ensure they handle the additional `endpoint` label correctly (e.g., aggregations
          should still work since `sum by (instance)` will aggregate across endpoints).\n\n##
          Additional Context\n\n- The `prometheus/client_golang` library reads `/proc/[pid]/stat`
          on every scrape via `procfs.StartTime()` to export `process_start_time_seconds`,
          so the value is al...\n\n</details>\n\n\n\n<!-- START COPILOT CODING AGENT
          SUFFIX -->\n\n- Fixes vexxhost/atmosphere#3580\n\n<!-- START COPILOT CODING
          AGENT TIPS -->\n---\n\n\U0001F4A1 You can make Copilot smarter by setting
          up custom instructions, customizing its development environment and configuring
          Model Context Protocol (MCP) servers. Learn more [Copilot coding agent tips](https://gh.io/copilot-coding-agent-tips)
          in the docs."
        change_url: https://github.com/vexxhost/atmosphere/pull/3581
        commit_id: 978d67e419e7a88ff3701d69c758cc7b1030014c
        patchset: 978d67e419e7a88ff3701d69c758cc7b1030014c
        project:
          canonical_hostname: github.com
          canonical_name: github.com/vexxhost/atmosphere
          name: vexxhost/atmosphere
          short_name: atmosphere
          src_dir: src/github.com/vexxhost/atmosphere
        topic: null
      job: atmosphere-molecule-csi-rbd
      jobtags: []
      max_attempts: 3
      message: Rml4IEFsZXJ0bWFuYWdlckNsdXN0ZXJDcmFzaGxvb3BpbmcgZmFsc2UgcG9zaXRpdmUgYnkgcHJlc2VydmluZyBlbmRwb2ludCBsYWJlbAoKLSBbeF0gVXBkYXRlIGByb2xlcy9rdWJlX3Byb21ldGhldXNfc3RhY2svdmFycy9tYWluLnltbGAgdG8gcmVtb3ZlIGBlbmRwb2ludGAgZnJvbSB0aGUgbGFiZWxkcm9wIHJlZ2V4Ci0gW3hdIFZlcmlmeSB0aGUgY2hhbmdlIGRvZXNuJ3QgYnJlYWsgZXhpc3RpbmcgZnVuY3Rpb25hbGl0eQotIFt4XSBBZGQgcmVsZWFzZSBub3RlIGZvciB0aGlzIGNoYW5nZQotIFt4XSBDb2RlIHJldmlldyBwYXNzZWQgd2l0aCBubyBpc3N1ZXMKLSBbeF0gU2VjdXJpdHkgc2NhbiAoQ29kZVFMKSBjb21wbGV0ZWQgd2l0aCBubyB2dWxuZXJhYmlsaXRpZXMKLSBbeF0gRml4IHZhbGUgbGludGluZyBlcnJvciBieSBhZGRpbmcgIkFsZXJ0bWFuYWdlciIgdG8gdm9jYWJ1bGFyeQotIFt4XSBBbGwgY2hhbmdlcyBjb21wbGV0ZSBhbmQgdmVyaWZpZWQKCjwhLS0gU1RBUlQgQ09QSUxPVCBPUklHSU5BTCBQUk9NUFQgLS0+CgoKCjxkZXRhaWxzPgoKPHN1bW1hcnk+T3JpZ2luYWwgcHJvbXB0PC9zdW1tYXJ5PgoKCi0tLS0KCipUaGlzIHNlY3Rpb24gZGV0YWlscyBvbiB0aGUgb3JpZ2luYWwgaXNzdWUgeW91IHNob3VsZCByZXNvbHZlKgoKPGlzc3VlX3RpdGxlPkFsZXJ0bWFuYWdlckNsdXN0ZXJDcmFzaGxvb3BpbmcgZmFsc2UgcG9zaXRpdmUgY2F1c2VkIGJ5IFNlcnZpY2VNb25pdG9yIGxhYmVsZHJvcCBtZXJnaW5nIHByb2Nlc3Nfc3RhcnRfdGltZV9zZWNvbmRzIGZyb20gbXVsdGlwbGUgZW5kcG9pbnRzPC9pc3N1ZV90aXRsZT4KPGlzc3VlX2Rlc2NyaXB0aW9uPiMjIFN1bW1hcnkKClRoZSBgQWxlcnRtYW5hZ2VyQ2x1c3RlckNyYXNobG9vcGluZ2AgYWxlcnQgaXMgcGVybWFuZW50bHkgZmlyaW5nIGFzIGEgZmFsc2UgcG9zaXRpdmUgYWNyb3NzICoqYWxsIGVudmlyb25tZW50cyoqLiBUaGUgcm9vdCBjYXVzZSBpcyB0aGUgU2VydmljZU1vbml0b3IgcmVsYWJlbGluZyBjb25maWd1cmF0aW9uIGluIGByb2xlcy9rdWJlX3Byb21ldGhldXNfc3RhY2svdmFycy9tYWluLnltbGAgdGhhdCBkcm9wcyB0aGUgYGVuZHBvaW50YCBsYWJlbCwgY2F1c2luZyBgcHJvY2Vzc19zdGFydF90aW1lX3NlY29uZHNgIGZyb20gdHdvIGRpZmZlcmVudCBwcm9jZXNzZXMgdG8gYmUgbWVyZ2VkIGludG8gYSBzaW5nbGUgdGltZSBzZXJpZXMuCgojIyBSb290IENhdXNlIEFuYWx5c2lzCgojIyMgVGhlIEFsZXJ0CgpUaGUgYEFsZXJ0bWFuYWdlckNsdXN0ZXJDcmFzaGxvb3BpbmdgIGFsZXJ0IGZpcmVzIHdoZW46CgpgYGBwcm9tcWwKY2hhbmdlcyhwcm9jZXNzX3N0YXJ0X3RpbWVfc2Vjb25kc3tqb2I9Imt1YmUtcHJvbWV0aGV1cy1zdGFjay1hbGVydG1hbmFnZXIiLG5hbWVzcGFjZT0ibW9uaXRvcmluZyJ9WzEwbV0pID4gNApgYGAKClRoaXMgd2FzIHNob3dpbmcgYGNoYW5nZXMoKSA9IDM5YCAoYXBwcm94aW1hdGVse
SA0MCBzY3JhcGVzIHBlciAxMC1taW51dGUgd2luZG93IG1pbnVzIDEpLCBkZXNwaXRlIHRoZSBhbGVydG1hbmFnZXIgcG9kIGhhdmluZyAqKjAgcmVzdGFydHMqKiBhbmQgYmVpbmcgc3RhYmxlIGZvciBtb250aHMuCgojIyMgVGhlIFByb2JsZW0KClRoZSBhbGVydG1hbmFnZXIgU2VydmljZU1vbml0b3Igc2NyYXBlcyAqKnR3byBlbmRwb2ludHMqKjoKCjEuIGBodHRwLXdlYmAgKHBvcnQgOTA5Mykg4oCUIHRoZSBhbGVydG1hbmFnZXIgcHJvY2VzcyBpdHNlbGYKMi4gYHJlbG9hZGVyLXdlYmAgKHBvcnQgODA4MCkg4oCUIHRoZSBjb25maWctcmVsb2FkZXIgc2lkZWNhcgoKQm90aCBwcm9jZXNzZXMgZXhwb3J0IGBwcm9jZXNzX3N0YXJ0X3RpbWVfc2Vjb25kc2AsIGJ1dCB3aXRoICoqZGlmZmVyZW50IHZhbHVlcyoqIGJlY2F1c2UgdGhleSBhcmUgZGlmZmVyZW50IFBJRHMgd2l0aCBkaWZmZXJlbnQgc3RhcnQgdGltZXM6CgotIGFsZXJ0bWFuYWdlciAoOTA5Myk6IGBwcm9jZXNzX3N0YXJ0X3RpbWVfc2Vjb25kcyA9IDEuNzY1MjA1NDgxNTFlKzA5YCAodGltZXN0YW1wIGVuZGluZyBpbiAuNTEpCi0gY29uZmlnLXJlbG9hZGVyICg4MDgwKTogYHByb2Nlc3Nfc3RhcnRfdGltZV9zZWNvbmRzID0gMS43NjUyMDU0ODE1OWUrMDlgICh0aW1lc3RhbXAgZW5kaW5nIGluIC41OSkKClRoZSBgJnJlbGFiZWxpbmdzX2luc3RhbmNlX3RvX3BvZF9uYW1lYCBZQU1MIGFuY2hvciBpbiBgcm9sZXMva3ViZV9wcm9tZXRoZXVzX3N0YWNrL3ZhcnMvbWFpbi55bWxgIGFwcGxpZXMgdGhpcyByZWxhYmVsaW5nIHRvIGJvdGggZW5kcG9pbnRzOgoKYGBgeWFtbAotIGFjdGlvbjogbGFiZWxkcm9wCiAgcmVnZXg6IF4oY29udGFpbmVyfGVuZHBvaW50fG5hbWVzcGFjZXxwb2R8bm9kZXxzZXJ2aWNlKSQKYGBgCgpCZWNhdXNlIGBlbmRwb2ludGAgaXMgZHJvcHBlZCwgYm90aCBzY3JhcGUgdGFyZ2V0cyBwcm9kdWNlIGlkZW50aWNhbCBsYWJlbCBzZXRzOgoKYGBgCntpbnN0YW5jZT0iYWxlcnRtYW5hZ2VyLWt1YmUtcHJvbWV0aGV1cy1zdGFjay1hbGVydG1hbmFnZXItMCIsIGpvYj0ia3ViZS1wcm9tZXRoZXVzLXN0YWNrLWFsZXJ0bWFuYWdlciJ9CmBgYAoKUHJvbWV0aGV1cyBpbnRlcmxlYXZlcyBzYW1wbGVzIGZyb20gYm90aCB0YXJnZXRzIGludG8gYSAqKnNpbmdsZSB0aW1lIHNlcmllcyoqLCBhbHRlcm5hdGluZyBiZXR3ZWVuIHRoZSB0d28gZGlmZmVyZW50IGBwcm9jZXNzX3N0YXJ0X3RpbWVfc2Vjb25kc2AgdmFsdWVzIG9uIGV2ZXJ5IHNjcmFwZS4gVGhlIGBjaGFuZ2VzKClgIGZ1bmN0aW9uIGNvdW50cyBldmVyeSBzYW1wbGUgYXMgYSBjaGFuZ2UgYmVjYXVzZSB0aGUgdmFsdWUgYWx0ZXJuYXRlcywgcmVzdWx0aW5nIGluIGBjaGFuZ2VzKCkg4omIIG51bWJlcl9vZl9zY3JhcGVzIC0gMWAuCgojIyMgV2h5IFRoaXMgQWZmZWN0cyBBbGwgRW52aXJvbm1lbnRzCgpUaGUgc2FtZSBgJnJlbGFiZWxpbmdzX2luc3RhbmNlX3RvX3BvZF9uYW1lYCBZQU1MI
GFuY2hvciBpcyB1c2VkIGJ5IGFsbCBTZXJ2aWNlTW9uaXRvcnMgZGVmaW5lZCBpbiBgcm9sZXMva3ViZV9wcm9tZXRoZXVzX3N0YWNrL3ZhcnMvbWFpbi55bWxgLCBpbmNsdWRpbmcgYWxlcnRtYW5hZ2VyLCBwcm9tZXRoZXVzLCBncmFmYW5hLCBjb3JlRG5zLCBrdWJlLXN0YXRlLW1ldHJpY3MsIHByb21ldGhldXNPcGVyYXRvciwgYW5kIGFsbCBhZGRpdGlvbmFsU2VydmljZU1vbml0b3JzLiBBbnkgU2VydmljZU1vbml0b3IgdGhhdCBzY3JhcGVzIG11bHRpcGxlIHBvcnRzL2VuZHBvaW50cyBmcm9tIHRoZSBzYW1lIHBvZCB3aWxsIGhhdmUgdGhpcyBzYW1lIGxhYmVsIGNvbGxpc2lvbiBpc3N1ZS4KCiMjIFZlcmlmaWNhdGlvbgoKQ29uZmlybWVkIG9uIGV4dC1jb3J2ZXggKHVudG91Y2hlZCBlbnZpcm9ubWVudCkgYnkgcXVlcnlpbmcgZWFjaCBlbmRwb2ludCBpbmRpdmlkdWFsbHk6CgpgYGAKIyBhbGVydG1hbmFnZXIgb24gcG9ydCA5MDkzCnByb2Nlc3Nfc3RhcnRfdGltZV9zZWNvbmRzID0gMS43NjUyMDU0ODE1MWUrMDkKCiMgY29uZmlnLXJlbG9hZGVyIG9uIHBvcnQgODA4MApwcm9jZXNzX3N0YXJ0X3RpbWVfc2Vjb25kcyA9IDEuNzY1MjA1NDgxNTllKzA5CmBgYAoKVGhlc2UgdHdvIHZhbHVlcyBhbHRlcm5hdGUgaW4gdGhlIG1lcmdlZCBzZXJpZXMsIGNhdXNpbmcgYGNoYW5nZXMoKWAgdG8gZXF1YWwgdGhlIG51bWJlciBvZiBpbnRlcmxlYXZlZCBzY3JhcGVzIG1pbnVzIDEuCgojIyBGaXgKCiMjIyBWZXJpZmllZCBGaXgKClBhdGNoZWQgdGhlIGFsZXJ0bWFuYWdlciBTZXJ2aWNlTW9uaXRvciBvbiBjdXN0b21lciBlbnYgdG8gcmVtb3ZlIGBlbmRwb2ludGAgZnJvbSB0aGUgYGxhYmVsZHJvcGAgcmVnZXg6CgpgYGB5YW1sCiMgQmVmb3JlOgpyZWdleDogXihjb250YWluZXJ8ZW5kcG9pbnR8bmFtZXNwYWNlfHBvZHxub2RlfHNlcnZpY2UpJAojIEFmdGVyOgpyZWdleDogXihjb250YWluZXJ8bmFtZXNwYWNlfHBvZHxub2RlfHNlcnZpY2UpJApgYGAKClJlc3VsdCBhZnRlciBhcHBseWluZyB0aGUgZml4OgotICoqT2xkIG1lcmdlZCBzZXJpZXMqKiAobm8gYGVuZHBvaW50YCBsYWJlbCk6IGBjaGFuZ2VzPTI1YCAoZGVjYXlpbmcgYXMgdGhlIDEwbSB3aW5kb3cgY2xlYXJzIG9sZCBzYW1wbGVzKQotICoqTmV3IGBodHRwLXdlYmAgZW5kcG9pbnQgc2VyaWVzKio6IGBjaGFuZ2VzPTBgIOKchQotICoqTmV3IGByZWxvYWRlci13ZWJgIGVuZHBvaW50IHNlcmllcyoqOiBgY2hhbmdlcz0wYCDinIUKCiMjIyBSZXF1aXJlZCBDb2RlIENoYW5nZQoKSW4gYHJvbGVzL2t1YmVfcHJvbWV0aGV1c19zdGFjay92YXJzL21haW4ueW1sYCwgdXBkYXRlIHRoZSBgJnJlbGFiZWxpbmdzX2luc3RhbmNlX3RvX3BvZF9uYW1lYCBhbmNob3I6CgpgYGB5YW1sCiMgQmVmb3JlOgotIGFjdGlvbjogbGFiZWxkcm9wCiAgcmVnZXg6IF4oY29udGFpbmVyfGVuZHBvaW50fG5hbWVzcGFjZXxwb2R8bm9kZXxzZXJ2aWNlKSQKCiMgQWZ0ZXI6Ci0gYWN0aW9uO
iBsYWJlbGRyb3AKICByZWdleDogXihjb250YWluZXJ8bmFtZXNwYWNlfHBvZHxub2RlfHNlcnZpY2UpJApgYGAKCiMjIyBJbXBhY3QgQXNzZXNzbWVudAoKUmVtb3ZpbmcgYGVuZHBvaW50YCBmcm9tIHRoZSBsYWJlbGRyb3AgbWVhbnMgdGhlIGBlbmRwb2ludGAgbGFiZWwgd2lsbCBub3cgYmUgcHJlc2VydmVkIG9uIGFsbCBtZXRyaWNzIHNjcmFwZWQgdGhyb3VnaCB0aGVzZSBTZXJ2aWNlTW9uaXRvcnMuIFRoaXMgaXMgdGhlIHN0YW5kYXJkIFByb21ldGhldXMgYmVoYXZpb3Ig4oCUIHRoZSBgZW5kcG9pbnRgIGxhYmVsIGRpc3Rpbmd1aXNoZXMgd2hpY2ggcG9ydC9wYXRoIGEgbWV0cmljIHdhcyBzY3JhcGVkIGZyb20uCgpTZXJ2aWNlcyBhZmZlY3RlZCBieSB0aGlzIGNoYW5nZSAoYWxsIHVzZSB0aGUgc2FtZSBZQU1MIGFuY2hvcik6Ci0gYWxlcnRtYW5hZ2VyCi0gcHJvbWV0aGV1cyAoYWxzbyBoYXMgYSByZWxvYWRlci13ZWIgZW5kcG9pbnQg4oCUIHNhbWUgaXNzdWUgbGlrZWx5IGFwcGxpZXMpCi0gZ3JhZmFuYQotIGNvcmVEbnMKLSBrdWJlLXN0YXRlLW1ldHJpY3MKLSBwcm9tZXRoZXVzT3BlcmF0b3IKLSBrZXljbG9hawotIG1lbWNhY2hlZAotIG9wZW5zdGFjay1kYXRhYmFzZS1leHBvcnRlcgotIHBlcmNvbmEteHRyYWRiLXB4YwotIHJhYmJpdG1xCi0gdmFsa2V5CgpEYXNoYm9hcmQgcXVlcmllcyBhbmQgYWxlcnQgcnVsZXMgdGhhdCByZWZlcmVuY2UgdGhlc2UgbWV0cmljcyBtYXkgbmVlZCB0byBiZSBjaGVja2VkIHRvIGVuc3VyZSB0aGV5IGhhbmRsZSB0aGUgYWRkaXRpb25hbCBgZW5kcG9pbnRgIGxhYmVsIGNvcnJlY3RseSAoZS5nLiwgYWdncmVnYXRpb25zIHNob3VsZCBzdGlsbCB3b3JrIHNpbmNlIGBzdW0gYnkgKGluc3RhbmNlKWAgd2lsbCBhZ2dyZWdhdGUgYWNyb3NzIGVuZHBvaW50cykuCgojIyBBZGRpdGlvbmFsIENvbnRleHQKCi0gVGhlIGBwcm9tZXRoZXVzL2NsaWVudF9nb2xhbmdgIGxpYnJhcnkgcmVhZHMgYC9wcm9jL1twaWRdL3N0YXRgIG9uIGV2ZXJ5IHNjcmFwZSB2aWEgYHByb2Nmcy5TdGFydFRpbWUoKWAgdG8gZXhwb3J0IGBwcm9jZXNzX3N0YXJ0X3RpbWVfc2Vjb25kc2AsIHNvIHRoZSB2YWx1ZSBpcyBhbC4uLgoKPC9kZXRhaWxzPgoKCgo8IS0tIFNUQVJUIENPUElMT1QgQ09ESU5HIEFHRU5UIFNVRkZJWCAtLT4KCi0gRml4ZXMgdmV4eGhvc3QvYXRtb3NwaGVyZSMzNTgwCgo8IS0tIFNUQVJUIENPUElMT1QgQ09ESU5HIEFHRU5UIFRJUFMgLS0+Ci0tLQoK8J+SoSBZb3UgY2FuIG1ha2UgQ29waWxvdCBzbWFydGVyIGJ5IHNldHRpbmcgdXAgY3VzdG9tIGluc3RydWN0aW9ucywgY3VzdG9taXppbmcgaXRzIGRldmVsb3BtZW50IGVudmlyb25tZW50IGFuZCBjb25maWd1cmluZyBNb2RlbCBDb250ZXh0IFByb3RvY29sIChNQ1ApIHNlcnZlcnMuIExlYXJuIG1vcmUgW0NvcGlsb3QgY29kaW5nIGFnZW50IHRpcHNdKGh0dHBzOi8vZ2guaW8vY29waWxvdC1jb2RpbmctYWdlbnQtdGlwcykgaW4gdGhlI
GRvY3Mu
      patchset: 978d67e419e7a88ff3701d69c758cc7b1030014c
      pipeline: check
      playbook_context:
        playbook_projects:
          trusted/project_0/vexxhost.dev/zuul-config:
            canonical_name: vexxhost.dev/zuul-config
            checkout: main
            commit: 9052b5a7781b3346e4cffd452a54448cbff54d8b
          trusted/project_1/opendev.org/zuul/zuul-jobs:
            canonical_name: opendev.org/zuul/zuul-jobs
            checkout: master
            commit: d73b78cc624f363c6b7fcfe833f2db4571e4e979
          trusted/project_2/github.com/vexxhost/zuul-jobs:
            canonical_name: github.com/vexxhost/zuul-jobs
            checkout: main
            commit: a6e68243e02ef030ce5e75f8b67630880c475f33
          untrusted/project_0/github.com/vexxhost/zuul-jobs:
            canonical_name: github.com/vexxhost/zuul-jobs
            checkout: main
            commit: a6e68243e02ef030ce5e75f8b67630880c475f33
          untrusted/project_1/vexxhost.dev/zuul-config:
            canonical_name: vexxhost.dev/zuul-config
            checkout: main
            commit: 9052b5a7781b3346e4cffd452a54448cbff54d8b
          untrusted/project_2/opendev.org/zuul/zuul-jobs:
            canonical_name: opendev.org/zuul/zuul-jobs
            checkout: master
            commit: d73b78cc624f363c6b7fcfe833f2db4571e4e979
          untrusted/project_3/github.com/vexxhost/atmosphere:
            canonical_name: github.com/vexxhost/atmosphere
            checkout: main
            commit: 978d67e419e7a88ff3701d69c758cc7b1030014c
          untrusted/project_4/opendev.org/openstack/openstack-helm:
            canonical_name: opendev.org/openstack/openstack-helm
            checkout: master
            commit: 3a57ef7049b4b76a5a29f8331975931464a14d51
        playbooks:
        - path: untrusted/project_0/github.com/vexxhost/zuul-jobs/playbooks/molecule/run.yaml
          roles:
          - checkout: master
            checkout_description: project default branch
            link_name: ansible/playbook_0/role_1/zuul-jobs
            link_target: untrusted/project_2/opendev.org/zuul/zuul-jobs
            role_path: ansible/playbook_0/role_1/zuul-jobs/roles
          - checkout: main
            checkout_description: playbook branch
            link_name: ansible/playbook_0/role_2/zuul-jobs
            link_target: untrusted/project_0/github.com/vexxhost/zuul-jobs
            role_path: ansible/playbook_0/role_2/zuul-jobs/roles
      post_review: false
      post_timeout: null
      pre_timeout: null
      project:
        canonical_hostname: github.com
        canonical_name: github.com/vexxhost/atmosphere
        name: vexxhost/atmosphere
        short_name: atmosphere
        src_dir: src/github.com/vexxhost/atmosphere
      projects:
        github.com/vexxhost/atmosphere:
          canonical_hostname: github.com
          canonical_name: github.com/vexxhost/atmosphere
          checkout: main
          checkout_description: zuul branch
          commit: 978d67e419e7a88ff3701d69c758cc7b1030014c
          name: vexxhost/atmosphere
          required: false
          short_name: atmosphere
          src_dir: src/github.com/vexxhost/atmosphere
      ref: refs/pull/3581/head
      resources: {}
      tenant: oss
      timeout: 1800
      topic: null
      voting: true
