# Cilium agent Pod — a live object captured from the API server (note the
# server-populated fields: creationTimestamp, uid, resourceVersion, status).
apiVersion: v1
kind: Pod
metadata:
  annotations:
    # AppArmor disabled (unconfined) for the agent and the three init
    # containers that manipulate host state (sysctls, cgroups, Cilium state).
    container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: unconfined
    container.apparmor.security.beta.kubernetes.io/cilium-agent: unconfined
    container.apparmor.security.beta.kubernetes.io/clean-cilium-state: unconfined
    container.apparmor.security.beta.kubernetes.io/mount-cgroup: unconfined
  creationTimestamp: "2026-04-02T06:06:29Z"
  # Pod name ("cilium-gzbq2" below) was generated from this prefix.
  generateName: cilium-
  labels:
    app.kubernetes.io/name: cilium-agent
    app.kubernetes.io/part-of: cilium
    controller-revision-hash: 556fd66f89
    k8s-app: cilium
    pod-template-generation: "1"
  name: cilium-gzbq2
  namespace: kube-system
  # Owned by (and garbage-collected with) the "cilium" DaemonSet.
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: DaemonSet
    name: cilium
    uid: e06a7ce6-28f3-44ef-b8c6-4fb4389652d4
  resourceVersion: "716"
  uid: ff80ce06-1624-4524-b7e0-a8103597e942
spec:
  affinity:
    nodeAffinity:
      # Pin this Pod to the single node named "instance" — matches
      # spec.nodeName below (per-node DaemonSet scheduling).
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchFields:
          - key: metadata.name
            operator: In
            values:
            - instance
    podAntiAffinity:
      # At most one k8s-app=cilium Pod per hostname.
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchLabels:
            k8s-app: cilium
        topologyKey: kubernetes.io/hostname
  automountServiceAccountToken: true
  containers:
  - args:
    # Agent reads its entire configuration from this directory; presumably
    # rendered by the "config" init container via the shared "tmp" volume
    # (both mount it at /tmp) — TODO confirm against the cilium image.
    - --config-dir=/tmp/cilium/config-map
    command:
    - cilium-agent
    env:
    # Downward-API injection of the node and namespace the agent runs on.
    - name: K8S_NODE_NAME
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: spec.nodeName
    - name: CILIUM_K8S_NAMESPACE
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.namespace
    # Points at the read-only "clustermesh-secrets" projected volume mount.
    - name: CILIUM_CLUSTERMESH_CONFIG
      value: /var/lib/cilium/clustermesh/
    # Image pulled through a local Harbor proxy of quay.io.
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    lifecycle:
      # postStart: best-effort removal of AWS VPC CNI SNAT/CONNMARK iptables
      # rules; rationale is in the inline shell comments of the script.
      postStart:
        exec:
          command:
          - bash
          - -c
          - |
            set -o errexit
            set -o pipefail
            set -o nounset

            # When running in AWS ENI mode, it's likely that 'aws-node' has
            # had a chance to install SNAT iptables rules. These can result
            # in dropped traffic, so we should attempt to remove them.
            # We do it using a 'postStart' hook since this may need to run
            # for nodes which might have already been init'ed but may still
            # have dangling rules. This is safe because there are no
            # dependencies on anything that is part of the startup script
            # itself, and can be safely run multiple times per node (e.g. in
            # case of a restart).
            if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
            then
                echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
                iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
            fi
            echo 'Done!'
      # preStop: CNI uninstall script shipped in the image; contents not
      # visible here.
      preStop:
        exec:
          command:
          - /cni-uninstall.sh
    # All three probes hit the agent's local health endpoint on
    # 127.0.0.1:9879 — reachable because the Pod uses hostNetwork: true.
    livenessProbe:
      failureThreshold: 10
      httpGet:
        host: 127.0.0.1
        httpHeaders:
        - name: brief
          value: "true"
        path: /healthz
        port: 9879
        scheme: HTTP
      periodSeconds: 30
      successThreshold: 1
      timeoutSeconds: 5
    name: cilium-agent
    readinessProbe:
      failureThreshold: 3
      httpGet:
        host: 127.0.0.1
        httpHeaders:
        - name: brief
          value: "true"
        path: /healthz
        port: 9879
        scheme: HTTP
      periodSeconds: 30
      successThreshold: 1
      timeoutSeconds: 5
    resources: {}
    # Not privileged: instead drops ALL capabilities and adds back the
    # specific set the agent needs (network, BPF/module, mount admin, etc.).
    securityContext:
      capabilities:
        add:
        - CHOWN
        - KILL
        - NET_ADMIN
        - NET_RAW
        - IPC_LOCK
        - SYS_MODULE
        - SYS_ADMIN
        - SYS_RESOURCE
        - DAC_OVERRIDE
        - FOWNER
        - SETGID
        - SETUID
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    # Up to 105 attempts every 2s (~210s budget) before the kubelet gives up.
    startupProbe:
      failureThreshold: 105
      httpGet:
        host: 127.0.0.1
        httpHeaders:
        - name: brief
          value: "true"
        path: /healthz
        port: 9879
        scheme: HTTP
      periodSeconds: 2
      successThreshold: 1
      timeoutSeconds: 1
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    # Host filesystem views (proc/sys, BPF fs, CNI config, kernel modules,
    # iptables lock) plus Cilium runtime state and secrets.
    volumeMounts:
    - mountPath: /host/proc/sys/net
      name: host-proc-sys-net
    - mountPath: /host/proc/sys/kernel
      name: host-proc-sys-kernel
    - mountPath: /sys/fs/bpf
      mountPropagation: HostToContainer
      name: bpf-maps
    - mountPath: /var/run/cilium
      name: cilium-run
    - mountPath: /host/etc/cni/net.d
      name: etc-cni-netd
    - mountPath: /var/lib/cilium/clustermesh
      name: clustermesh-secrets
      readOnly: true
    - mountPath: /lib/modules
      name: lib-modules
      readOnly: true
    - mountPath: /run/xtables.lock
      name: xtables-lock
    - mountPath: /tmp
      name: tmp
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-lgt8l
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  # The agent shares the node's network namespace.
  hostNetwork: true
  initContainers:
  # 1. "config": runs `cilium build-config` to render the agent config;
  #    presumably into the shared "tmp" volume that the agent reads via
  #    --config-dir=/tmp/cilium/config-map — TODO confirm.
  - command:
    - cilium
    - build-config
    env:
    - name: K8S_NODE_NAME
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: spec.nodeName
    - name: CILIUM_K8S_NAMESPACE
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.namespace
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: config
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /tmp
      name: tmp
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-lgt8l
      readOnly: true
  # 2. "mount-cgroup": copies cilium-mount onto the host (/hostbin is the
  #    host's /opt/cni/bin), nsenters PID 1's cgroup + mount namespaces to
  #    run it against $CGROUP_ROOT, then deletes the copied binary.
  - command:
    - sh
    - -ec
    - |
      cp /usr/bin/cilium-mount /hostbin/cilium-mount;
      nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
      rm /hostbin/cilium-mount
    env:
    - name: CGROUP_ROOT
      value: /run/cilium/cgroupv2
    - name: BIN_PATH
      value: /opt/cni/bin
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: mount-cgroup
    resources: {}
    # nsenter into the host's namespaces needs these three capabilities.
    securityContext:
      capabilities:
        add:
        - SYS_ADMIN
        - SYS_CHROOT
        - SYS_PTRACE
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /hostproc
      name: hostproc
    - mountPath: /hostbin
      name: cni-path
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-lgt8l
      readOnly: true
  # 3. "apply-sysctl-overwrites": same copy/nsenter/delete pattern as above,
  #    running cilium-sysctlfix in the host's mount namespace.
  - command:
    - sh
    - -ec
    - |
      cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
      nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
      rm /hostbin/cilium-sysctlfix
    env:
    - name: BIN_PATH
      value: /opt/cni/bin
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: apply-sysctl-overwrites
    resources: {}
    securityContext:
      capabilities:
        add:
        - SYS_ADMIN
        - SYS_CHROOT
        - SYS_PTRACE
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /hostproc
      name: hostproc
    - mountPath: /hostbin
      name: cni-path
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-lgt8l
      readOnly: true
  # 4. "mount-bpf-fs": idempotently mounts the BPF filesystem at /sys/fs/bpf
  #    on the host (Bidirectional propagation makes the mount visible there);
  #    the only privileged container in this Pod.
  - args:
    - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf
    command:
    - /bin/bash
    - -c
    - --
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: mount-bpf-fs
    resources: {}
    securityContext:
      privileged: true
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /sys/fs/bpf
      mountPropagation: Bidirectional
      name: bpf-maps
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-lgt8l
      readOnly: true
  # 5. "clean-cilium-state": runs the image's /init-container.sh; behavior is
  #    gated by two optional keys in the "cilium-config" ConfigMap (both may
  #    be absent, in which case the env vars are unset).
  - command:
    - /init-container.sh
    env:
    - name: CILIUM_ALL_STATE
      valueFrom:
        configMapKeyRef:
          key: clean-cilium-state
          name: cilium-config
          optional: true
    - name: CILIUM_BPF_STATE
      valueFrom:
        configMapKeyRef:
          key: clean-cilium-bpf-state
          name: cilium-config
          optional: true
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: clean-cilium-state
    resources: {}
    securityContext:
      capabilities:
        add:
        - NET_ADMIN
        - SYS_MODULE
        - SYS_ADMIN
        - SYS_RESOURCE
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /sys/fs/bpf
      name: bpf-maps
    - mountPath: /run/cilium/cgroupv2
      mountPropagation: HostToContainer
      name: cilium-cgroup
    - mountPath: /var/run/cilium
      name: cilium-run
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-lgt8l
      readOnly: true
  # 6. "install-cni-binaries": installs the CNI plugin into the host's
  #    /opt/cni/bin; the only container with resource requests (so the Pod
  #    is classed Burstable in status.qosClass).
  - command:
    - /install-plugin.sh
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: install-cni-binaries
    resources:
      requests:
        cpu: 100m
        memory: 10Mi
    securityContext:
      capabilities:
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /host/opt/cni/bin
      name: cni-path
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-lgt8l
      readOnly: true
  nodeName: instance
  nodeSelector:
    kubernetes.io/os: linux
  preemptionPolicy: PreemptLowerPriority
  # Resolved value of the system-node-critical priority class below.
  priority: 2000001000
  priorityClassName: system-node-critical
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: cilium
  serviceAccountName: cilium
  # Fast shutdown: only 1s grace before SIGKILL (preStop must be quick).
  terminationGracePeriodSeconds: 1
  tolerations:
  # A key-less Exists toleration matches every taint, so the specific
  # node.kubernetes.io/* entries below (auto-added for DaemonSet Pods)
  # are effectively redundant but harmless.
  - operator: Exists
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/disk-pressure
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/memory-pressure
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/pid-pressure
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/unschedulable
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/network-unavailable
    operator: Exists
  volumes:
  # Scratch space shared between the "config" init container and the agent.
  - emptyDir: {}
    name: tmp
  # Host-path volumes: Cilium runtime state, BPF maps, host /proc, cgroup2
  # root, CNI binaries/config, kernel modules, and the iptables lock file.
  - hostPath:
      path: /var/run/cilium
      type: DirectoryOrCreate
    name: cilium-run
  - hostPath:
      path: /sys/fs/bpf
      type: DirectoryOrCreate
    name: bpf-maps
  - hostPath:
      path: /proc
      type: Directory
    name: hostproc
  - hostPath:
      path: /run/cilium/cgroupv2
      type: DirectoryOrCreate
    name: cilium-cgroup
  - hostPath:
      path: /opt/cni/bin
      type: DirectoryOrCreate
    name: cni-path
  - hostPath:
      path: /etc/cni/net.d
      type: DirectoryOrCreate
    name: etc-cni-netd
  - hostPath:
      path: /lib/modules
      type: ""
    name: lib-modules
  - hostPath:
      path: /run/xtables.lock
      type: FileOrCreate
    name: xtables-lock
  # Clustermesh credentials; both sources optional so the Pod starts even
  # when clustermesh is not configured. defaultMode 256 == 0o400 (owner
  # read-only).
  - name: clustermesh-secrets
    projected:
      defaultMode: 256
      sources:
      - secret:
          name: cilium-clustermesh
          optional: true
      - secret:
          items:
          - key: tls.key
            path: common-etcd-client.key
          - key: tls.crt
            path: common-etcd-client.crt
          - key: ca.crt
            path: common-etcd-client-ca.crt
          name: clustermesh-apiserver-remote-cert
          optional: true
  - hostPath:
      path: /proc/sys/net
      type: Directory
    name: host-proc-sys-net
  - hostPath:
      path: /proc/sys/kernel
      type: Directory
    name: host-proc-sys-kernel
  # Standard kubelet-managed service-account token projection;
  # defaultMode 420 == 0o644.
  - name: kube-api-access-lgt8l
    projected:
      defaultMode: 420
      sources:
      - serviceAccountToken:
          expirationSeconds: 3607
          path: token
      - configMap:
          items:
          - key: ca.crt
            path: ca.crt
          name: kube-root-ca.crt
      - downwardAPI:
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
            path: namespace
# Server-populated runtime status: scheduled 06:06:29, init chain finished
# 06:06:51 (Initialized), agent Ready at 06:06:56.
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2026-04-02T06:06:51Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2026-04-02T06:06:56Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2026-04-02T06:06:56Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2026-04-02T06:06:29Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: containerd://df117ef943678a5c5c0c203da04b30a83458966b8cbdba5e89f369d5bb93d06b
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imageID: harbor.atmosphere.dev/quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: cilium-agent
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2026-04-02T06:06:51Z"
  hostIP: 199.19.213.114
  # All six init containers terminated with exitCode 0 (Completed), in the
  # order declared in spec.initContainers.
  initContainerStatuses:
  - containerID: containerd://2bc16eb3fe8c78e401045bb19d030022c5fee109b9d2b2df5373394fdd1d7e89
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imageID: harbor.atmosphere.dev/quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: config
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://2bc16eb3fe8c78e401045bb19d030022c5fee109b9d2b2df5373394fdd1d7e89
        exitCode: 0
        finishedAt: "2026-04-02T06:06:40Z"
        reason: Completed
        startedAt: "2026-04-02T06:06:40Z"
  - containerID: containerd://37f25f3f2dbbe397e7146ae8b8e2ae3ec8397b1e8585556713d3c1fc957c75bb
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imageID: harbor.atmosphere.dev/quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: mount-cgroup
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://37f25f3f2dbbe397e7146ae8b8e2ae3ec8397b1e8585556713d3c1fc957c75bb
        exitCode: 0
        finishedAt: "2026-04-02T06:06:46Z"
        reason: Completed
        startedAt: "2026-04-02T06:06:46Z"
  - containerID: containerd://bb59fd239fe99c4c96659b719773ea0a783cf5b057bd6286fdc96aa98a28fdd2
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imageID: harbor.atmosphere.dev/quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: apply-sysctl-overwrites
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://bb59fd239fe99c4c96659b719773ea0a783cf5b057bd6286fdc96aa98a28fdd2
        exitCode: 0
        finishedAt: "2026-04-02T06:06:47Z"
        reason: Completed
        startedAt: "2026-04-02T06:06:47Z"
  - containerID: containerd://ce6531035c6e73c5bf683b67ec9a07c11e594a88cc145276129bbcb2cb6f3b62
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imageID: harbor.atmosphere.dev/quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: mount-bpf-fs
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://ce6531035c6e73c5bf683b67ec9a07c11e594a88cc145276129bbcb2cb6f3b62
        exitCode: 0
        finishedAt: "2026-04-02T06:06:48Z"
        reason: Completed
        startedAt: "2026-04-02T06:06:48Z"
  - containerID: containerd://7bfe5446499bed5a32523285d92a7dea1c8e0639f93f5c3d2f81d79aa8fc0938
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imageID: harbor.atmosphere.dev/quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: clean-cilium-state
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://7bfe5446499bed5a32523285d92a7dea1c8e0639f93f5c3d2f81d79aa8fc0938
        exitCode: 0
        finishedAt: "2026-04-02T06:06:49Z"
        reason: Completed
        startedAt: "2026-04-02T06:06:49Z"
  - containerID: containerd://fc8be27f223cd61c509dd1f6f599bae38f3e85c77c2671598d9bec4ab3ed2841
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imageID: harbor.atmosphere.dev/quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: install-cni-binaries
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://fc8be27f223cd61c509dd1f6f599bae38f3e85c77c2671598d9bec4ab3ed2841
        exitCode: 0
        finishedAt: "2026-04-02T06:06:50Z"
        reason: Completed
        startedAt: "2026-04-02T06:06:50Z"
  phase: Running
  # podIP equals hostIP because spec.hostNetwork is true.
  podIP: 199.19.213.114
  podIPs:
  - ip: 199.19.213.114
  # Burstable: at least one container has requests but no limits
  # (install-cni-binaries).
  qosClass: Burstable
  startTime: "2026-04-02T06:06:29Z"
