# Cilium agent Pod as serialized by the Kubernetes API server. Fields such as
# creationTimestamp, uid, resourceVersion and the generated name are
# server-populated and must not be edited by hand.
apiVersion: v1
kind: Pod
metadata:
  annotations:
    # AppArmor confinement is disabled for these containers: they mount
    # filesystems, load BPF programs and rewrite host sysctls, all of which a
    # confined profile would block.
    container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: unconfined
    container.apparmor.security.beta.kubernetes.io/cilium-agent: unconfined
    container.apparmor.security.beta.kubernetes.io/clean-cilium-state: unconfined
    container.apparmor.security.beta.kubernetes.io/mount-cgroup: unconfined
  creationTimestamp: "2026-04-14T05:37:00Z"
  generateName: cilium-
  labels:
    app.kubernetes.io/name: cilium-agent
    app.kubernetes.io/part-of: cilium
    controller-revision-hash: 556fd66f89
    k8s-app: cilium
    pod-template-generation: "1"
  name: cilium-2n4wn
  namespace: kube-system
  # Owned by the "cilium" DaemonSet; blockOwnerDeletion keeps the pod from
  # being garbage-collected while the controller still references it.
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: DaemonSet
    name: cilium
    uid: b36e1b72-d4ab-4d7c-870d-8da70242f4c2
  resourceVersion: "749"
  uid: f0f967bd-72bc-4b65-84c7-37d1146132df
spec:
  affinity:
    nodeAffinity:
      # DaemonSet-style per-node pinning: this pod may only schedule onto the
      # node whose metadata.name is "instance" (matches spec.nodeName below).
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchFields:
          - key: metadata.name
            operator: In
            values:
            - instance
    podAntiAffinity:
      # At most one cilium agent pod per node (keyed on the node hostname).
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchLabels:
            k8s-app: cilium
        topologyKey: kubernetes.io/hostname
  automountServiceAccountToken: true
  containers:
  # Main agent container: runs the Cilium datapath/control plane on this node,
  # reading its configuration from the directory populated by the "config"
  # init container (--config-dir).
  - args:
    - --config-dir=/tmp/cilium/config-map
    command:
    - cilium-agent
    env:
    # Node and namespace identity injected via the downward API.
    - name: K8S_NODE_NAME
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: spec.nodeName
    - name: CILIUM_K8S_NAMESPACE
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.namespace
    - name: CILIUM_CLUSTERMESH_CONFIG
      value: /var/lib/cilium/clustermesh/
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    lifecycle:
      # postStart cleans up leftover AWS VPC CNI iptables rules (rationale is
      # in the embedded script itself); preStop removes the CNI config so the
      # node does not keep routing through a dead agent.
      postStart:
        exec:
          command:
          - bash
          - -c
          - |
            set -o errexit
            set -o pipefail
            set -o nounset

            # When running in AWS ENI mode, it's likely that 'aws-node' has
            # had a chance to install SNAT iptables rules. These can result
            # in dropped traffic, so we should attempt to remove them.
            # We do it using a 'postStart' hook since this may need to run
            # for nodes which might have already been init'ed but may still
            # have dangling rules. This is safe because there are no
            # dependencies on anything that is part of the startup script
            # itself, and can be safely run multiple times per node (e.g. in
            # case of a restart).
            if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
            then
                echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
                iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
            fi
            echo 'Done!'
      preStop:
        exec:
          command:
          - /cni-uninstall.sh
    # All three probes hit the agent's local health endpoint on the host
    # loopback (the pod runs with hostNetwork: true, see below).
    livenessProbe:
      failureThreshold: 10
      httpGet:
        host: 127.0.0.1
        httpHeaders:
        - name: brief
          value: "true"
        path: /healthz
        port: 9879
        scheme: HTTP
      periodSeconds: 30
      successThreshold: 1
      timeoutSeconds: 5
    name: cilium-agent
    readinessProbe:
      failureThreshold: 3
      httpGet:
        host: 127.0.0.1
        httpHeaders:
        - name: brief
          value: "true"
        path: /healthz
        port: 9879
        scheme: HTTP
      periodSeconds: 30
      successThreshold: 1
      timeoutSeconds: 5
    resources: {}
    # Explicit capability list instead of privileged: true; everything else is
    # dropped. spc_t is an unconfined SELinux domain for host-touching pods.
    securityContext:
      capabilities:
        add:
        - CHOWN
        - KILL
        - NET_ADMIN
        - NET_RAW
        - IPC_LOCK
        - SYS_MODULE
        - SYS_ADMIN
        - SYS_RESOURCE
        - DAC_OVERRIDE
        - FOWNER
        - SETGID
        - SETUID
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    # Allows up to 105 * 2s = 210s for the agent to come up before the
    # liveness probe takes over.
    startupProbe:
      failureThreshold: 105
      httpGet:
        host: 127.0.0.1
        httpHeaders:
        - name: brief
          value: "true"
        path: /healthz
        port: 9879
        scheme: HTTP
      periodSeconds: 2
      successThreshold: 1
      timeoutSeconds: 1
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    # Host paths the agent needs: sysctls, BPF filesystem, CNI config, kernel
    # modules and the shared iptables lock. See the volumes section for the
    # backing hostPath definitions.
    volumeMounts:
    - mountPath: /host/proc/sys/net
      name: host-proc-sys-net
    - mountPath: /host/proc/sys/kernel
      name: host-proc-sys-kernel
    - mountPath: /sys/fs/bpf
      mountPropagation: HostToContainer
      name: bpf-maps
    - mountPath: /var/run/cilium
      name: cilium-run
    - mountPath: /host/etc/cni/net.d
      name: etc-cni-netd
    - mountPath: /var/lib/cilium/clustermesh
      name: clustermesh-secrets
      readOnly: true
    - mountPath: /lib/modules
      name: lib-modules
      readOnly: true
    - mountPath: /run/xtables.lock
      name: xtables-lock
    - mountPath: /tmp
      name: tmp
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-875k4
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  # Runs in the host network namespace (the agent manages node networking),
  # which is why the pod IP equals the node IP in status below.
  hostNetwork: true
  # Init containers run in order; each prepares one piece of host state the
  # agent depends on.
  initContainers:
  # 1. "config": renders the agent configuration from the cilium-config
  #    ConfigMap into /tmp/cilium/config-map (shared via the "tmp" volume).
  - command:
    - cilium
    - build-config
    env:
    - name: K8S_NODE_NAME
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: spec.nodeName
    - name: CILIUM_K8S_NAMESPACE
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.namespace
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: config
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /tmp
      name: tmp
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-875k4
      readOnly: true
  # 2. "mount-cgroup": copies cilium-mount onto the host, then uses nsenter to
  #    run it in the host's cgroup/mount namespaces so the cgroup2 filesystem
  #    is mounted at CGROUP_ROOT; the helper binary is removed afterwards.
  - command:
    - sh
    - -ec
    - |
      cp /usr/bin/cilium-mount /hostbin/cilium-mount;
      nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
      rm /hostbin/cilium-mount
    env:
    - name: CGROUP_ROOT
      value: /run/cilium/cgroupv2
    - name: BIN_PATH
      value: /opt/cni/bin
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: mount-cgroup
    resources: {}
    securityContext:
      capabilities:
        add:
        - SYS_ADMIN
        - SYS_CHROOT
        - SYS_PTRACE
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /hostproc
      name: hostproc
    - mountPath: /hostbin
      name: cni-path
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-875k4
      readOnly: true
  # 3. "apply-sysctl-overwrites": same copy/nsenter/remove pattern to run
  #    cilium-sysctlfix in the host mount namespace.
  - command:
    - sh
    - -ec
    - |
      cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
      nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
      rm /hostbin/cilium-sysctlfix
    env:
    - name: BIN_PATH
      value: /opt/cni/bin
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: apply-sysctl-overwrites
    resources: {}
    securityContext:
      capabilities:
        add:
        - SYS_ADMIN
        - SYS_CHROOT
        - SYS_PTRACE
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /hostproc
      name: hostproc
    - mountPath: /hostbin
      name: cni-path
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-875k4
      readOnly: true
  # 4. "mount-bpf-fs": idempotently mounts the BPF filesystem on the host;
  #    needs privileged + Bidirectional propagation so the mount persists.
  - args:
    - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf
    command:
    - /bin/bash
    - -c
    - --
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: mount-bpf-fs
    resources: {}
    securityContext:
      privileged: true
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /sys/fs/bpf
      mountPropagation: Bidirectional
      name: bpf-maps
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-875k4
      readOnly: true
  # 5. "clean-cilium-state": optionally wipes agent/BPF state, gated by the
  #    clean-cilium-state / clean-cilium-bpf-state keys of the cilium-config
  #    ConfigMap (both optional, so absence means "do not clean").
  - command:
    - /init-container.sh
    env:
    - name: CILIUM_ALL_STATE
      valueFrom:
        configMapKeyRef:
          key: clean-cilium-state
          name: cilium-config
          optional: true
    - name: CILIUM_BPF_STATE
      valueFrom:
        configMapKeyRef:
          key: clean-cilium-bpf-state
          name: cilium-config
          optional: true
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: clean-cilium-state
    resources: {}
    securityContext:
      capabilities:
        add:
        - NET_ADMIN
        - SYS_MODULE
        - SYS_ADMIN
        - SYS_RESOURCE
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /sys/fs/bpf
      name: bpf-maps
    - mountPath: /run/cilium/cgroupv2
      mountPropagation: HostToContainer
      name: cilium-cgroup
    - mountPath: /var/run/cilium
      name: cilium-run
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-875k4
      readOnly: true
  # 6. "install-cni-binaries": copies the CNI plugin into the host's
  #    /opt/cni/bin; the only container here with resource requests.
  - command:
    - /install-plugin.sh
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: install-cni-binaries
    resources:
      requests:
        cpu: 100m
        memory: 10Mi
    securityContext:
      capabilities:
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /host/opt/cni/bin
      name: cni-path
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-875k4
      readOnly: true
  nodeName: instance
  nodeSelector:
    kubernetes.io/os: linux
  preemptionPolicy: PreemptLowerPriority
  # system-node-critical gives the highest built-in priority so the CNI agent
  # is never evicted in favor of workloads.
  priority: 2000001000
  priorityClassName: system-node-critical
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: cilium
  serviceAccountName: cilium
  terminationGracePeriodSeconds: 1
  tolerations:
  # The bare "operator: Exists" toleration (no key) tolerates every taint, so
  # the agent can run on all nodes; the remaining entries are the standard
  # kubelet-added DaemonSet tolerations.
  - operator: Exists
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/disk-pressure
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/memory-pressure
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/pid-pressure
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/unschedulable
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/network-unavailable
    operator: Exists
  # Volumes are mostly hostPath mounts sharing node state with the agent and
  # init containers; names correspond to the volumeMounts above.
  volumes:
  - emptyDir: {}
    name: tmp
  - hostPath:
      path: /var/run/cilium
      type: DirectoryOrCreate
    name: cilium-run
  - hostPath:
      path: /sys/fs/bpf
      type: DirectoryOrCreate
    name: bpf-maps
  - hostPath:
      path: /proc
      type: Directory
    name: hostproc
  - hostPath:
      path: /run/cilium/cgroupv2
      type: DirectoryOrCreate
    name: cilium-cgroup
  - hostPath:
      path: /opt/cni/bin
      type: DirectoryOrCreate
    name: cni-path
  - hostPath:
      path: /etc/cni/net.d
      type: DirectoryOrCreate
    name: etc-cni-netd
  - hostPath:
      path: /lib/modules
      type: ""
    name: lib-modules
  - hostPath:
      path: /run/xtables.lock
      type: FileOrCreate
    name: xtables-lock
  # Clustermesh credentials; defaultMode 256 is decimal for octal 0400
  # (owner read-only). Both secret sources are optional, so this volume is
  # empty when clustermesh is not configured.
  - name: clustermesh-secrets
    projected:
      defaultMode: 256
      sources:
      - secret:
          name: cilium-clustermesh
          optional: true
      - secret:
          items:
          - key: tls.key
            path: common-etcd-client.key
          - key: tls.crt
            path: common-etcd-client.crt
          - key: ca.crt
            path: common-etcd-client-ca.crt
          name: clustermesh-apiserver-remote-cert
          optional: true
  - hostPath:
      path: /proc/sys/net
      type: Directory
    name: host-proc-sys-net
  - hostPath:
      path: /proc/sys/kernel
      type: Directory
    name: host-proc-sys-kernel
  # Standard projected service-account token volume (defaultMode 420 is
  # decimal for octal 0644).
  - name: kube-api-access-875k4
    projected:
      defaultMode: 420
      sources:
      - serviceAccountToken:
          expirationSeconds: 3607
          path: token
      - configMap:
          items:
          - key: ca.crt
            path: ca.crt
          name: kube-root-ca.crt
      - downwardAPI:
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
            path: namespace
# Read-only status populated by the kubelet/API server; it is ignored on
# create/apply and should never be edited by hand.
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2026-04-14T05:37:16Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2026-04-14T05:37:20Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2026-04-14T05:37:20Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2026-04-14T05:37:00Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: containerd://b84b3ac4fba5ff6ac480da2b2939128d7a9787d4e0e1e0dd5c85d6893ec38d46
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imageID: harbor.atmosphere.dev/quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: cilium-agent
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2026-04-14T05:37:16Z"
  hostIP: 199.204.45.227
  # All six init containers completed (exitCode 0) in declaration order.
  initContainerStatuses:
  - containerID: containerd://26eddf19e2beb2409a1f1595503eced8c8877edb783b54ce64497bde0d3090e3
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imageID: harbor.atmosphere.dev/quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: config
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://26eddf19e2beb2409a1f1595503eced8c8877edb783b54ce64497bde0d3090e3
        exitCode: 0
        finishedAt: "2026-04-14T05:37:08Z"
        reason: Completed
        startedAt: "2026-04-14T05:37:08Z"
  - containerID: containerd://23f7669bf622e295e3d9d804fb0f9fb1ed6b61bd54d9f6b9ce784e145d8a8a0e
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imageID: harbor.atmosphere.dev/quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: mount-cgroup
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://23f7669bf622e295e3d9d804fb0f9fb1ed6b61bd54d9f6b9ce784e145d8a8a0e
        exitCode: 0
        finishedAt: "2026-04-14T05:37:11Z"
        reason: Completed
        startedAt: "2026-04-14T05:37:11Z"
  - containerID: containerd://9919abea4555ab1a270f854f91a6df1e3ec59df02730798dfbc70aa31f4c9f84
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imageID: harbor.atmosphere.dev/quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: apply-sysctl-overwrites
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://9919abea4555ab1a270f854f91a6df1e3ec59df02730798dfbc70aa31f4c9f84
        exitCode: 0
        finishedAt: "2026-04-14T05:37:12Z"
        reason: Completed
        startedAt: "2026-04-14T05:37:12Z"
  - containerID: containerd://39f0edf38eee80b5c7a0fa3cd0cbecf3f49e90685962c33c3b791b6f4b92ca14
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imageID: harbor.atmosphere.dev/quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: mount-bpf-fs
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://39f0edf38eee80b5c7a0fa3cd0cbecf3f49e90685962c33c3b791b6f4b92ca14
        exitCode: 0
        finishedAt: "2026-04-14T05:37:13Z"
        reason: Completed
        startedAt: "2026-04-14T05:37:13Z"
  - containerID: containerd://351b38511993665ba156cfb43dce88174f4a3187007c622587be94e89ca59449
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imageID: harbor.atmosphere.dev/quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: clean-cilium-state
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://351b38511993665ba156cfb43dce88174f4a3187007c622587be94e89ca59449
        exitCode: 0
        finishedAt: "2026-04-14T05:37:14Z"
        reason: Completed
        startedAt: "2026-04-14T05:37:14Z"
  - containerID: containerd://a1266ebfe939557a49101080be0b00f0674a2a6a6ed5ee5861742c1f2fa2b067
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imageID: harbor.atmosphere.dev/quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: install-cni-binaries
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://a1266ebfe939557a49101080be0b00f0674a2a6a6ed5ee5861742c1f2fa2b067
        exitCode: 0
        finishedAt: "2026-04-14T05:37:15Z"
        reason: Completed
        startedAt: "2026-04-14T05:37:15Z"
  phase: Running
  # podIP equals hostIP because the pod runs with hostNetwork: true.
  podIP: 199.204.45.227
  podIPs:
  - ip: 199.204.45.227
  qosClass: Burstable
  startTime: "2026-04-14T05:37:00Z"
