apiVersion: v1
kind: Pod
metadata:
  annotations:
    container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: unconfined
    container.apparmor.security.beta.kubernetes.io/cilium-agent: unconfined
    container.apparmor.security.beta.kubernetes.io/clean-cilium-state: unconfined
    container.apparmor.security.beta.kubernetes.io/mount-cgroup: unconfined
  creationTimestamp: "2026-02-17T15:59:59Z"
  generateName: cilium-
  labels:
    app.kubernetes.io/name: cilium-agent
    app.kubernetes.io/part-of: cilium
    controller-revision-hash: bdc98b467
    k8s-app: cilium
    pod-template-generation: "1"
  name: cilium-825zm
  namespace: kube-system
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: DaemonSet
    name: cilium
    uid: 6c7ed013-aa3c-4db1-88ad-e5b27b31050a
  resourceVersion: "674"
  uid: 1a4a165f-9d53-4a6a-9d01-9032345e699e
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchFields:
          - key: metadata.name
            operator: In
            values:
            - instance
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchLabels:
            k8s-app: cilium
        topologyKey: kubernetes.io/hostname
  automountServiceAccountToken: true
  containers:
  - args:
    - --config-dir=/tmp/cilium/config-map
    command:
    - cilium-agent
    env:
    - name: K8S_NODE_NAME
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: spec.nodeName
    - name: CILIUM_K8S_NAMESPACE
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.namespace
    - name: CILIUM_CLUSTERMESH_CONFIG
      value: /var/lib/cilium/clustermesh/
    image: quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    lifecycle:
      postStart:
        exec:
          command:
          - bash
          - -c
          - |
            set -o errexit
            set -o pipefail
            set -o nounset

            # When running in AWS ENI mode, it's likely that 'aws-node' has
            # had a chance to install SNAT iptables rules. These can result
            # in dropped traffic, so we should attempt to remove them.
            # We do it using a 'postStart' hook since this may need to run
            # for nodes that might already have been initialized but may
            # still have dangling rules. This is safe because there are no
            # dependencies on anything that is part of the startup script
            # itself, and it can be safely run multiple times per node
            # (e.g. in case of a restart).
            if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
            then
                echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
                iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
            fi
            echo 'Done!'
      preStop:
        exec:
          command:
          - /cni-uninstall.sh
    livenessProbe:
      failureThreshold: 10
      httpGet:
        host: 127.0.0.1
        httpHeaders:
        - name: brief
          value: "true"
        path: /healthz
        port: 9879
        scheme: HTTP
      periodSeconds: 30
      successThreshold: 1
      timeoutSeconds: 5
    name: cilium-agent
    readinessProbe:
      failureThreshold: 3
      httpGet:
        host: 127.0.0.1
        httpHeaders:
        - name: brief
          value: "true"
        path: /healthz
        port: 9879
        scheme: HTTP
      periodSeconds: 30
      successThreshold: 1
      timeoutSeconds: 5
    resources: {}
    securityContext:
      capabilities:
        add:
        - CHOWN
        - KILL
        - NET_ADMIN
        - NET_RAW
        - IPC_LOCK
        - SYS_MODULE
        - SYS_ADMIN
        - SYS_RESOURCE
        - DAC_OVERRIDE
        - FOWNER
        - SETGID
        - SETUID
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    startupProbe:
      failureThreshold: 105
      httpGet:
        host: 127.0.0.1
        httpHeaders:
        - name: brief
          value: "true"
        path: /healthz
        port: 9879
        scheme: HTTP
      periodSeconds: 2
      successThreshold: 1
      timeoutSeconds: 1
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /host/proc/sys/net
      name: host-proc-sys-net
    - mountPath: /host/proc/sys/kernel
      name: host-proc-sys-kernel
    - mountPath: /sys/fs/bpf
      mountPropagation: HostToContainer
      name: bpf-maps
    - mountPath: /var/run/cilium
      name: cilium-run
    - mountPath: /host/etc/cni/net.d
      name: etc-cni-netd
    - mountPath: /var/lib/cilium/clustermesh
      name: clustermesh-secrets
      readOnly: true
    - mountPath: /lib/modules
      name: lib-modules
      readOnly: true
    - mountPath: /run/xtables.lock
      name: xtables-lock
    - mountPath: /tmp
      name: tmp
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-8q6bb
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  hostNetwork: true
  initContainers:
  - command:
    - cilium
    - build-config
    env:
    - name: K8S_NODE_NAME
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: spec.nodeName
    - name: CILIUM_K8S_NAMESPACE
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.namespace
    image: quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: config
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /tmp
      name: tmp
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-8q6bb
      readOnly: true
  - command:
    - sh
    - -ec
    - |
      cp /usr/bin/cilium-mount /hostbin/cilium-mount;
      nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
      rm /hostbin/cilium-mount
    env:
    - name: CGROUP_ROOT
      value: /run/cilium/cgroupv2
    - name: BIN_PATH
      value: /opt/cni/bin
    image: quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: mount-cgroup
    resources: {}
    securityContext:
      capabilities:
        add:
        - SYS_ADMIN
        - SYS_CHROOT
        - SYS_PTRACE
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /hostproc
      name: hostproc
    - mountPath: /hostbin
      name: cni-path
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-8q6bb
      readOnly: true
  - command:
    - sh
    - -ec
    - |
      cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
      nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
      rm /hostbin/cilium-sysctlfix
    env:
    - name: BIN_PATH
      value: /opt/cni/bin
    image: quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: apply-sysctl-overwrites
    resources: {}
    securityContext:
      capabilities:
        add:
        - SYS_ADMIN
        - SYS_CHROOT
        - SYS_PTRACE
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /hostproc
      name: hostproc
    - mountPath: /hostbin
      name: cni-path
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-8q6bb
      readOnly: true
  - args:
    - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf
    command:
    - /bin/bash
    - -c
    - --
    image: quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: mount-bpf-fs
    resources: {}
    securityContext:
      privileged: true
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /sys/fs/bpf
      mountPropagation: Bidirectional
      name: bpf-maps
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-8q6bb
      readOnly: true
  - command:
    - /init-container.sh
    env:
    - name: CILIUM_ALL_STATE
      valueFrom:
        configMapKeyRef:
          key: clean-cilium-state
          name: cilium-config
          optional: true
    - name: CILIUM_BPF_STATE
      valueFrom:
        configMapKeyRef:
          key: clean-cilium-bpf-state
          name: cilium-config
          optional: true
    image: quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: clean-cilium-state
    resources: {}
    securityContext:
      capabilities:
        add:
        - NET_ADMIN
        - SYS_MODULE
        - SYS_ADMIN
        - SYS_RESOURCE
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /sys/fs/bpf
      name: bpf-maps
    - mountPath: /run/cilium/cgroupv2
      mountPropagation: HostToContainer
      name: cilium-cgroup
    - mountPath: /var/run/cilium
      name: cilium-run
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-8q6bb
      readOnly: true
  - command:
    - /install-plugin.sh
    image: quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: install-cni-binaries
    resources:
      requests:
        cpu: 100m
        memory: 10Mi
    securityContext:
      capabilities:
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /host/opt/cni/bin
      name: cni-path
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-8q6bb
      readOnly: true
  nodeName: instance
  nodeSelector:
    kubernetes.io/os: linux
  preemptionPolicy: PreemptLowerPriority
  priority: 2000001000
  priorityClassName: system-node-critical
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: cilium
  serviceAccountName: cilium
  terminationGracePeriodSeconds: 1
  tolerations:
  - operator: Exists
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/disk-pressure
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/memory-pressure
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/pid-pressure
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/unschedulable
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/network-unavailable
    operator: Exists
  volumes:
  - emptyDir: {}
    name: tmp
  - hostPath:
      path: /var/run/cilium
      type: DirectoryOrCreate
    name: cilium-run
  - hostPath:
      path: /sys/fs/bpf
      type: DirectoryOrCreate
    name: bpf-maps
  - hostPath:
      path: /proc
      type: Directory
    name: hostproc
  - hostPath:
      path: /run/cilium/cgroupv2
      type: DirectoryOrCreate
    name: cilium-cgroup
  - hostPath:
      path: /opt/cni/bin
      type: DirectoryOrCreate
    name: cni-path
  - hostPath:
      path: /etc/cni/net.d
      type: DirectoryOrCreate
    name: etc-cni-netd
  - hostPath:
      path: /lib/modules
      type: ""
    name: lib-modules
  - hostPath:
      path: /run/xtables.lock
      type: FileOrCreate
    name: xtables-lock
  - name: clustermesh-secrets
    projected:
      defaultMode: 256
      sources:
      - secret:
          name: cilium-clustermesh
          optional: true
      - secret:
          items:
          - key: tls.key
            path: common-etcd-client.key
          - key: tls.crt
            path: common-etcd-client.crt
          - key: ca.crt
            path: common-etcd-client-ca.crt
          name: clustermesh-apiserver-remote-cert
          optional: true
  - hostPath:
      path: /proc/sys/net
      type: Directory
    name: host-proc-sys-net
  - hostPath:
      path: /proc/sys/kernel
      type: Directory
    name: host-proc-sys-kernel
  - name: kube-api-access-8q6bb
    projected:
      defaultMode: 420
      sources:
      - serviceAccountToken:
          expirationSeconds: 3607
          path: token
      - configMap:
          items:
          - key: ca.crt
            path: ca.crt
          name: kube-root-ca.crt
      - downwardAPI:
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
            path: namespace
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2026-02-17T16:00:13Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2026-02-17T16:00:17Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2026-02-17T16:00:17Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2026-02-17T15:59:59Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: containerd://484a0dfa3f31e41de7eaa2103fa17adbe95c12dd505786146472c0ced27b15aa
    image: quay.io/cilium/cilium:v1.14.8
    imageID: quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: cilium-agent
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2026-02-17T16:00:14Z"
  hostIP: 199.204.45.4
  initContainerStatuses:
  - containerID: containerd://e74ecd248337cda2a3f7859cf9435a1e388d2123314428aebe62943a63dfba9d
    image: quay.io/cilium/cilium:v1.14.8
    imageID: quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: config
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://e74ecd248337cda2a3f7859cf9435a1e388d2123314428aebe62943a63dfba9d
        exitCode: 0
        finishedAt: "2026-02-17T16:00:07Z"
        reason: Completed
        startedAt: "2026-02-17T16:00:07Z"
  - containerID: containerd://f5c4db938b8413018d91fd3dac6e62b49e40d8758082415122795087b00ede11
    image: quay.io/cilium/cilium:v1.14.8
    imageID: quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: mount-cgroup
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://f5c4db938b8413018d91fd3dac6e62b49e40d8758082415122795087b00ede11
        exitCode: 0
        finishedAt: "2026-02-17T16:00:08Z"
        reason: Completed
        startedAt: "2026-02-17T16:00:08Z"
  - containerID: containerd://c29a1111410a72fa7408fee863e58aad2284673c004d6d03a8c3c7c1ad6e4bc2
    image: quay.io/cilium/cilium:v1.14.8
    imageID: quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: apply-sysctl-overwrites
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://c29a1111410a72fa7408fee863e58aad2284673c004d6d03a8c3c7c1ad6e4bc2
        exitCode: 0
        finishedAt: "2026-02-17T16:00:10Z"
        reason: Completed
        startedAt: "2026-02-17T16:00:10Z"
  - containerID: containerd://96c1fc3718cfe8d6e8d5b37204ab7bf4115cdc253d578e20773f311a12cc751a
    image: quay.io/cilium/cilium:v1.14.8
    imageID: quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: mount-bpf-fs
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://96c1fc3718cfe8d6e8d5b37204ab7bf4115cdc253d578e20773f311a12cc751a
        exitCode: 0
        finishedAt: "2026-02-17T16:00:11Z"
        reason: Completed
        startedAt: "2026-02-17T16:00:11Z"
  - containerID: containerd://df0cabb8b5f895a6d883a689b409dc9abb58e77e1ac0ab82ff167c2b1fe795f5
    image: quay.io/cilium/cilium:v1.14.8
    imageID: quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: clean-cilium-state
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://df0cabb8b5f895a6d883a689b409dc9abb58e77e1ac0ab82ff167c2b1fe795f5
        exitCode: 0
        finishedAt: "2026-02-17T16:00:12Z"
        reason: Completed
        startedAt: "2026-02-17T16:00:12Z"
  - containerID: containerd://740ee9fc0d453484a8917c58d3edac65eadc19726c7c4e08a89aa157d14f26cd
    image: quay.io/cilium/cilium:v1.14.8
    imageID: quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: install-cni-binaries
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://740ee9fc0d453484a8917c58d3edac65eadc19726c7c4e08a89aa157d14f26cd
        exitCode: 0
        finishedAt: "2026-02-17T16:00:13Z"
        reason: Completed
        startedAt: "2026-02-17T16:00:13Z"
  phase: Running
  podIP: 199.204.45.4
  podIPs:
  - ip: 199.204.45.4
  qosClass: Burstable
  startTime: "2026-02-17T15:59:59Z"
