# Cilium agent Pod as stored in the API server. This is a live object dump,
# so server-populated fields (creationTimestamp, resourceVersion, uid, and
# the status section further down) appear alongside the desired spec.
apiVersion: v1
kind: Pod
metadata:
  annotations:
    # Beta per-container AppArmor annotations: run the agent and its
    # privileged init containers unconfined.
    container.apparmor.security.beta.kubernetes.io/apply-sysctl-overwrites: unconfined
    container.apparmor.security.beta.kubernetes.io/cilium-agent: unconfined
    container.apparmor.security.beta.kubernetes.io/clean-cilium-state: unconfined
    container.apparmor.security.beta.kubernetes.io/mount-cgroup: unconfined
  creationTimestamp: "2026-04-28T06:53:46Z"
  # Name was generated from this prefix (-> cilium-sshkw below).
  generateName: cilium-
  labels:
    app.kubernetes.io/name: cilium-agent
    app.kubernetes.io/part-of: cilium
    controller-revision-hash: 556fd66f89
    k8s-app: cilium
    pod-template-generation: "1"
  name: cilium-sshkw
  namespace: kube-system
  # Lifecycle-managed by the "cilium" DaemonSet.
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: DaemonSet
    name: cilium
    uid: 22673242-8cb6-4a42-9900-046bfac9e9e5
  resourceVersion: "738"
  uid: ae41d8f7-da02-44ef-8703-dcee5b64a5b5
spec:
  affinity:
    nodeAffinity:
      # Bind this replica to the single node named "instance" via a
      # matchFields term on metadata.name (how DaemonSet pods are pinned
      # per-node).
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchFields:
          - key: metadata.name
            operator: In
            values:
            - instance
    podAntiAffinity:
      # At most one pod labeled k8s-app=cilium per node (hostname topology).
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchLabels:
            k8s-app: cilium
        topologyKey: kubernetes.io/hostname
  automountServiceAccountToken: true
  # Main agent container. Configuration is read from /tmp/cilium/config-map
  # on the shared "tmp" emptyDir volume — presumably written there by the
  # "config" init container (`cilium build-config`), which mounts the same
  # volume; confirm against the cilium-config ConfigMap if in doubt.
  containers:
  - args:
    - --config-dir=/tmp/cilium/config-map
    command:
    - cilium-agent
    env:
    - name: K8S_NODE_NAME
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: spec.nodeName
    - name: CILIUM_K8S_NAMESPACE
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.namespace
    - name: CILIUM_CLUSTERMESH_CONFIG
      value: /var/lib/cilium/clustermesh/
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    lifecycle:
      # postStart script below is part of the container's runtime command
      # text (block scalar) — its inline comments explain the AWS SNAT rule
      # cleanup; do not edit it as YAML.
      postStart:
        exec:
          command:
          - bash
          - -c
          - |
            set -o errexit
            set -o pipefail
            set -o nounset

            # When running in AWS ENI mode, it's likely that 'aws-node' has
            # had a chance to install SNAT iptables rules. These can result
            # in dropped traffic, so we should attempt to remove them.
            # We do it using a 'postStart' hook since this may need to run
            # for nodes which might have already been init'ed but may still
            # have dangling rules. This is safe because there are no
            # dependencies on anything that is part of the startup script
            # itself, and can be safely run multiple times per node (e.g. in
            # case of a restart).
            if [[ "$(iptables-save | grep -E -c 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN')" != "0" ]];
            then
                echo 'Deleting iptables rules created by the AWS CNI VPC plugin'
                iptables-save | grep -E -v 'AWS-SNAT-CHAIN|AWS-CONNMARK-CHAIN' | iptables-restore
            fi
            echo 'Done!'
      preStop:
        exec:
          command:
          - /cni-uninstall.sh
    # All three probes hit the agent's local healthz endpoint on
    # 127.0.0.1:9879 — reachable because the pod runs with hostNetwork.
    livenessProbe:
      failureThreshold: 10
      httpGet:
        host: 127.0.0.1
        httpHeaders:
        - name: brief
          value: "true"
        path: /healthz
        port: 9879
        scheme: HTTP
      periodSeconds: 30
      successThreshold: 1
      timeoutSeconds: 5
    name: cilium-agent
    readinessProbe:
      failureThreshold: 3
      httpGet:
        host: 127.0.0.1
        httpHeaders:
        - name: brief
          value: "true"
        path: /healthz
        port: 9879
        scheme: HTTP
      periodSeconds: 30
      successThreshold: 1
      timeoutSeconds: 5
    resources: {}
    securityContext:
      # Not privileged: drops ALL, then adds back the specific host-level
      # capabilities the agent needs (network, BPF/module, resource limits,
      # file ownership).
      capabilities:
        add:
        - CHOWN
        - KILL
        - NET_ADMIN
        - NET_RAW
        - IPC_LOCK
        - SYS_MODULE
        - SYS_ADMIN
        - SYS_RESOURCE
        - DAC_OVERRIDE
        - FOWNER
        - SETGID
        - SETUID
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    # Startup allowance: up to 105 failures at 2s intervals (~210s) before
    # liveness/readiness probes take over.
    startupProbe:
      failureThreshold: 105
      httpGet:
        host: 127.0.0.1
        httpHeaders:
        - name: brief
          value: "true"
        path: /healthz
        port: 9879
        scheme: HTTP
      periodSeconds: 2
      successThreshold: 1
      timeoutSeconds: 1
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /host/proc/sys/net
      name: host-proc-sys-net
    - mountPath: /host/proc/sys/kernel
      name: host-proc-sys-kernel
    # HostToContainer propagation so host-side (re)mounts of the BPF fs
    # become visible inside the container.
    - mountPath: /sys/fs/bpf
      mountPropagation: HostToContainer
      name: bpf-maps
    - mountPath: /var/run/cilium
      name: cilium-run
    - mountPath: /host/etc/cni/net.d
      name: etc-cni-netd
    - mountPath: /var/lib/cilium/clustermesh
      name: clustermesh-secrets
      readOnly: true
    - mountPath: /lib/modules
      name: lib-modules
      readOnly: true
    - mountPath: /run/xtables.lock
      name: xtables-lock
    - mountPath: /tmp
      name: tmp
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-dfr7d
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  # Shares the node's network namespace; this is why the probes above target
  # 127.0.0.1 and why podIP equals hostIP in the status section.
  hostNetwork: true
  # Init containers run sequentially, in the order listed.
  initContainers:
  # 1. config: renders the agent configuration (`cilium build-config`) into
  #    the shared "tmp" volume.
  - command:
    - cilium
    - build-config
    env:
    - name: K8S_NODE_NAME
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: spec.nodeName
    - name: CILIUM_K8S_NAMESPACE
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.namespace
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: config
    resources: {}
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /tmp
      name: tmp
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-dfr7d
      readOnly: true
  # 2. mount-cgroup: copies cilium-mount onto the host (via the cni-path
  #    volume at /hostbin), runs it inside PID 1's cgroup+mount namespaces
  #    to mount cgroup2 at $CGROUP_ROOT, then removes the copied binary.
  - command:
    - sh
    - -ec
    - |
      cp /usr/bin/cilium-mount /hostbin/cilium-mount;
      nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT;
      rm /hostbin/cilium-mount
    env:
    - name: CGROUP_ROOT
      value: /run/cilium/cgroupv2
    - name: BIN_PATH
      value: /opt/cni/bin
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: mount-cgroup
    resources: {}
    securityContext:
      # nsenter into host namespaces needs these; everything else dropped.
      capabilities:
        add:
        - SYS_ADMIN
        - SYS_CHROOT
        - SYS_PTRACE
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /hostproc
      name: hostproc
    - mountPath: /hostbin
      name: cni-path
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-dfr7d
      readOnly: true
  # 3. apply-sysctl-overwrites: same copy/nsenter/remove pattern as above,
  #    running cilium-sysctlfix in the host mount namespace.
  - command:
    - sh
    - -ec
    - |
      cp /usr/bin/cilium-sysctlfix /hostbin/cilium-sysctlfix;
      nsenter --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-sysctlfix";
      rm /hostbin/cilium-sysctlfix
    env:
    - name: BIN_PATH
      value: /opt/cni/bin
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: apply-sysctl-overwrites
    resources: {}
    securityContext:
      capabilities:
        add:
        - SYS_ADMIN
        - SYS_CHROOT
        - SYS_PTRACE
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /hostproc
      name: hostproc
    - mountPath: /hostbin
      name: cni-path
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-dfr7d
      readOnly: true
  # 4. mount-bpf-fs: mounts the BPF filesystem at /sys/fs/bpf if it is not
  #    already mounted. Privileged, with Bidirectional propagation so the
  #    mount also appears on the host.
  - args:
    - mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf
    command:
    - /bin/bash
    - -c
    - --
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: mount-bpf-fs
    resources: {}
    securityContext:
      privileged: true
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /sys/fs/bpf
      mountPropagation: Bidirectional
      name: bpf-maps
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-dfr7d
      readOnly: true
  # 5. clean-cilium-state: runs /init-container.sh; the optional
  #    cilium-config keys below toggle whether it wipes agent/BPF state.
  - command:
    - /init-container.sh
    env:
    - name: CILIUM_ALL_STATE
      valueFrom:
        configMapKeyRef:
          key: clean-cilium-state
          name: cilium-config
          optional: true
    - name: CILIUM_BPF_STATE
      valueFrom:
        configMapKeyRef:
          key: clean-cilium-bpf-state
          name: cilium-config
          optional: true
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: clean-cilium-state
    resources: {}
    securityContext:
      capabilities:
        add:
        - NET_ADMIN
        - SYS_MODULE
        - SYS_ADMIN
        - SYS_RESOURCE
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /sys/fs/bpf
      name: bpf-maps
    - mountPath: /run/cilium/cgroupv2
      mountPropagation: HostToContainer
      name: cilium-cgroup
    - mountPath: /var/run/cilium
      name: cilium-run
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-dfr7d
      readOnly: true
  # 6. install-cni-binaries: runs /install-plugin.sh with the host CNI bin
  #    directory mounted. The only container here with resource requests.
  - command:
    - /install-plugin.sh
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imagePullPolicy: IfNotPresent
    name: install-cni-binaries
    resources:
      requests:
        cpu: 100m
        memory: 10Mi
    securityContext:
      capabilities:
        drop:
        - ALL
      seLinuxOptions:
        level: s0
        type: spc_t
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /host/opt/cni/bin
      name: cni-path
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-dfr7d
      readOnly: true
  # Bound to node "instance" (consistent with the nodeAffinity term above).
  nodeName: instance
  nodeSelector:
    kubernetes.io/os: linux
  preemptionPolicy: PreemptLowerPriority
  # Priority resolved from the system-node-critical priority class.
  priority: 2000001000
  priorityClassName: system-node-critical
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: cilium
  serviceAccountName: cilium
  terminationGracePeriodSeconds: 1
  tolerations:
  # Bare `operator: Exists` with no key tolerates every taint; the specific
  # per-condition entries below are therefore subsumed by it (they are the
  # standard auto-added DaemonSet tolerations — harmless duplication).
  - operator: Exists
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/disk-pressure
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/memory-pressure
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/pid-pressure
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/unschedulable
    operator: Exists
  - effect: NoSchedule
    key: node.kubernetes.io/network-unavailable
    operator: Exists
  # Volume sources for the mounts above; mostly hostPath because the agent
  # manages node-level state.
  volumes:
  # Scratch space shared by the "config" init container and the agent.
  - emptyDir: {}
    name: tmp
  - hostPath:
      path: /var/run/cilium
      type: DirectoryOrCreate
    name: cilium-run
  - hostPath:
      path: /sys/fs/bpf
      type: DirectoryOrCreate
    name: bpf-maps
  # Host /proc, used by init containers to nsenter PID 1's namespaces.
  - hostPath:
      path: /proc
      type: Directory
    name: hostproc
  - hostPath:
      path: /run/cilium/cgroupv2
      type: DirectoryOrCreate
    name: cilium-cgroup
  # Host CNI plugin directory; doubles as /hostbin in the init containers.
  - hostPath:
      path: /opt/cni/bin
      type: DirectoryOrCreate
    name: cni-path
  - hostPath:
      path: /etc/cni/net.d
      type: DirectoryOrCreate
    name: etc-cni-netd
  - hostPath:
      path: /lib/modules
      type: ""
    name: lib-modules
  - hostPath:
      path: /run/xtables.lock
      type: FileOrCreate
    name: xtables-lock
  # Clustermesh credentials; both secret sources are optional, so this
  # projects empty when clustermesh is not configured.
  # defaultMode 256 decimal == 0400 (owner read-only).
  - name: clustermesh-secrets
    projected:
      defaultMode: 256
      sources:
      - secret:
          name: cilium-clustermesh
          optional: true
      - secret:
          items:
          - key: tls.key
            path: common-etcd-client.key
          - key: tls.crt
            path: common-etcd-client.crt
          - key: ca.crt
            path: common-etcd-client-ca.crt
          name: clustermesh-apiserver-remote-cert
          optional: true
  - hostPath:
      path: /proc/sys/net
      type: Directory
    name: host-proc-sys-net
  - hostPath:
      path: /proc/sys/kernel
      type: Directory
    name: host-proc-sys-kernel
  # Standard projected service-account token volume (defaultMode 420 == 0644).
  - name: kube-api-access-dfr7d
    projected:
      defaultMode: 420
      sources:
      - serviceAccountToken:
          expirationSeconds: 3607
          path: token
      - configMap:
          items:
          - key: ca.crt
            path: ca.crt
          name: kube-root-ca.crt
      - downwardAPI:
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
            path: namespace
# Server-populated runtime status (read-only; strip this section, along with
# metadata.uid/resourceVersion, before re-applying the manifest).
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2026-04-28T06:54:00Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2026-04-28T06:54:11Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2026-04-28T06:54:11Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2026-04-28T06:53:46Z"
    status: "True"
    type: PodScheduled
  # Agent container: running and ready, no restarts.
  containerStatuses:
  - containerID: containerd://6a3b8440721a69ed3d1700d9da153be7d44124e0ee2874ee18a3dad50be26cf7
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imageID: harbor.atmosphere.dev/quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: cilium-agent
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2026-04-28T06:54:00Z"
  hostIP: 199.204.45.33
  # All six init containers completed in order with exitCode 0.
  initContainerStatuses:
  - containerID: containerd://67789954071fc2b74a8d1ab0aadc8bb19241593fc0bc8271723b5c8d8daf4d38
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imageID: harbor.atmosphere.dev/quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: config
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://67789954071fc2b74a8d1ab0aadc8bb19241593fc0bc8271723b5c8d8daf4d38
        exitCode: 0
        finishedAt: "2026-04-28T06:53:54Z"
        reason: Completed
        startedAt: "2026-04-28T06:53:54Z"
  - containerID: containerd://2812ebb132aa2f982e565d2e478e0cb88a9bc7bb8f86c48e7dfa475bd6ff657c
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imageID: harbor.atmosphere.dev/quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: mount-cgroup
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://2812ebb132aa2f982e565d2e478e0cb88a9bc7bb8f86c48e7dfa475bd6ff657c
        exitCode: 0
        finishedAt: "2026-04-28T06:53:55Z"
        reason: Completed
        startedAt: "2026-04-28T06:53:55Z"
  - containerID: containerd://5b87efa95b3a36a8941a604b12e58b3483774f38fee3be45b0457dc506d4f7ac
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imageID: harbor.atmosphere.dev/quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: apply-sysctl-overwrites
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://5b87efa95b3a36a8941a604b12e58b3483774f38fee3be45b0457dc506d4f7ac
        exitCode: 0
        finishedAt: "2026-04-28T06:53:56Z"
        reason: Completed
        startedAt: "2026-04-28T06:53:56Z"
  - containerID: containerd://efa18f2329316f5fb7ed3ab9cfb48015b690a23994107dfc8fed10e98518556c
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imageID: harbor.atmosphere.dev/quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: mount-bpf-fs
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://efa18f2329316f5fb7ed3ab9cfb48015b690a23994107dfc8fed10e98518556c
        exitCode: 0
        finishedAt: "2026-04-28T06:53:57Z"
        reason: Completed
        startedAt: "2026-04-28T06:53:57Z"
  - containerID: containerd://ffff3ae86fb1b060cbb94921731b89495d710a2d07ca80dec97eb83eaba7fbee
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imageID: harbor.atmosphere.dev/quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: clean-cilium-state
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://ffff3ae86fb1b060cbb94921731b89495d710a2d07ca80dec97eb83eaba7fbee
        exitCode: 0
        finishedAt: "2026-04-28T06:53:58Z"
        reason: Completed
        startedAt: "2026-04-28T06:53:58Z"
  - containerID: containerd://856834f2f6de20f21f216890b2fb2f5e5498c7f3b4cde2b6dc4bf452ded3511b
    image: harbor.atmosphere.dev/quay.io/cilium/cilium:v1.14.8
    imageID: harbor.atmosphere.dev/quay.io/cilium/cilium@sha256:7fca3ba4b04af066e8b086b5c1a52e30f52db01ffc642e7db0a439514aed3ada
    lastState: {}
    name: install-cni-binaries
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://856834f2f6de20f21f216890b2fb2f5e5498c7f3b4cde2b6dc4bf452ded3511b
        exitCode: 0
        finishedAt: "2026-04-28T06:53:59Z"
        reason: Completed
        startedAt: "2026-04-28T06:53:59Z"
  phase: Running
  # podIP == hostIP because spec.hostNetwork is true.
  podIP: 199.204.45.33
  podIPs:
  - ip: 199.204.45.33
  qosClass: Burstable
  startTime: "2026-04-28T06:53:46Z"
