apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: "2026-04-23T16:45:05Z"
  generateName: capi-controller-manager-bc4cf8c95-
  labels:
    cluster.x-k8s.io/provider: cluster-api
    control-plane: controller-manager
    pod-template-hash: bc4cf8c95
  name: capi-controller-manager-bc4cf8c95-nqrmx
  namespace: capi-system
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: ReplicaSet
    name: capi-controller-manager-bc4cf8c95
    uid: 41965304-8dc0-43ce-bfd2-e5f68bbab730
  resourceVersion: "27169"
  uid: 23ab94b1-3a43-43bb-81e4-f7fa2e10d983
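  # ownerReferences shows this pod is managed by ReplicaSet
  # capi-controller-manager-bc4cf8c95, i.e. (per the pod-template-hash label)
  # by the capi-controller-manager Deployment in the capi-system namespace.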
spec:
  containers:
  - args:
    - --leader-elect
    - --diagnostics-address=:8443
    - --insecure-diagnostics=false
    - --feature-gates=MachinePool=true,ClusterResourceSet=true,ClusterTopology=true,RuntimeSDK=false,MachineSetPreflightChecks=true,MachineWaitForVolumeDetachConsiderVolumeAttachments=true,PriorityQueue=false
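    # --diagnostics-address serves metrics/diagnostics on :8443; with
    # --insecure-diagnostics=false that endpoint requires authentication and
    # authorization. --feature-gates toggles the experimental Cluster API
    # features listed above (MachinePool, ClusterTopology, etc.).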
    command:
    - /manager
    env:
    - name: POD_NAMESPACE
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.namespace
    - name: POD_NAME
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.name
    - name: POD_UID
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.uid
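    # The pod's own namespace, name, and UID are injected into the manager's
    # environment via the downward API.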
    image: harbor.atmosphere.dev/registry.k8s.io/cluster-api/cluster-api-controller:v1.10.5
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 3
      httpGet:
        path: /healthz
        port: healthz
        scheme: HTTP
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 1
    name: manager
    ports:
    - containerPort: 9443
      name: webhook-server
      protocol: TCP
    - containerPort: 9440
      name: healthz
      protocol: TCP
    - containerPort: 8443
      name: metrics
      protocol: TCP
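    # Three listeners: 9443 serves the admission/conversion webhooks, 9440
    # backs the health probes, and 8443 is the diagnostics endpoint set by
    # --diagnostics-address above.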
    readinessProbe:
      failureThreshold: 3
      httpGet:
        path: /readyz
        port: healthz
        scheme: HTTP
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 1
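    # Liveness checks /healthz and readiness checks /readyz, both over plain
    # HTTP on the named "healthz" port (9440).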
    resources: {}
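    # No requests or limits are set, which is why the pod lands in the
    # BestEffort QoS class (status.qosClass below). If guaranteed capacity
    # were wanted, a hypothetical baseline (values illustrative, not from
    # this manifest) might look like:
    #   resources:
    #     requests:
    #       cpu: 100m
    #       memory: 128Mi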
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop:
        - ALL
      privileged: false
      runAsGroup: 65532
      runAsUser: 65532
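    # Hardened container context: all capabilities dropped, privilege
    # escalation disabled, running as 65532 (the conventional distroless
    # "nonroot" UID/GID).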
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
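    # With FallbackToLogsOnError, the tail of the container log becomes the
    # termination message on a non-zero exit; that is the source of the crash
    # log under status.containerStatuses[].lastState below.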
    volumeMounts:
    - mountPath: /tmp/k8s-webhook-server/serving-certs
      name: cert
      readOnly: true
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-qx7q9
      readOnly: true
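    # /tmp/k8s-webhook-server/serving-certs is controller-runtime's default
    # certificate directory for the webhook server; the cert itself comes
    # from the capi-webhook-service-cert Secret (see volumes below).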
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  nodeName: instance
  nodeSelector:
    openstack-control-plane: enabled
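  # Scheduling is pinned to nodes carrying the openstack-control-plane=enabled
  # label, i.e. this deployment's designated control-plane hosts.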
  preemptionPolicy: PreemptLowerPriority
  priority: 0
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext:
    runAsNonRoot: true
    seccompProfile:
      type: RuntimeDefault
  serviceAccount: capi-manager
  serviceAccountName: capi-manager
  terminationGracePeriodSeconds: 10
  tolerations:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
  - effect: NoSchedule
    key: node-role.kubernetes.io/control-plane
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
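  # The pod tolerates both control-plane taints (master is the legacy
  # spelling) and carries the default 300-second grace before eviction from
  # not-ready or unreachable nodes.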
  volumes:
  - name: cert
    secret:
      defaultMode: 420
      secretName: capi-webhook-service-cert
  - name: kube-api-access-qx7q9
    projected:
      defaultMode: 420
      sources:
      - serviceAccountToken:
          expirationSeconds: 3607
          path: token
      - configMap:
          items:
          - key: ca.crt
            path: ca.crt
          name: kube-root-ca.crt
      - downwardAPI:
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
            path: namespace
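  # kube-api-access-* is the standard kubelet-projected service-account
  # volume: a bound token (expirationSeconds of roughly one hour, rotated by
  # the kubelet), the cluster CA bundle, and the pod's namespace via the
  # downward API. defaultMode 420 is decimal for octal 0644.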
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2026-04-23T16:45:05Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2026-04-23T16:49:43Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2026-04-23T16:49:43Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2026-04-23T16:45:05Z"
    status: "True"
    type: PodScheduled
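  # Note the gap between Initialized/PodScheduled (16:45:05) and Ready
  # (16:49:43): the first container instance crashed and was restarted (see
  # lastState below).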
  containerStatuses:
  - containerID: containerd://9a5f1023bdaf4108e84bb6373f914e91d4114a0b776680f5c5e003a4db61f338
    image: harbor.atmosphere.dev/registry.k8s.io/cluster-api/cluster-api-controller:v1.10.5
    imageID: harbor.atmosphere.dev/registry.k8s.io/cluster-api/cluster-api-controller@sha256:d93407d031296336ccbabc8494005672dc048c4ebc616ccfc18f813d49bd87fc
    lastState:
      terminated:
        containerID: containerd://83e2f87d625a6b7b06892566432422ebf5faa2da5dc096b5ede763286ae03f4f
        exitCode: 1
        finishedAt: "2026-04-23T16:48:44Z"
        message: |
          ler.go:248] "Starting workers" controller="machinedeployment" controllerGroup="cluster.x-k8s.io" controllerKind="MachineDeployment" worker count=10
          I0423 16:45:24.229830       1 controller.go:239] "Starting Controller" controller="machinepool" controllerGroup="cluster.x-k8s.io" controllerKind="MachinePool"
          I0423 16:45:24.229970       1 controller.go:248] "Starting workers" controller="machinepool" controllerGroup="cluster.x-k8s.io" controllerKind="MachinePool" worker count=10
          E0423 16:47:25.597287       1 leaderelection.go:429] Failed to update lock optimistically: Put "https://10.96.0.1:443/apis/coordination.k8s.io/v1/namespaces/capi-system/leases/controller-leader-election-capi?timeout=5s": context deadline exceeded, falling back to slow path
          E0423 16:48:39.675843       1 leaderelection.go:429] Failed to update lock optimistically: Put "https://10.96.0.1:443/apis/coordination.k8s.io/v1/namespaces/capi-system/leases/controller-leader-election-capi?timeout=5s": context deadline exceeded, falling back to slow path
          E0423 16:48:44.676711       1 leaderelection.go:436] error retrieving resource lock capi-system/controller-leader-election-capi: Get "https://10.96.0.1:443/apis/coordination.k8s.io/v1/namespaces/capi-system/leases/controller-leader-election-capi?timeout=5s": net/http: request canceled (Client.Timeout exceeded while awaiting headers)
          E0423 16:48:44.676941       1 leaderelection.go:429] Failed to update lock optimistically: client rate limiter Wait returned an error: rate: Wait(n=1) would exceed context deadline, falling back to slow path
          E0423 16:48:44.677054       1 leaderelection.go:436] error retrieving resource lock capi-system/controller-leader-election-capi: client rate limiter Wait returned an error: rate: Wait(n=1) would exceed context deadline
          I0423 16:48:44.677102       1 leaderelection.go:297] failed to renew lease capi-system/controller-leader-election-capi: context deadline exceeded
          E0423 16:48:44.677174       1 main.go:433] "Problem running manager" err="leader election lost" logger="setup"
        reason: Error
        startedAt: "2026-04-23T16:45:06Z"
    name: manager
    ready: true
    restartCount: 1
    started: true
    state:
      running:
        startedAt: "2026-04-23T16:49:42Z"
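  # restartCount is 1 because the first instance repeatedly timed out
  # renewing the capi-system/controller-leader-election-capi Lease against
  # the API server (10.96.0.1:443), lost leader election, and exited 1 at
  # 16:48:44; controller-runtime managers exit on lost leadership so the
  # kubelet can restart them, which it did at 16:49:42. The first line of
  # the message above is truncated because only the log tail is captured.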
  hostIP: 199.204.45.210
  phase: Running
  podIP: 10.0.0.13
  podIPs:
  - ip: 10.0.0.13
  qosClass: BestEffort
  startTime: "2026-04-23T16:45:05Z"
