# Live core/v1 Pod object for the Cluster API kubeadm bootstrap provider's
# controller manager. Server-managed read-only fields (uid, resourceVersion,
# creationTimestamp) are present, so this is a dump of a running object
# rather than an applied manifest.
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: "2026-02-26T22:44:47Z"
  # Pod name was generated from this prefix; the pod-template-hash label
  # matches the owning ReplicaSet's suffix (6558cd8d7f).
  generateName: capi-kubeadm-bootstrap-controller-manager-6558cd8d7f-
  labels:
    cluster.x-k8s.io/provider: bootstrap-kubeadm
    control-plane: controller-manager
    pod-template-hash: 6558cd8d7f
  name: capi-kubeadm-bootstrap-controller-manager-6558cd8d7f-l2kgx
  namespace: capi-kubeadm-bootstrap-system
  # Owned by a ReplicaSet controller; blockOwnerDeletion ties this Pod's
  # lifecycle to the owner for garbage collection.
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: ReplicaSet
    name: capi-kubeadm-bootstrap-controller-manager-6558cd8d7f
    uid: a918fffc-83e1-42e5-b86c-a9dd9a8b0574
  resourceVersion: "24834"
  uid: fa2efc60-d82a-45c4-94a9-6924dd064346
# Desired state: a single hardened controller-manager container plus the
# scheduling and identity settings it runs under.
spec:
  containers:
  - args:
    # Controller flags: leader election on; diagnostics served on :8443
    # with auth (--insecure-diagnostics=false), exposed below as the
    # "metrics" container port; MachinePool feature gate enabled, Ignition
    # and PriorityQueue gates off; bootstrap tokens expire after 15 minutes.
    - --leader-elect
    - --diagnostics-address=:8443
    - --insecure-diagnostics=false
    - --feature-gates=MachinePool=true,KubeadmBootstrapFormatIgnition=false,PriorityQueue=false
    - --bootstrap-token-ttl=15m
    command:
    - /manager
    # Downward-API env vars hand the manager its own namespace/name/uid.
    env:
    - name: POD_NAMESPACE
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.namespace
    - name: POD_NAME
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.name
    - name: POD_UID
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.uid
    # Image pulled via a Harbor registry path mirroring registry.k8s.io.
    image: harbor.atmosphere.dev/registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.10.5
    imagePullPolicy: IfNotPresent
    # Both probes target the named "healthz" port (9440, declared below).
    livenessProbe:
      failureThreshold: 3
      httpGet:
        path: /healthz
        port: healthz
        scheme: HTTP
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 1
    name: manager
    ports:
    - containerPort: 9443
      name: webhook-server
      protocol: TCP
    - containerPort: 9440
      name: healthz
      protocol: TCP
    - containerPort: 8443
      name: metrics
      protocol: TCP
    readinessProbe:
      failureThreshold: 3
      httpGet:
        path: /readyz
        port: healthz
        scheme: HTTP
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 1
    # No requests/limits — this is why the Pod reports BestEffort QoS
    # (see status.qosClass).
    resources: {}
    # Hardened container: non-root uid/gid 65532, privilege escalation
    # disabled, every capability dropped.
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop:
        - ALL
      privileged: false
      runAsGroup: 65532
      runAsUser: 65532
    terminationMessagePath: /dev/termination-log
    # On failure without a termination-log file, the last log lines become
    # the termination message (visible in status.lastState).
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /tmp/k8s-webhook-server/serving-certs
      name: cert
      readOnly: true
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-j9kqp
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  nodeName: instance
  nodeSelector:
    openstack-control-plane: enabled
  preemptionPolicy: PreemptLowerPriority
  priority: 0
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext:
    runAsNonRoot: true
    seccompProfile:
      type: RuntimeDefault
  serviceAccount: capi-kubeadm-bootstrap-manager
  serviceAccountName: capi-kubeadm-bootstrap-manager
  terminationGracePeriodSeconds: 10
  # Tolerates control-plane taints (legacy "master" plus "control-plane"
  # keys) and carries 5-minute not-ready/unreachable eviction tolerations.
  tolerations:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
  - effect: NoSchedule
    key: node-role.kubernetes.io/control-plane
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  volumes:
  # Webhook serving certificate Secret, mounted read-only above at
  # /tmp/k8s-webhook-server/serving-certs.
  - name: cert
    secret:
      defaultMode: 420
      secretName: capi-kubeadm-bootstrap-webhook-service-cert
  # Projected service-account token + cluster CA bundle + namespace file.
  - name: kube-api-access-j9kqp
    projected:
      defaultMode: 420
      sources:
      - serviceAccountToken:
          expirationSeconds: 3607
          path: token
      - configMap:
          items:
          - key: ca.crt
            path: ca.crt
          name: kube-root-ca.crt
      - downwardAPI:
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
            path: namespace
# Observed state reported by the API server and kubelet; everything below
# is cluster-managed output, not user intent.
status:
  # All four standard conditions are True: scheduled and initialized at
  # 22:44:47Z, Ready/ContainersReady at 22:50:37Z (after one restart).
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2026-02-26T22:44:47Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2026-02-26T22:50:37Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2026-02-26T22:50:37Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2026-02-26T22:44:47Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  # Running and ready now, but restartCount is 1: lastState records that the
  # previous run exited with code 1 after "leader election lost" (final line
  # of the captured termination message below), and the container was
  # restarted at 22:50:36Z.
  - containerID: containerd://d87beba9bc8b13f1f9629e84c7e5058d28056221a8e2b5d2112ff10cbd97acb6
    image: harbor.atmosphere.dev/registry.k8s.io/cluster-api/kubeadm-bootstrap-controller:v1.10.5
    imageID: harbor.atmosphere.dev/registry.k8s.io/cluster-api/kubeadm-bootstrap-controller@sha256:e0f657538b3dc93a2eb0462561d0c895c78d9a515f048871bc90c927c0d4ce64
    lastState:
      terminated:
        containerID: containerd://f6353cea825bdfaeb35b514588be6810024e8fdf19ad7f70c6f660e5180578f5
        exitCode: 1
        finishedAt: "2026-02-26T22:50:05Z"
        message: |
          "
          I0226 22:50:05.393158       1 internal.go:538] "Stopping and waiting for non leader election runnables"
          I0226 22:50:05.393188       1 internal.go:542] "Stopping and waiting for leader election runnables"
          I0226 22:50:05.393204       1 internal.go:550] "Stopping and waiting for caches"
          I0226 22:50:05.393218       1 internal.go:554] "Stopping and waiting for webhooks"
          I0226 22:50:05.393227       1 internal.go:557] "Stopping and waiting for HTTP servers"
          I0226 22:50:05.393239       1 internal.go:561] "Wait completed, proceeding to shutdown the manager"
          I0226 22:50:05.393251       1 controller.go:268] "Shutdown signal received, waiting for all workers to finish" controller="kubeadmconfig" controllerGroup="bootstrap.cluster.x-k8s.io" controllerKind="KubeadmConfig"
          I0226 22:50:05.393239       1 controller.go:268] "Shutdown signal received, waiting for all workers to finish" controller="crdmigrator" controllerGroup="apiextensions.k8s.io" controllerKind="CustomResourceDefinition"
          I0226 22:50:05.393327       1 server.go:249] "Shutting down webhook server with timeout of 1 minute" logger="controller-runtime.webhook"
          I0226 22:50:05.393330       1 controller.go:268] "Shutdown signal received, waiting for all workers to finish" controller="clustercache" controllerGroup="cluster.x-k8s.io" controllerKind="Cluster"
          I0226 22:50:05.393479       1 server.go:68] "shutting down server" name="health probe" addr="[::]:9440"
          I0226 22:50:05.393506       1 controller.go:270] "All workers finished" controller="kubeadmconfig" controllerGroup="bootstrap.cluster.x-k8s.io" controllerKind="KubeadmConfig"
          I0226 22:50:05.393560       1 controller.go:270] "All workers finished" controller="clustercache" controllerGroup="cluster.x-k8s.io" controllerKind="Cluster"
          I0226 22:50:05.393472       1 controller.go:270] "All workers finished" controller="crdmigrator" controllerGroup="apiextensions.k8s.io" controllerKind="CustomResourceDefinition"
          E0226 22:50:05.393249       1 main.go:306] "problem running manager" err="leader election lost" logger="setup"
        reason: Error
        startedAt: "2026-02-26T22:44:52Z"
    name: manager
    ready: true
    restartCount: 1
    started: true
    state:
      running:
        startedAt: "2026-02-26T22:50:36Z"
  hostIP: 199.204.45.41
  phase: Running
  podIP: 10.0.0.96
  podIPs:
  - ip: 10.0.0.96
  # BestEffort because the container declares no resource requests/limits
  # (spec sets resources: {}).
  qosClass: BestEffort
  startTime: "2026-02-26T22:44:47Z"
