apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: "2026-02-26T22:42:35Z"
  generateName: capo-controller-manager-6975759b4b-
  labels:
    cluster.x-k8s.io/provider: infrastructure-openstack
    control-plane: capo-controller-manager
    pod-template-hash: 6975759b4b
  name: capo-controller-manager-6975759b4b-gxg6d
  namespace: capo-system
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: ReplicaSet
    name: capo-controller-manager-6975759b4b
    uid: 5e9ef9a8-13ae-4479-9bec-7552ba8a3112
  resourceVersion: "23480"
  uid: 4f819312-b615-4e18-bee4-939a990506e5
spec:
  containers:
  - args:
    - --leader-elect
    - --v=2
    - --diagnostics-address=127.0.0.1:8080
    - --insecure-diagnostics=true
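    # The flags above enable leader election (relevant to the restart recorded
    # in .status below), set log verbosity to 2, and bind the diagnostics
    # endpoint to loopback only; --insecure-diagnostics=true likely serves that
    # endpoint without authentication, relying on the 127.0.0.1 bind for
    # protection (assumption based on common cluster-api provider flag usage).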
    command:
    - /manager
    env:
    - name: CLUSTER_API_OPENSTACK_INSTANCE_CREATE_TIMEOUT
      value: "10"
    image: harbor.atmosphere.dev/registry.k8s.io/capi-openstack/capi-openstack-controller:v0.12.4
    imagePullPolicy: IfNotPresent
    livenessProbe:
      failureThreshold: 3
      httpGet:
        path: /healthz
        port: healthz
        scheme: HTTP
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 1
    name: manager
    ports:
    - containerPort: 9443
      name: webhook-server
      protocol: TCP
    - containerPort: 9440
      name: healthz
      protocol: TCP
    readinessProbe:
      failureThreshold: 3
      httpGet:
        path: /readyz
        port: healthz
        scheme: HTTP
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 1
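    # Both probes target the "healthz" containerPort (9440): liveness checks
    # /healthz and readiness checks /readyz, the usual controller-runtime
    # health endpoints (assumption about how the manager binary wires them up).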
    resources: {}
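    # No resource requests or limits are set, which is why the Pod lands in the
    # BestEffort QoS class reported in .status.qosClass below.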
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop:
        - ALL
      privileged: false
      runAsGroup: 65532
      runAsUser: 65532
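    # The container runs as UID/GID 65532 (commonly the "nonroot" user in
    # distroless images; assumption about the base image), drops all
    # capabilities, and forbids privilege escalation.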
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /tmp/k8s-webhook-server/serving-certs
      name: cert
      readOnly: true
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-8wcsp
      readOnly: true
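    # /tmp/k8s-webhook-server/serving-certs is the default location where a
    # controller-runtime webhook server looks for its serving certificate; the
    # "cert" volume below provides it from the capo-webhook-service-cert Secret.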
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  nodeName: instance
  nodeSelector:
    openstack-control-plane: enabled
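  # Scheduling is pinned to nodes labelled openstack-control-plane=enabled,
  # which appears to be an OpenStack-Helm/Atmosphere control-plane node label
  # (assumption).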
  preemptionPolicy: PreemptLowerPriority
  priority: 0
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext:
    runAsNonRoot: true
    seccompProfile:
      type: RuntimeDefault
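  # Pod-level hardening: every container must run as non-root and uses the
  # runtime default seccomp profile.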
  serviceAccount: capo-manager
  serviceAccountName: capo-manager
  terminationGracePeriodSeconds: 10
  tolerations:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
  - effect: NoSchedule
    key: node-role.kubernetes.io/control-plane
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
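  # The first two tolerations allow placement on tainted control-plane nodes
  # (both the legacy "master" and current "control-plane" keys). The
  # not-ready/unreachable tolerations with tolerationSeconds: 300 are the
  # cluster defaults added by the DefaultTolerationSeconds admission plugin.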
  volumes:
  - name: cert
    secret:
      defaultMode: 420
      secretName: capo-webhook-service-cert
  - name: kube-api-access-8wcsp
    projected:
      defaultMode: 420
      sources:
      - serviceAccountToken:
          expirationSeconds: 3607
          path: token
      - configMap:
          items:
          - key: ca.crt
            path: ca.crt
          name: kube-root-ca.crt
      - downwardAPI:
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
            path: namespace
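  # kube-api-access-* is the standard projected ServiceAccount volume: a bound
  # token (roughly one hour expiry), the cluster CA bundle from the
  # kube-root-ca.crt ConfigMap, and the Pod namespace via the downward API.
  # defaultMode 420 is decimal for octal 0644.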
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2026-02-26T22:42:35Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2026-02-26T22:46:10Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2026-02-26T22:46:10Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2026-02-26T22:42:35Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: containerd://fd400f5a2e6d4edc74389108cfa7db29657fd56d94b8a3ef8faf683e8e801bf2
    image: harbor.atmosphere.dev/registry.k8s.io/capi-openstack/capi-openstack-controller:v0.12.4
    imageID: harbor.atmosphere.dev/registry.k8s.io/capi-openstack/capi-openstack-controller@sha256:237da708e483aa8c39766a217d25e45de52816d446764569c77550e6f56f0970
    lastState:
      terminated:
        containerID: containerd://5788de02e893401d192b16fcd77e952e47be87fc4ce1b7e578662652c8bfe131
        exitCode: 1
        finishedAt: "2026-02-26T22:46:07Z"
        message: |
          or.go:243
          I0226 22:42:39.186801       1 reflector.go:368] Caches populated for *v1beta1.OpenStackMachine from pkg/mod/k8s.io/client-go@v0.31.10/tools/cache/reflector.go:243
          I0226 22:42:39.280870       1 controller.go:217] "Starting workers" controller="openstackserver" controllerGroup="infrastructure.cluster.x-k8s.io" controllerKind="OpenStackServer" worker count=10
          I0226 22:42:39.285891       1 controller.go:217] "Starting workers" controller="openstackmachine" controllerGroup="infrastructure.cluster.x-k8s.io" controllerKind="OpenStackMachine" worker count=10
          I0226 22:42:39.286000       1 controller.go:217] "Starting workers" controller="openstackcluster" controllerGroup="infrastructure.cluster.x-k8s.io" controllerKind="OpenStackCluster" worker count=10
          I0226 22:42:39.286206       1 controller.go:217] "Starting workers" controller="openstackfloatingippool" controllerGroup="infrastructure.cluster.x-k8s.io" controllerKind="OpenStackFloatingIPPool" worker count=1
          E0226 22:45:16.514541       1 leaderelection.go:429] Failed to update lock optimitically: Put "https://10.96.0.1:443/apis/coordination.k8s.io/v1/namespaces/capo-system/leases/controller-leader-election-capo?timeout=5s": context deadline exceeded, falling back to slow path
          E0226 22:45:21.514260       1 leaderelection.go:436] error retrieving resource lock capo-system/controller-leader-election-capo: Get "https://10.96.0.1:443/apis/coordination.k8s.io/v1/namespaces/capo-system/leases/controller-leader-election-capo?timeout=5s": context deadline exceeded
          I0226 22:45:21.514304       1 leaderelection.go:297] failed to renew lease capo-system/controller-leader-election-capo: timed out waiting for the condition
          E0226 22:45:26.515424       1 leaderelection.go:322] Failed to release lock: the server was unable to return a response in the time allotted, but may still be processing the request (put leases.coordination.k8s.io controller-leader-election-capo)
          E0226 22:45:26.515534       1 main.go:281] "problem running manager" err="leader election lost" logger="setup"
        reason: Error
        startedAt: "2026-02-26T22:42:36Z"
    name: manager
    ready: true
    restartCount: 1
    started: true
    state:
      running:
        startedAt: "2026-02-26T22:46:08Z"
  hostIP: 162.253.55.195
  phase: Running
  podIP: 10.0.0.78
  podIPs:
  - ip: 10.0.0.78
  qosClass: BestEffort
  startTime: "2026-02-26T22:42:35Z"
