# Cluster API Provider OpenStack (CAPO) controller-manager Pod, as read back
# from the API server (server-populated fields like uid/resourceVersion are
# present — this is a live-object dump, not an apply-able authored manifest).
apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: "2026-03-04T13:41:53Z"
  # generateName + pod-template-hash label: Pod was stamped out by a Deployment's
  # ReplicaSet (see ownerReferences below), not created directly.
  generateName: capo-controller-manager-6975759b4b-
  labels:
    cluster.x-k8s.io/provider: infrastructure-openstack
    control-plane: capo-controller-manager
    pod-template-hash: 6975759b4b
  name: capo-controller-manager-6975759b4b-8lkwp
  namespace: capo-system
  # Owned/controlled by its ReplicaSet; deleting this Pod lets the ReplicaSet
  # recreate a replacement.
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: ReplicaSet
    name: capo-controller-manager-6975759b4b
    uid: ee958e77-dd6f-4859-b56f-67deb0c1a660
  resourceVersion: "24310"
  uid: 0ff17a0e-99a3-490f-bdbc-c5765bd6b270
spec:
  containers:
  - args:
    - --leader-elect
    - --v=2
    # Diagnostics endpoint is bound to localhost only and served without
    # auth/TLS (--insecure-diagnostics=true).
    - --diagnostics-address=127.0.0.1:8080
    - --insecure-diagnostics=true
    command:
    - /manager
    env:
    # Value quoted so YAML keeps it a string (env var values must be strings).
    # NOTE(review): units ("10" of what) are defined by the controller — verify
    # against CAPO docs before changing.
    - name: CLUSTER_API_OPENSTACK_INSTANCE_CREATE_TIMEOUT
      value: "10"
    image: harbor.atmosphere.dev/registry.k8s.io/capi-openstack/capi-openstack-controller:v0.12.4
    imagePullPolicy: IfNotPresent
    # Both probes target the named container port "healthz" (9443 is the
    # webhook port, 9440 the health port — see ports below).
    livenessProbe:
      failureThreshold: 3
      httpGet:
        path: /healthz
        port: healthz
        scheme: HTTP
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 1
    name: manager
    ports:
    - containerPort: 9443
      name: webhook-server
      protocol: TCP
    - containerPort: 9440
      name: healthz
      protocol: TCP
    readinessProbe:
      failureThreshold: 3
      httpGet:
        path: /readyz
        port: healthz
        scheme: HTTP
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 1
    # NOTE(review): no requests/limits, so the Pod is classed BestEffort
    # (status.qosClass confirms) — consider setting requests for a controller
    # this critical.
    resources: {}
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop:
        - ALL
      privileged: false
      # 65532 is the conventional "nonroot" uid/gid of distroless images —
      # TODO confirm against the image.
      runAsGroup: 65532
      runAsUser: 65532
    terminationMessagePath: /dev/termination-log
    # On failure the last log lines become the termination message — this is
    # what populates status.containerStatuses[].lastState.terminated.message.
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    # Webhook serving certs, read-only, from the "cert" secret volume below.
    - mountPath: /tmp/k8s-webhook-server/serving-certs
      name: cert
      readOnly: true
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-tp8fs
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  nodeName: instance
  # Pin scheduling to nodes labeled as OpenStack control-plane nodes.
  nodeSelector:
    openstack-control-plane: enabled
  preemptionPolicy: PreemptLowerPriority
  priority: 0
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext:
    runAsNonRoot: true
    seccompProfile:
      type: RuntimeDefault
  serviceAccount: capo-manager
  serviceAccountName: capo-manager
  terminationGracePeriodSeconds: 10
  # Tolerate both the legacy "master" and current "control-plane" NoSchedule
  # taints, plus the standard 300s not-ready/unreachable eviction tolerations.
  tolerations:
  - effect: NoSchedule
    key: node-role.kubernetes.io/master
  - effect: NoSchedule
    key: node-role.kubernetes.io/control-plane
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  volumes:
  # defaultMode 420 is decimal for octal 0644 (API stores file modes as
  # decimal integers).
  - name: cert
    secret:
      defaultMode: 420
      secretName: capo-webhook-service-cert
  # Projected service-account token volume: bound token + cluster CA cert +
  # downward-API namespace file.
  - name: kube-api-access-tp8fs
    projected:
      defaultMode: 420
      sources:
      - serviceAccountToken:
          expirationSeconds: 3607
          path: token
      - configMap:
          items:
          - key: ca.crt
            path: ca.crt
          name: kube-root-ca.crt
      - downwardAPI:
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
            path: namespace
# Server-populated status: read-only; edits here are never honored by the API.
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2026-03-04T13:41:53Z"
    status: "True"
    type: Initialized
  # Ready/ContainersReady transitioned at 13:45:46 — the restart time of the
  # container (see state.running.startedAt below), ~4 min after scheduling.
  - lastProbeTime: null
    lastTransitionTime: "2026-03-04T13:45:46Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2026-03-04T13:45:46Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2026-03-04T13:41:53Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: containerd://697836cb9fb1408d2f90f31f1d802b83bd8fc2991496b2c953380ccfeea2dcf1
    image: harbor.atmosphere.dev/registry.k8s.io/capi-openstack/capi-openstack-controller:v0.12.4
    imageID: harbor.atmosphere.dev/registry.k8s.io/capi-openstack/capi-openstack-controller@sha256:237da708e483aa8c39766a217d25e45de52816d446764569c77550e6f56f0970
    # Previous run exited 1 after losing leader election: lease renewal PUTs to
    # the API server timed out (log tail captured below via the container's
    # FallbackToLogsOnError termination-message policy). Do NOT "fix" the text
    # inside message — it is runtime data recorded verbatim by the kubelet,
    # typos included.
    lastState:
      terminated:
        containerID: containerd://ddb9109c9e817e448bb478bb329d239d165c86d1c7522f9bd159217d2bcf3a36
        exitCode: 1
        finishedAt: "2026-03-04T13:45:12Z"
        # "|2" = literal block scalar with explicit indentation indicator 2,
        # emitted because the first log line carries extra leading whitespace
        # relative to the rest and the content indent could not be inferred.
        message: |2
           populated for *v1beta1.Cluster from pkg/mod/k8s.io/client-go@v0.31.10/tools/cache/reflector.go:243
          I0304 13:42:05.626851       1 reflector.go:368] Caches populated for *v1alpha1.OpenStackFloatingIPPool from pkg/mod/k8s.io/client-go@v0.31.10/tools/cache/reflector.go:243
          I0304 13:42:05.627564       1 reflector.go:368] Caches populated for *v1alpha1.Image from pkg/mod/k8s.io/client-go@v0.31.10/tools/cache/reflector.go:243
          I0304 13:42:05.721870       1 controller.go:217] "Starting workers" controller="openstackcluster" controllerGroup="infrastructure.cluster.x-k8s.io" controllerKind="OpenStackCluster" worker count=10
          I0304 13:42:05.722061       1 controller.go:217] "Starting workers" controller="openstackmachine" controllerGroup="infrastructure.cluster.x-k8s.io" controllerKind="OpenStackMachine" worker count=10
          I0304 13:42:05.726327       1 controller.go:217] "Starting workers" controller="openstackfloatingippool" controllerGroup="infrastructure.cluster.x-k8s.io" controllerKind="OpenStackFloatingIPPool" worker count=1
          I0304 13:42:05.726410       1 controller.go:217] "Starting workers" controller="openstackserver" controllerGroup="infrastructure.cluster.x-k8s.io" controllerKind="OpenStackServer" worker count=10
          E0304 13:45:03.324542       1 leaderelection.go:429] Failed to update lock optimitically: Put "https://10.96.0.1:443/apis/coordination.k8s.io/v1/namespaces/capo-system/leases/controller-leader-election-capo?timeout=5s": context deadline exceeded (Client.Timeout exceeded while awaiting headers), falling back to slow path
          E0304 13:45:08.322721       1 leaderelection.go:472] Failed to update lock: Put "https://10.96.0.1:443/apis/coordination.k8s.io/v1/namespaces/capo-system/leases/controller-leader-election-capo?timeout=5s": context deadline exceeded
          I0304 13:45:08.322796       1 leaderelection.go:297] failed to renew lease capo-system/controller-leader-election-capo: timed out waiting for the condition
          E0304 13:45:12.194213       1 main.go:281] "problem running manager" err="leader election lost" logger="setup"
        reason: Error
        startedAt: "2026-03-04T13:41:54Z"
    name: manager
    ready: true
    # One restart, matching the terminated lastState above; current run has
    # been up since 13:45:46.
    restartCount: 1
    started: true
    state:
      running:
        startedAt: "2026-03-04T13:45:46Z"
  hostIP: 162.253.55.110
  phase: Running
  podIP: 10.0.0.254
  podIPs:
  - ip: 10.0.0.254
  # BestEffort follows from spec.containers[].resources being empty.
  qosClass: BestEffort
  startTime: "2026-03-04T13:41:53Z"
