# Live Pod object as serialized by the Kubernetes API server (e.g.
# `kubectl get pod -o yaml`): the controller-manager Pod of the
# OpenStack Resource Controller (ORC) in the orc-system namespace.
apiVersion: v1
kind: Pod
metadata:
  annotations:
    # Makes `kubectl logs` / `kubectl exec` default to the "manager" container.
    kubectl.kubernetes.io/default-container: manager
  creationTimestamp: "2026-04-17T11:36:47Z"
  # generateName + pod-template-hash label: this Pod was stamped out by a
  # Deployment's ReplicaSet (see ownerReferences below), not created directly.
  generateName: orc-controller-manager-6cb597b5d4-
  labels:
    control-plane: controller-manager
    pod-template-hash: 6cb597b5d4
  name: orc-controller-manager-6cb597b5d4-ph7z7
  namespace: orc-system
  ownerReferences:
  # Owned (and garbage-collected) via its parent ReplicaSet; controller: true
  # marks it as the managing controller, blockOwnerDeletion protects ordering
  # during foreground cascading deletes.
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: ReplicaSet
    name: orc-controller-manager-6cb597b5d4
    uid: 3dbe244a-9f49-41f9-ba98-5410931423c9
  resourceVersion: "23105"
  uid: 738dcd35-e0b9-49af-a593-726903fff4bf
spec:
  containers:
  - args:
    # controller-runtime flags: metrics on :8443, leader election enabled,
    # health/readiness endpoints on :8081 (the same port the probes below hit).
    - --metrics-bind-address=:8443
    - --leader-elect
    - --health-probe-bind-address=:8081
    command:
    - /manager
    # NOTE(review): image path looks like a pull-through cache of the upstream
    # quay.io/orc image via harbor.atmosphere.dev — confirm registry setup.
    image: harbor.atmosphere.dev/quay.io/orc/openstack-resource-controller:v2.2.0
    imagePullPolicy: IfNotPresent
    # Liveness: GET /healthz on the health-probe port configured in args.
    # NOTE(review): timeoutSeconds: 1 is tight; a briefly slow apiserver or
    # node can fail probes and trigger restarts — consider confirming headroom.
    livenessProbe:
      failureThreshold: 3
      httpGet:
        path: /healthz
        port: 8081
        scheme: HTTP
      initialDelaySeconds: 15
      periodSeconds: 20
      successThreshold: 1
      timeoutSeconds: 1
    name: manager
    # Readiness: GET /readyz on the same port, probed more frequently.
    readinessProbe:
      failureThreshold: 3
      httpGet:
        path: /readyz
        port: 8081
        scheme: HTTP
      initialDelaySeconds: 5
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 1
    # requests < limits => Burstable QoS (matches status.qosClass below).
    resources:
      limits:
        cpu: 500m
        memory: 256Mi
      requests:
        cpu: 10m
        memory: 64Mi
    # Hardened container: non-root UID/GID, no privilege escalation,
    # all Linux capabilities dropped.
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop:
        - ALL
      privileged: false
      # 65532 — presumably the distroless "nonroot" user; verify against image.
      runAsGroup: 65532
      runAsUser: 65532
    terminationMessagePath: /dev/termination-log
    # FallbackToLogsOnError: on crash, the tail of the container log is
    # captured into lastState.terminated.message (visible in status below).
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-7lprd
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  nodeName: instance
  # Pinned to control-plane nodes by label selector.
  nodeSelector:
    openstack-control-plane: enabled
  preemptionPolicy: PreemptLowerPriority
  priority: 0
  restartPolicy: Always
  schedulerName: default-scheduler
  # Pod-level security: non-root enforced, default seccomp profile.
  securityContext:
    runAsNonRoot: true
    seccompProfile:
      type: RuntimeDefault
  serviceAccount: orc-controller-manager
  serviceAccountName: orc-controller-manager
  terminationGracePeriodSeconds: 10
  # Default kubelet-injected tolerations: stay 300s on a not-ready /
  # unreachable node before eviction.
  tolerations:
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  # Standard projected service-account volume: bound SA token (~1h expiry),
  # cluster CA bundle, and the namespace via the downward API.
  # defaultMode: 420 is decimal for octal 0644.
  volumes:
  - name: kube-api-access-7lprd
    projected:
      defaultMode: 420
      sources:
      - serviceAccountToken:
          expirationSeconds: 3607
          path: token
      - configMap:
          items:
          - key: ca.crt
            path: ca.crt
          name: kube-root-ca.crt
      - downwardAPI:
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
            path: namespace
# Observed status: Pod is currently Running and Ready, but the manager
# container has restarted twice (restartCount: 2) — see lastState below
# for the captured crash details of the previous run.
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2026-04-17T11:36:47Z"
    status: "True"
    type: Initialized
  # Ready/ContainersReady transitioned ~8 minutes after scheduling —
  # consistent with the container restart at 11:44:32 recorded below.
  - lastProbeTime: null
    lastTransitionTime: "2026-04-17T11:44:38Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2026-04-17T11:44:38Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2026-04-17T11:36:47Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: containerd://aaa835d469f3be962005d8c6d7df43718c2a1ae8e9542877752d86a9872450bf
    image: harbor.atmosphere.dev/quay.io/orc/openstack-resource-controller:v2.2.0
    imageID: harbor.atmosphere.dev/quay.io/orc/openstack-resource-controller@sha256:e3c51b1c3048c3f8c2856a6327810981fd4624602091021c4d310092c85e247c
    # Previous run exited 1 ("leader election lost"): the message (log tail
    # captured via terminationMessagePolicy: FallbackToLogsOnError) shows the
    # Lease update/get against https://10.96.0.1:443 (coordination.k8s.io,
    # orc-system/f35396c5.k-orc.cloud) hitting "context deadline exceeded",
    # so the manager failed to renew its leader lease and shut itself down.
    # NOTE(review): looks like apiserver latency/overload at ~11:43, not an
    # ORC bug — worth correlating with apiserver metrics to confirm.
    lastState:
      terminated:
        containerID: containerd://4dd7f6dd6cc74bafec842ce2622a1ed46b120ebcf9496cb50570b0c74f19c46c
        exitCode: 1
        finishedAt: "2026-04-17T11:44:28Z"
        message: "ing workers\t{\"controller\": \"servergroup\", \"controllerGroup\":
          \"openstack.k-orc.cloud\", \"controllerKind\": \"ServerGroup\", \"worker
          count\": 1}\n2026-04-17T11:42:58Z\tINFO\tStarting Controller\t{\"controller\":
          \"credentials_deletion_guard_for_flavor\", \"controllerGroup\": \"\", \"controllerKind\":
          \"Secret\"}\n2026-04-17T11:42:58Z\tINFO\tStarting workers\t{\"controller\":
          \"credentials_deletion_guard_for_flavor\", \"controllerGroup\": \"\", \"controllerKind\":
          \"Secret\", \"worker count\": 1}\nE0417 11:43:44.365548       1 leaderelection.go:429]
          Failed to update lock optimistically: Put \"https://10.96.0.1:443/apis/coordination.k8s.io/v1/namespaces/orc-system/leases/f35396c5.k-orc.cloud?timeout=5s\":
          context deadline exceeded, falling back to slow path\nE0417 11:43:49.366266
          \      1 leaderelection.go:436] error retrieving resource lock orc-system/f35396c5.k-orc.cloud:
          Get \"https://10.96.0.1:443/apis/coordination.k8s.io/v1/namespaces/orc-system/leases/f35396c5.k-orc.cloud?timeout=5s\":
          net/http: request canceled (Client.Timeout exceeded while awaiting headers)\nE0417
          11:43:49.366395       1 leaderelection.go:429] Failed to update lock optimistically:
          client rate limiter Wait returned an error: rate: Wait(n=1) would exceed
          context deadline, falling back to slow path\nE0417 11:43:49.366435       1
          leaderelection.go:436] error retrieving resource lock orc-system/f35396c5.k-orc.cloud:
          client rate limiter Wait returned an error: rate: Wait(n=1) would exceed
          context deadline\nI0417 11:43:49.366876       1 leaderelection.go:297] failed
          to renew lease orc-system/f35396c5.k-orc.cloud: context deadline exceeded\nE0417
          11:43:53.579355       1 leaderelection.go:322] Failed to release lock: Operation
          cannot be fulfilled on leases.coordination.k8s.io \"f35396c5.k-orc.cloud\":
          the object has been modified; please apply your changes to the latest version
          and try again\n2026-04-17T11:43:53Z\tERROR\tsetup\tError starting manager\t{\"error\":
          \"problem running manager: leader election lost\"}\nmain.main\n\t/workspace/cmd/manager/main.go:121\nruntime.main\n\t/usr/local/go/src/runtime/proc.go:272\n"
        reason: Error
        startedAt: "2026-04-17T11:42:37Z"
    name: manager
    ready: true
    # Two restarts so far; current run has been up since 11:44:32.
    restartCount: 2
    started: true
    state:
      running:
        startedAt: "2026-04-17T11:44:32Z"
  hostIP: 199.204.45.233
  phase: Running
  podIP: 10.0.0.1
  podIPs:
  - ip: 10.0.0.1
  # Burstable: container requests are set but lower than limits (see spec).
  qosClass: Burstable
  startTime: "2026-04-17T11:36:47Z"
