# Live Pod object as served by the Kubernetes API. It includes server-populated,
# read-only fields (creationTimestamp, resourceVersion, uid, generateName result).
# NOTE(review): if this file is meant to be re-applied as a manifest, those
# fields (and the status section) should be stripped first — TODO confirm intent.
apiVersion: v1
kind: Pod
metadata:
  annotations:
    # Well-known annotation: makes `kubectl logs`/`exec` default to the
    # "manager" container when no -c flag is given.
    kubectl.kubernetes.io/default-container: manager
  creationTimestamp: "2026-04-16T10:00:40Z"
  generateName: orc-controller-manager-6cb597b5d4-
  labels:
    control-plane: controller-manager
    pod-template-hash: 6cb597b5d4
  name: orc-controller-manager-6cb597b5d4-979f5
  namespace: orc-system
  # This Pod is owned and lifecycle-managed by the ReplicaSet below
  # (controller: true); it is not a standalone Pod.
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: ReplicaSet
    name: orc-controller-manager-6cb597b5d4
    uid: 04fae537-4903-4823-b028-be4d2b0e1541
  resourceVersion: "23321"
  uid: 21b5d6f3-f82e-4dfa-a131-36f8901aff65
spec:
  containers:
  # Single container: the ORC (OpenStack Resource Controller) manager binary.
  - args:
    # Metrics served on :8443; health endpoints on :8081 (matches the probe
    # ports below). --leader-elect enables active/passive HA via a Lease.
    - --metrics-bind-address=:8443
    - --leader-elect
    - --health-probe-bind-address=:8081
    command:
    - /manager
    image: harbor.atmosphere.dev/quay.io/orc/openstack-resource-controller:v2.2.0
    imagePullPolicy: IfNotPresent
    # Liveness: /healthz on the health-probe port configured in args.
    livenessProbe:
      failureThreshold: 3
      httpGet:
        path: /healthz
        port: 8081
        scheme: HTTP
      initialDelaySeconds: 15
      periodSeconds: 20
      successThreshold: 1
      timeoutSeconds: 1
    name: manager
    # Readiness: /readyz on the same port, polled more frequently.
    readinessProbe:
      failureThreshold: 3
      httpGet:
        path: /readyz
        port: 8081
        scheme: HTTP
      initialDelaySeconds: 5
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 1
    # Requests << limits => Burstable QoS (reflected in status.qosClass).
    resources:
      limits:
        cpu: 500m
        memory: 256Mi
      requests:
        cpu: 10m
        memory: 64Mi
    # Container hardening: non-root UID/GID, no privilege escalation,
    # all Linux capabilities dropped.
    securityContext:
      allowPrivilegeEscalation: false
      capabilities:
        drop:
        - ALL
      privileged: false
      runAsGroup: 65532
      runAsUser: 65532
    terminationMessagePath: /dev/termination-log
    # On crash, capture the tail of the container log as the termination
    # message (this is what fills status.containerStatuses[].lastState.message).
    terminationMessagePolicy: FallbackToLogsOnError
    volumeMounts:
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-kchmm
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  # Server-populated: the node this Pod was bound to.
  nodeName: instance
  nodeSelector:
    openstack-control-plane: enabled
  preemptionPolicy: PreemptLowerPriority
  priority: 0
  restartPolicy: Always
  schedulerName: default-scheduler
  # Pod-level security: enforce non-root and the runtime's default seccomp
  # profile (complements the container-level securityContext above).
  securityContext:
    runAsNonRoot: true
    seccompProfile:
      type: RuntimeDefault
  serviceAccount: orc-controller-manager
  serviceAccountName: orc-controller-manager
  terminationGracePeriodSeconds: 10
  # Tolerate not-ready/unreachable nodes for up to 300s before eviction.
  tolerations:
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 300
  # Projected service-account credentials: bound token (~1h expiry),
  # cluster CA bundle, and the Pod's namespace via the downward API.
  volumes:
  - name: kube-api-access-kchmm
    projected:
      defaultMode: 420
      sources:
      - serviceAccountToken:
          expirationSeconds: 3607
          path: token
      - configMap:
          items:
          - key: ca.crt
            path: ca.crt
          name: kube-root-ca.crt
      - downwardAPI:
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
            path: namespace
# Observed state reported by the kubelet / API server (read-only).
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2026-04-16T10:00:40Z"
    status: "True"
    type: Initialized
  # Ready/ContainersReady transitioned at 10:08:40Z — 8 minutes after start,
  # i.e. after the container restart recorded in lastState below.
  - lastProbeTime: null
    lastTransitionTime: "2026-04-16T10:08:40Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2026-04-16T10:08:40Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2026-04-16T10:00:40Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: containerd://5cb922420ee2d92e7852f23b0b69fbddb8fcaae1e7da007244a0bdf8d051dc0f
    image: harbor.atmosphere.dev/quay.io/orc/openstack-resource-controller:v2.2.0
    imageID: harbor.atmosphere.dev/quay.io/orc/openstack-resource-controller@sha256:e3c51b1c3048c3f8c2856a6327810981fd4624602091021c4d310092c85e247c
    # Previous run exited 1 at 10:08:06Z: the log tail captured in `message`
    # shows leader-election lease renewal for orc-system/f35396c5.k-orc.cloud
    # failing on repeated API-server timeouts, ending in "leader election lost".
    # The kubelet then restarted the container (restartCount: 1) and it is
    # Running again as of 10:08:33Z.
    lastState:
      terminated:
        containerID: containerd://fa972d009ec59e8a199e45586a9dd007d6b0f85d0937a26529ea5b0a7978fed2
        exitCode: 1
        finishedAt: "2026-04-16T10:08:06Z"
        message: "2026-04-16T10:00:44Z\tINFO\tStarting workers\t{\"controller\": \"credentials_deletion_guard_for_flavor\",
          \"controllerGroup\": \"\", \"controllerKind\": \"Secret\", \"worker count\":
          1}\n2026-04-16T10:00:44Z\tINFO\tStarting Controller\t{\"controller\": \"port\",
          \"controllerGroup\": \"openstack.k-orc.cloud\", \"controllerKind\": \"Port\"}\n2026-04-16T10:00:44Z\tINFO\tStarting
          workers\t{\"controller\": \"port\", \"controllerGroup\": \"openstack.k-orc.cloud\",
          \"controllerKind\": \"Port\", \"worker count\": 1}\n2026-04-16T10:00:44Z\tINFO\tcontroller-runtime.metrics\tServing
          metrics server\t{\"bindAddress\": \":8443\", \"secure\": true}\nE0416 10:04:32.666457
          \      1 leaderelection.go:429] Failed to update lock optimistically: the
          server was unable to return a response in the time allotted, but may still
          be processing the request (put leases.coordination.k8s.io f35396c5.k-orc.cloud),
          falling back to slow path\nE0416 10:05:53.060616       1 leaderelection.go:429]
          Failed to update lock optimistically: Put \"https://10.96.0.1:443/apis/coordination.k8s.io/v1/namespaces/orc-system/leases/f35396c5.k-orc.cloud?timeout=5s\":
          net/http: request canceled (Client.Timeout exceeded while awaiting headers),
          falling back to slow path\nE0416 10:05:58.060805       1 leaderelection.go:436]
          error retrieving resource lock orc-system/f35396c5.k-orc.cloud: Get \"https://10.96.0.1:443/apis/coordination.k8s.io/v1/namespaces/orc-system/leases/f35396c5.k-orc.cloud?timeout=5s\":
          context deadline exceeded\nI0416 10:05:58.060926       1 leaderelection.go:297]
          failed to renew lease orc-system/f35396c5.k-orc.cloud: context deadline
          exceeded\nE0416 10:06:03.061748       1 leaderelection.go:322] Failed to
          release lock: Put \"https://10.96.0.1:443/apis/coordination.k8s.io/v1/namespaces/orc-system/leases/f35396c5.k-orc.cloud?timeout=5s\":
          net/http: request canceled (Client.Timeout exceeded while awaiting headers)\n2026-04-16T10:06:03Z\tERROR\tsetup\tError
          starting manager\t{\"error\": \"problem running manager: leader election
          lost\"}\nmain.main\n\t/workspace/cmd/manager/main.go:121\nruntime.main\n\t/usr/local/go/src/runtime/proc.go:272\n"
        reason: Error
        startedAt: "2026-04-16T10:00:42Z"
    name: manager
    ready: true
    restartCount: 1
    started: true
    state:
      running:
        startedAt: "2026-04-16T10:08:33Z"
  hostIP: 199.204.45.77
  phase: Running
  podIP: 10.0.0.246
  podIPs:
  - ip: 10.0.0.246
  # Burstable: requests are set but lower than limits (see spec.resources).
  qosClass: Burstable
  startTime: "2026-04-16T10:00:40Z"
