apiVersion: v1
kind: Pod
metadata:
  creationTimestamp: "2026-04-08T06:36:11Z"
  generateName: rook-ceph-rgw-ceph-a-699b8bdb59-
  labels:
    app: rook-ceph-rgw
    app.kubernetes.io/component: cephobjectstores.ceph.rook.io
    app.kubernetes.io/created-by: rook-ceph-operator
    app.kubernetes.io/instance: ceph
    app.kubernetes.io/managed-by: rook-ceph-operator
    app.kubernetes.io/name: ceph-rgw
    app.kubernetes.io/part-of: ceph
    ceph_daemon_id: ceph
    ceph_daemon_type: rgw
    pod-template-hash: 699b8bdb59
    rgw: ceph
    rook.io/operator-namespace: rook-ceph
    rook_cluster: openstack
    rook_object_store: ceph
  name: rook-ceph-rgw-ceph-a-699b8bdb59-zn68p
  namespace: openstack
  ownerReferences:
  - apiVersion: apps/v1
    blockOwnerDeletion: true
    controller: true
    kind: ReplicaSet
    name: rook-ceph-rgw-ceph-a-699b8bdb59
    uid: 00da538f-f103-404a-b3e7-c732ceec9684
  resourceVersion: "9784"
  uid: 9f44d0e9-2b36-4d97-a70f-5166f989273b
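# NOTE: this Pod is generated and owned by the Rook operator (see ownerReferences),
# so durable changes belong on the CephObjectStore CR rather than on the Pod. For
# example (CR name "ceph" inferred from the rook_object_store label):
#   kubectl -n openstack get cephobjectstore ceph -o yaml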
spec:
  affinity:
    nodeAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          - key: openstack-control-plane
            operator: In
            values:
            - enabled
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - podAffinityTerm:
          labelSelector:
            matchLabels:
              app: rook-ceph-rgw
              ceph_daemon_id: ceph
              rgw: ceph
              rook_cluster: openstack
              rook_object_store: ceph
          topologyKey: kubernetes.io/hostname
        weight: 50
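  # Scheduling: the required nodeAffinity pins this RGW to nodes labeled
  # openstack-control-plane=enabled, while the preferred (weight 50) podAntiAffinity
  # only encourages spreading replicas of this object store across hostnames.
  # Making a node eligible (hypothetical node name):
  #   kubectl label node <node-name> openstack-control-plane=enabled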
  containers:
  - args:
    - --fsid=4837cbf8-4f90-4300-b3f6-726c9b9f89b4
    - --keyring=/etc/ceph/keyring-store/keyring
    - --default-log-to-stderr=true
    - --default-err-to-stderr=true
    - --default-mon-cluster-log-to-stderr=true
    - '--default-log-stderr-prefix=debug '
    - --default-log-to-file=false
    - --default-mon-cluster-log-to-file=false
    - --mon-host=$(ROOK_CEPH_MON_HOST)
    - --mon-initial-members=$(ROOK_CEPH_MON_INITIAL_MEMBERS)
    - --id=rgw.ceph.a
    - --setuser=ceph
    - --setgroup=ceph
    - --foreground
    - --rgw-frontends=beast port=8080
    - --host=$(POD_NAME)
    - --rgw-mime-types-file=/etc/ceph/rgw/mime.types
    - --rgw-realm=ceph
    - --rgw-zonegroup=ceph
    - --rgw-zone=ceph
    command:
    - radosgw
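    # radosgw runs in the foreground with the Beast frontend on port 8080, with all
    # logging forced to stderr so `kubectl logs` and the log-collector sidecar can
    # capture it. Realm, zonegroup, and zone are all named "ceph". A quick smoke test
    # from inside the cluster (pod IP taken from .status.podIP below; an anonymous
    # GET typically returns an XML ListAllMyBucketsResult for the anonymous user):
    #   curl -s http://10.0.0.51:8080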
    env:
    - name: CONTAINER_IMAGE
      value: harbor.atmosphere.dev/quay.io/ceph/ceph:v18.2.7
    - name: POD_NAME
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.name
    - name: POD_NAMESPACE
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: metadata.namespace
    - name: NODE_NAME
      valueFrom:
        fieldRef:
          apiVersion: v1
          fieldPath: spec.nodeName
    - name: POD_MEMORY_LIMIT
      valueFrom:
        resourceFieldRef:
          divisor: "0"
          resource: limits.memory
    - name: POD_MEMORY_REQUEST
      valueFrom:
        resourceFieldRef:
          divisor: "0"
          resource: requests.memory
    - name: POD_CPU_LIMIT
      valueFrom:
        resourceFieldRef:
          divisor: "1"
          resource: limits.cpu
    - name: POD_CPU_REQUEST
      valueFrom:
        resourceFieldRef:
          divisor: "0"
          resource: requests.cpu
    - name: CEPH_USE_RANDOM_NONCE
      value: "true"
    - name: ROOK_MSGR2
      value: msgr2_false_encryption_false_compression_false
    - name: ROOK_CEPH_MON_HOST
      valueFrom:
        secretKeyRef:
          key: mon_host
          name: rook-ceph-config
    - name: ROOK_CEPH_MON_INITIAL_MEMBERS
      valueFrom:
        secretKeyRef:
          key: mon_initial_members
          name: rook-ceph-config
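    # The mon endpoints come from the rook-ceph-config Secret rather than being baked
    # into the spec, presumably so the pod template doesn't change on mon failover.
    # To inspect the current value (Secret data is base64-encoded):
    #   kubectl -n openstack get secret rook-ceph-config \
    #     -o jsonpath='{.data.mon_host}' | base64 -d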
    image: harbor.atmosphere.dev/quay.io/ceph/ceph:v18.2.7
    imagePullPolicy: IfNotPresent
    name: rgw
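    # The probes below shell out to curl rather than using a plain httpGet probe so
    # they can special-case RGW status codes: 503 (rate limiting) is treated as
    # healthy, and 500 (likely misconfiguration) is handled differently per probe type.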
    readinessProbe:
      exec:
        command:
        - bash
        - -c
        - |
          #!/usr/bin/env bash

          PROBE_TYPE="readiness"
          PROBE_PORT="8080"
          PROBE_PROTOCOL="HTTP"

          # standard bash exit codes start at 126 and progress upward. pick this script's error
          # codes from 125 downward so that curl can add new error codes and still return a
          # distinctive number.
          USAGE_ERR_CODE=125
          PROBE_ERR_CODE=124
          # curl error codes: 1-123

          STARTUP_TYPE='startup'
          READINESS_TYPE='readiness'

          RGW_URL="$PROBE_PROTOCOL://0.0.0.0:$PROBE_PORT"

          function check() {
            local URL="$1"
            # --insecure - don't validate ssl if using secure port only
            # --silent - don't output progress info
            # --output /dev/stderr - send the response body to stderr (useful for debugging)
            # --write-out '%{response_code}' - print the HTTP response code to stdout
            curl --insecure --silent --output /dev/stderr --write-out '%{response_code}' "$URL"
          }

          http_response="$(check "$RGW_URL")"
          retcode=$?
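          # note: the exit status of an assignment whose value is a command substitution
          # is the exit status of the substituted command, so $? above is curl's exit code.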

          if [[ $retcode -ne 0 ]]; then
            # if this is the startup probe, always return failure. once the startup probe passes,
            # all subsequent probes can rely on the assumption that the health check succeeded at
            # least once without errors. if this is the readiness probe, we know that curl worked
            # during the startup probe, so a curl error most likely means some new error with the RGW.
            echo "RGW health check failed with error code: $retcode. the RGW likely cannot be reached by clients" >/dev/stderr
            exit $retcode
          fi

          RGW_RATE_LIMITING_RESPONSE=503
          RGW_MISCONFIGURATION_RESPONSE=500

          if [[ $http_response -ge 200 ]] && [[ $http_response -lt 400 ]]; then
            # 200-399 are successful responses. same behavior as Kubernetes' HTTP probe
            exit 0

          elif [[ $http_response -eq $RGW_RATE_LIMITING_RESPONSE ]]; then
            # S3's '503: slow down' code is not an error but an indication that RGW is throttling client
            # traffic. failing the readiness check here would only cause an increase in client connections on
            # other RGWs and likely cause those to fail also in a cascade. i.e., a special healthy response.
            echo "INFO: RGW is rate limiting" 2>/dev/stderr
            exit 0

          elif [[ $http_response -eq $RGW_MISCONFIGURATION_RESPONSE ]]; then
            # can't specifically determine if the RGW is running or not. most likely a misconfiguration.
            case "$PROBE_TYPE" in
            "$STARTUP_TYPE")
              # fail until the RGW reliably returns a valid healthy response once the runtime starts.
              echo 'FAIL: HTTP code 500 suggests an RGW misconfiguration.' >/dev/stderr
              exit $PROBE_ERR_CODE
              ;;
            "$READINESS_TYPE")
              # config likely modified at runtime which could result in all RGWs failing this check.
              # occasional client failures are still better than total failure, so ignore this
              echo 'WARN: HTTP code 500 suggests an RGW misconfiguration' >/dev/stderr
              exit 0
              ;;
            *)
              # prior arg validation means this path should never be taken, but keep it as a safeguard
              echo "ERROR: probe type is unknown: $PROBE_TYPE" >/dev/stderr
              exit $USAGE_ERR_CODE
              ;;
            esac

          else
            # anything else is a failing response. same behavior as Kubernetes' HTTP probe
            echo "FAIL: received an HTTP error code: $http_response"
            exit $PROBE_ERR_CODE

          fi
      failureThreshold: 3
      initialDelaySeconds: 10
      periodSeconds: 10
      successThreshold: 3
      timeoutSeconds: 5
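    # Timing: successThreshold 3 requires three consecutive passes (~30s at
    # periodSeconds 10) before the container is marked Ready, and failureThreshold 3
    # pulls it from Service endpoints after ~30s of consecutive failures.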
    resources: {}
    securityContext:
      privileged: false
    startupProbe:
      exec:
        command:
        - bash
        - -c
        - |
          #!/usr/bin/env bash

          PROBE_TYPE="startup"
          PROBE_PORT="8080"
          PROBE_PROTOCOL="HTTP"

          # standard bash exit codes start at 126 and progress upward. pick this script's error
          # codes from 125 downward so that curl can add new error codes and still return a
          # distinctive number.
          USAGE_ERR_CODE=125
          PROBE_ERR_CODE=124
          # curl error codes: 1-123

          STARTUP_TYPE='startup'
          READINESS_TYPE='readiness'

          RGW_URL="$PROBE_PROTOCOL://0.0.0.0:$PROBE_PORT"

          function check() {
            local URL="$1"
            # --insecure - don't validate ssl if using secure port only
            # --silent - don't output progress info
            # --output /dev/stderr - send the response body to stderr (useful for debugging)
            # --write-out '%{response_code}' - print the HTTP response code to stdout
            curl --insecure --silent --output /dev/stderr --write-out '%{response_code}' "$URL"
          }

          http_response="$(check "$RGW_URL")"
          retcode=$?
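          # note: the exit status of an assignment whose value is a command substitution
          # is the exit status of the substituted command, so $? above is curl's exit code.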

          if [[ $retcode -ne 0 ]]; then
            # if this is the startup probe, always return failure. once the startup probe passes,
            # all subsequent probes can rely on the assumption that the health check succeeded at
            # least once without errors. if this is the readiness probe, we know that curl worked
            # during the startup probe, so a curl error most likely means some new error with the RGW.
            echo "RGW health check failed with error code: $retcode. the RGW likely cannot be reached by clients" >/dev/stderr
            exit $retcode
          fi

          RGW_RATE_LIMITING_RESPONSE=503
          RGW_MISCONFIGURATION_RESPONSE=500

          if [[ $http_response -ge 200 ]] && [[ $http_response -lt 400 ]]; then
            # 200-399 are successful responses. same behavior as Kubernetes' HTTP probe
            exit 0

          elif [[ $http_response -eq $RGW_RATE_LIMITING_RESPONSE ]]; then
            # S3's '503: slow down' code is not an error but an indication that RGW is throttling client
            # traffic. failing the readiness check here would only cause an increase in client connections on
            # other RGWs and likely cause those to fail also in a cascade. i.e., a special healthy response.
            echo "INFO: RGW is rate limiting" 2>/dev/stderr
            exit 0

          elif [[ $http_response -eq $RGW_MISCONFIGURATION_RESPONSE ]]; then
            # can't specifically determine if the RGW is running or not. most likely a misconfiguration.
            case "$PROBE_TYPE" in
            "$STARTUP_TYPE")
              # fail until the RGW reliably returns a valid healthy response once the runtime starts.
              echo 'FAIL: HTTP code 500 suggests an RGW misconfiguration.' >/dev/stderr
              exit $PROBE_ERR_CODE
              ;;
            "$READINESS_TYPE")
              # config likely modified at runtime which could result in all RGWs failing this check.
              # occasional client failures are still better than total failure, so ignore this
              echo 'WARN: HTTP code 500 suggests an RGW misconfiguration' >/dev/stderr
              exit 0
              ;;
            *)
              # prior arg validation means this path should never be taken, but keep it as a safeguard
              echo "ERROR: probe type is unknown: $PROBE_TYPE" >/dev/stderr
              exit $USAGE_ERR_CODE
              ;;
            esac

          else
            # anything else is a failing response. same behavior as Kubernetes' HTTP probe
            echo "FAIL: received an HTTP error code: $http_response"
            exit $PROBE_ERR_CODE

          fi
      failureThreshold: 33
      initialDelaySeconds: 10
      periodSeconds: 10
      successThreshold: 1
      timeoutSeconds: 5
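    # Timing: failureThreshold 33 x periodSeconds 10 gives radosgw roughly 330s after
    # the 10s initial delay to come up before the kubelet restarts the container; the
    # readiness probe only starts running once this startup probe has succeeded.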
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /etc/ceph
      name: rook-config-override
      readOnly: true
    - mountPath: /run/ceph
      name: ceph-daemons-sock-dir
    - mountPath: /etc/ceph/keyring-store/
      name: rook-ceph-rgw-ceph-a-keyring
      readOnly: true
    - mountPath: /var/log/ceph
      name: rook-ceph-log
    - mountPath: /var/lib/ceph/crash
      name: rook-ceph-crash
    - mountPath: /var/lib/ceph/rgw/ceph-ceph
      name: ceph-daemon-data
    - mountPath: /etc/ceph/rgw
      name: rook-ceph-rgw-ceph-mime-types
      readOnly: true
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-7xsq2
      readOnly: true
    workingDir: /var/log/ceph
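  # Sidecar: narrows the stock /etc/logrotate.d/ceph config to this daemon's log,
  # then loops logrotate every 15 minutes over the shared /var/log/ceph hostPath.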
  - command:
    - /bin/bash
    - -x
    - -e
    - -m
    - -c
    - "\nCEPH_CLIENT_ID=ceph-client.rgw.ceph.a\nPERIODICITY=daily\nLOG_ROTATE_CEPH_FILE=/etc/logrotate.d/ceph\nLOG_MAX_SIZE=500M\nROTATE=7\n\n#
      edit the logrotate file to only rotate a specific daemon log\n# otherwise we
      will logrotate log files without reloading certain daemons\n# this might happen
      when multiple daemons run on the same machine\nsed -i \"s|*.log|$CEPH_CLIENT_ID.log|\"
      \"$LOG_ROTATE_CEPH_FILE\"\n\n# replace default daily with given user input\nsed
      --in-place \"s/daily/$PERIODICITY/g\" \"$LOG_ROTATE_CEPH_FILE\"\n\n# replace
      rotate count, default 7 for all ceph daemons other than rbd-mirror\nsed --in-place
      \"s/rotate 7/rotate $ROTATE/g\" \"$LOG_ROTATE_CEPH_FILE\"\n\nif [ \"$LOG_MAX_SIZE\"
      != \"0\" ]; then\n\t# adding maxsize $LOG_MAX_SIZE at the 4th line of the logrotate
      config file with 4 spaces to maintain indentation\n\tsed --in-place \"4i \\
      \\ \\ \\ maxsize $LOG_MAX_SIZE\" \"$LOG_ROTATE_CEPH_FILE\"\nfi\n\nwhile true;
      do\n\t# we don't force the logrorate but we let the logrotate binary handle
      the rotation based on user's input for periodicity and size\n\tlogrotate --verbose
      \"$LOG_ROTATE_CEPH_FILE\"\n\tsleep 15m\ndone\n"
    image: harbor.atmosphere.dev/quay.io/ceph/ceph:v18.2.7
    imagePullPolicy: IfNotPresent
    name: log-collector
    resources:
      limits:
        memory: 1Gi
      requests:
        cpu: 100m
        memory: 100Mi
    securityContext:
      privileged: false
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    tty: true
    volumeMounts:
    - mountPath: /etc/ceph
      name: rook-config-override
      readOnly: true
    - mountPath: /run/ceph
      name: ceph-daemons-sock-dir
    - mountPath: /var/log/ceph
      name: rook-ceph-log
    - mountPath: /var/lib/ceph/crash
      name: rook-ceph-crash
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-7xsq2
      readOnly: true
  dnsPolicy: ClusterFirst
  enableServiceLinks: true
  initContainers:
  - args:
    - --verbose
    - --recursive
    - ceph:ceph
    - /var/log/ceph
    - /var/lib/ceph/crash
    - /run/ceph
    - /var/lib/ceph/rgw/ceph-ceph
    command:
    - chown
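    # Runs once before the main containers so the hostPath and emptyDir mounts below
    # are owned by ceph:ceph, matching the --setuser/--setgroup identity the radosgw
    # process drops to.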
    image: harbor.atmosphere.dev/quay.io/ceph/ceph:v18.2.7
    imagePullPolicy: IfNotPresent
    name: chown-container-data-dir
    resources: {}
    securityContext:
      privileged: false
    terminationMessagePath: /dev/termination-log
    terminationMessagePolicy: File
    volumeMounts:
    - mountPath: /etc/ceph
      name: rook-config-override
      readOnly: true
    - mountPath: /run/ceph
      name: ceph-daemons-sock-dir
    - mountPath: /etc/ceph/keyring-store/
      name: rook-ceph-rgw-ceph-a-keyring
      readOnly: true
    - mountPath: /var/log/ceph
      name: rook-ceph-log
    - mountPath: /var/lib/ceph/crash
      name: rook-ceph-crash
    - mountPath: /var/lib/ceph/rgw/ceph-ceph
      name: ceph-daemon-data
    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount
      name: kube-api-access-7xsq2
      readOnly: true
  nodeName: instance
  preemptionPolicy: PreemptLowerPriority
  priority: 0
  restartPolicy: Always
  schedulerName: default-scheduler
  securityContext: {}
  serviceAccount: rook-ceph-rgw
  serviceAccountName: rook-ceph-rgw
  shareProcessNamespace: true
  terminationGracePeriodSeconds: 30
  tolerations:
  - effect: NoExecute
    key: node.kubernetes.io/unreachable
    operator: Exists
    tolerationSeconds: 5
  - effect: NoExecute
    key: node.kubernetes.io/not-ready
    operator: Exists
    tolerationSeconds: 300
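  # Eviction tuning: tolerationSeconds 5 for node.kubernetes.io/unreachable is far
  # below the 300s Kubernetes default, so this RGW is deleted and rescheduled within
  # seconds of its node going unreachable rather than after five minutes.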
  volumes:
  - name: rook-config-override
    projected:
      defaultMode: 420
      sources:
      - configMap:
          items:
          - key: config
            mode: 292
            path: ceph.conf
          name: rook-config-override
  - name: rook-ceph-rgw-ceph-a-keyring
    secret:
      defaultMode: 420
      secretName: rook-ceph-rgw-ceph-a-keyring
  - hostPath:
      path: /var/lib/rook/exporter
      type: DirectoryOrCreate
    name: ceph-daemons-sock-dir
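  # Presumably shared with Rook's ceph-exporter, which reads per-daemon admin sockets
  # from this same hostPath to expose daemon metrics.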
  - hostPath:
      path: /var/lib/rook/openstack/log
      type: ""
    name: rook-ceph-log
  - hostPath:
      path: /var/lib/rook/openstack/crash
      type: ""
    name: rook-ceph-crash
  - emptyDir: {}
    name: ceph-daemon-data
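  # RGW itself is stateless (object data lives in RADOS pools), so an emptyDir
  # suffices for its data directory; logs and crash dumps persist via the hostPaths above.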
  - configMap:
      defaultMode: 420
      name: rook-ceph-rgw-ceph-mime-types
    name: rook-ceph-rgw-ceph-mime-types
  - name: kube-api-access-7xsq2
    projected:
      defaultMode: 420
      sources:
      - serviceAccountToken:
          expirationSeconds: 3607
          path: token
      - configMap:
          items:
          - key: ca.crt
            path: ca.crt
          name: kube-root-ca.crt
      - downwardAPI:
          items:
          - fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
            path: namespace
status:
  conditions:
  - lastProbeTime: null
    lastTransitionTime: "2026-04-08T06:36:13Z"
    status: "True"
    type: Initialized
  - lastProbeTime: null
    lastTransitionTime: "2026-04-08T06:36:52Z"
    status: "True"
    type: Ready
  - lastProbeTime: null
    lastTransitionTime: "2026-04-08T06:36:52Z"
    status: "True"
    type: ContainersReady
  - lastProbeTime: null
    lastTransitionTime: "2026-04-08T06:36:11Z"
    status: "True"
    type: PodScheduled
  containerStatuses:
  - containerID: containerd://7c7fc1125cb57e7ebc0c9531e21d47d46151d1cb82f0c80499a86ddd2633e997
    image: harbor.atmosphere.dev/quay.io/ceph/ceph:v18.2.7
    imageID: harbor.atmosphere.dev/quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
    lastState: {}
    name: log-collector
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2026-04-08T06:36:13Z"
  - containerID: containerd://739ae1b621444939003f586cf871ffe2881c478d684a0a35404c8b15124319d4
    image: harbor.atmosphere.dev/quay.io/ceph/ceph:v18.2.7
    imageID: harbor.atmosphere.dev/quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
    lastState: {}
    name: rgw
    ready: true
    restartCount: 0
    started: true
    state:
      running:
        startedAt: "2026-04-08T06:36:13Z"
  hostIP: 199.204.45.19
  initContainerStatuses:
  - containerID: containerd://f26dce6126c33cd2d034fdc49c7e2229e4f81d0e8925d2be6c8c4f713c783aee
    image: harbor.atmosphere.dev/quay.io/ceph/ceph:v18.2.7
    imageID: harbor.atmosphere.dev/quay.io/ceph/ceph@sha256:1b9158ce28975f95def6a0ad459fa19f1336506074267a4b47c1bd914a00fec0
    lastState: {}
    name: chown-container-data-dir
    ready: true
    restartCount: 0
    started: false
    state:
      terminated:
        containerID: containerd://f26dce6126c33cd2d034fdc49c7e2229e4f81d0e8925d2be6c8c4f713c783aee
        exitCode: 0
        finishedAt: "2026-04-08T06:36:12Z"
        reason: Completed
        startedAt: "2026-04-08T06:36:12Z"
  phase: Running
  podIP: 10.0.0.51
  podIPs:
  - ip: 10.0.0.51
  qosClass: Burstable
  startTime: "2026-04-08T06:36:11Z"
