apiVersion: v1
data:
  ceph-admin-keyring.sh: |
    #!/bin/bash

    set -ex
    export HOME=/tmp

    # Materialize the client.admin keyring from the mounted secret so
    # ceph CLI commands in this pod can authenticate as admin.
    # $(...) is expanded by the shell at run time (unquoted heredoc).
    cat > /etc/ceph/ceph.client.admin.keyring << EOF
    [client.admin]
        key = $(cat /tmp/client-keyring)
    EOF

    exit 0
  ceph-keyring.sh: |
    #!/bin/bash

    set -ex
    export HOME=/tmp

    # Render ceph.conf from the template shipped with the chart.
    cp -fv /etc/ceph/ceph.conf.template /etc/ceph/ceph.conf

    KEYRING=/etc/ceph/ceph.client.${CEPH_CINDER_USER}.keyring
    if ! [ "x${CEPH_CINDER_USER}" == "xadmin" ]; then
      #
      # For any user other than client.admin: if the cephx user already
      # exists, make sure its caps follow rbd best practices and fetch its
      # keyring; otherwise create it with those caps. Capturing the output
      # in USERINFO keeps `ceph auth get` quiet while acting as the
      # existence check.
      #
      if USERINFO=$(ceph auth get client.${CEPH_CINDER_USER}); then
        echo "Cephx user client.${CEPH_CINDER_USER} already exists"
        echo "Update user client.${CEPH_CINDER_USER} caps"
        ceph auth caps client.${CEPH_CINDER_USER} \
           mon "profile rbd" \
           osd "profile rbd"
        ceph auth get client.${CEPH_CINDER_USER} -o "${KEYRING}"
      else
        echo "Creating Cephx user client.${CEPH_CINDER_USER}"
        ceph auth get-or-create client.${CEPH_CINDER_USER} \
          mon "profile rbd" \
          osd "profile rbd" \
          -o "${KEYRING}"
      fi
      # Do not leave the admin keyring on disk for non-admin setups.
      rm -f /etc/ceph/ceph.client.admin.keyring
    fi
  init-dynamic-options.sh: |
    #!/bin/bash
    set -ex

    # Rendered config is handed to the libvirt container through the
    # pod-shared volume mounted at /tmp/pod-shared.
    LIBVIRT_CONF_PATH=/tmp/pod-shared/libvirtd.conf
    LISTEN_IP_ADDRESS=127.0.0.1

    # NOTE(review): this guard can never fire — LISTEN_IP_ADDRESS is
    # assigned a constant two lines up and is not referenced again
    # (listen_addr below is hard-coded to 0.0.0.0). Presumably it was
    # meant to be templated into listen_addr; confirm before removing.
    if [[ -z $LISTEN_IP_ADDRESS ]]; then
        echo "LISTEN_IP_ADDRESS is not set."
        exit 1
    fi

    # Write the TLS-enabled libvirtd configuration. The heredoc body is
    # the literal config file content; it contains no shell-expandable
    # tokens, so the unquoted EOF delimiter is safe here.
    tee > ${LIBVIRT_CONF_PATH} << EOF

    auth_tcp = "none"
    auth_unix_rw = "none"
    ca_file = "/etc/pki/CA/cacert.pem"
    cert_file = "/etc/pki/libvirt/servercert.pem"
    key_file = "/etc/pki/libvirt/private/serverkey.pem"
    listen_addr = "0.0.0.0"
    listen_tcp = 0
    listen_tls = 1
    log_level = 3
    log_outputs = "1:file:/var/log/libvirt/libvirtd.log"
    EOF
  libvirt-init-modules.sh: |
    #!/bin/bash

    set -ex
    export HOME=/tmp
    # modprobe.d from the host, bind-mounted into this container.
    KVM_QEMU_CONF_HOST="/etc/modprobe.d_host/qemu-system-x86.conf"

    # Configure nested virtualization once (skip if the host already has
    # the config file). Intel (vmx) and AMD (svm) need different modules.
    if [[ ! -f "${KVM_QEMU_CONF_HOST}" ]]; then
      if grep vmx /proc/cpuinfo; then
        cat << EOF > ${KVM_QEMU_CONF_HOST}
    options kvm_intel nested=1
    options kvm_intel enable_apicv=1
    options kvm_intel ept=1
    EOF
        # Reload so the new options take effect; -r may fail if the
        # module is in use, which is tolerated.
        modprobe -r kvm_intel || true
        modprobe kvm_intel nested=1
      elif grep svm /proc/cpuinfo; then
        cat << EOF > ${KVM_QEMU_CONF_HOST}
    options kvm_amd nested=1
    EOF
        modprobe -r kvm_amd || true
        modprobe kvm_amd nested=1
      else
        echo "Nested virtualization is not supported"
      fi
    fi
  libvirt.sh: |
    #!/bin/bash

    set -ex

    # Block until the given path exists, polling once per second.
    # $1: path of the file to wait for.
    wait_for_file() {
      local file=$1

      # Quote the expansion so the path is not subject to word
      # splitting or glob expansion (ShellCheck SC2086).
      while [ ! -f "$file" ]; do
        sleep 1
      done
    }

    # TLS material is published by separate containers; block until every
    # file libvirtd/QEMU is configured with exists.

    # CA certificates
    wait_for_file /etc/pki/CA/cacert.pem
    wait_for_file /etc/pki/qemu/ca-cert.pem

    # libvirt and QEMU server/client certificates
    wait_for_file /etc/pki/libvirt/servercert.pem
    wait_for_file /etc/pki/libvirt/clientcert.pem
    wait_for_file /etc/pki/qemu/server-cert.pem
    wait_for_file /etc/pki/qemu/client-cert.pem

    # ... and their private keys
    wait_for_file /etc/pki/libvirt/private/serverkey.pem
    wait_for_file /etc/pki/libvirt/private/clientkey.pem
    wait_for_file /etc/pki/qemu/server-key.pem
    wait_for_file /etc/pki/qemu/client-key.pem

    # VNC TLS material
    wait_for_file /etc/pki/libvirt-vnc/ca-cert.pem
    wait_for_file /etc/pki/libvirt-vnc/server-cert.pem
    wait_for_file /etc/pki/libvirt-vnc/server-key.pem

    # A libvirtd already running on the host (e.g. a host package or a
    # leftover from a previous pod) would conflict with the daemon started
    # below, so scan /proc/*/comm for one and kill it.
    if [ -n "$(cat /proc/*/comm 2>/dev/null | grep -w libvirtd)" ]; then
      set +x  # silence tracing for the per-process scan
      for proc in $(ls /proc/*/comm 2>/dev/null); do
        if [ "x$(cat $proc 2>/dev/null | grep -w libvirtd)" == "xlibvirtd" ]; then
          set -x
          # $proc is /proc/<pid>/comm; field 3 is <pid>
          libvirtpid=$(echo $proc | cut -f 3 -d '/')
          echo "WARNING: libvirtd daemon already running on host" 1>&2
          echo "$(cat "/proc/${libvirtpid}/status" 2>/dev/null | grep State)" 1>&2
          # Best-effort kill: the process may already be gone.
          kill -9 "$libvirtpid" || true
          set +x
        fi
      done
      set -x
    fi

    # Drop any stale pid file left over from a previous run; the start-up
    # wait below keys off this file.
    rm -f /var/run/libvirtd.pid

    # Make /dev/kvm usable by the kvm group when the device exists.
    if [[ -c /dev/kvm ]]; then
        chmod 660 /dev/kvm
        chown root:kvm /dev/kvm
    fi

    # Set up cgroups to use when breaking out of the Kubernetes-defined
    # groups: collect every controller present on this host — cgroup v1
    # (per-controller directory) or v2 (listed in cgroup.controllers) —
    # and create the osh-libvirt group that libvirtd will run under.
    CGROUPS=""
    for CGROUP in blkio cpu devices freezer hugetlb memory net_cls perf_event rdma misc pids; do
      # NOTE(review): grep without -q echoes matches into the log; it is
      # harmless (exit status is all that matters here) but noisy.
      if [ -d /sys/fs/cgroup/${CGROUP} ] || grep -w $CGROUP /sys/fs/cgroup/cgroup.controllers; then
        CGROUPS+="${CGROUP},"
      fi
    done
    # ${CGROUPS%,} strips the trailing comma from the accumulated list.
    cgcreate -g ${CGROUPS%,}:/osh-libvirt

    # We assume that if hugepage count > 0, then hugepages should be exposed to libvirt/qemu
    hp_count="$(cat /proc/meminfo | grep HugePages_Total | tr -cd '[:digit:]')"
    # Leading 0 keeps the test a valid integer comparison even if
    # hp_count came back empty.
    if [ 0"$hp_count" -gt 0 ]; then

      echo "INFO: Detected hugepage count of '$hp_count'. Enabling hugepage settings for libvirt/qemu."

      # Enable KVM hugepages for QEMU: flip an existing KVM_HUGEPAGES=0
      # line in place, otherwise append the setting (also covers the case
      # where /etc/default/qemu-kvm does not exist yet).
      if [ -n "$(grep KVM_HUGEPAGES=0 /etc/default/qemu-kvm)" ]; then
        sed -i 's/.*KVM_HUGEPAGES=0.*/KVM_HUGEPAGES=1/g' /etc/default/qemu-kvm
      else
        echo KVM_HUGEPAGES=1 >> /etc/default/qemu-kvm
      fi

      # Ensure that the hugepage mount location is available/mapped inside the
      # container. This assumes use of the default ubuntu dev-hugepages.mount
      # systemd unit which mounts hugepages at this location.
      if [ ! -d /dev/hugepages ]; then
        echo "ERROR: Hugepages configured in kernel, but libvirtd container cannot access /dev/hugepages"
        exit 1
      fi
    fi

    # When any ceph cinder secret uuid is configured, start a temporary
    # libvirtd in the background just long enough to define the libvirt
    # secrets, then stop it before launching the long-lived daemon.
    if [ -n "${LIBVIRT_CEPH_CINDER_SECRET_UUID}" ] || [ -n "${LIBVIRT_EXTERNAL_CEPH_CINDER_SECRET_UUID}" ] ; then

      cgexec -g ${CGROUPS%,}:/osh-libvirt systemd-run --scope --slice=system libvirtd --listen &

      tmpsecret=$(mktemp --suffix .xml)
      if [ -n "${LIBVIRT_EXTERNAL_CEPH_CINDER_SECRET_UUID}" ] ; then
        # NOTE(review): tmpsecret2 is created and cleaned up but never
        # written — create_virsh_libvirt_secret reuses ${tmpsecret} for
        # both secrets. Confirm whether tmpsecret2 is dead code.
        tmpsecret2=$(mktemp --suffix .xml)
      fi
      # Remove the temp secret XML files; they can hold key material.
      function cleanup {
        rm -f "${tmpsecret}"
        if [ -n "${LIBVIRT_EXTERNAL_CEPH_CINDER_SECRET_UUID}" ] ; then
          rm -f "${tmpsecret2}"
        fi
      }
      trap cleanup EXIT

      # Wait until the temporary libvirtd has written its pid file.
      TIMEOUT=60
      while [[ ! -f /var/run/libvirtd.pid ]]; do
        if [[ ${TIMEOUT} -gt 0 ]]; then
          let TIMEOUT-=1
          sleep 1
        else
          echo "ERROR: libvirt did not start in time (pid file missing)"
          exit 1
        fi
      done

      # The pid file can appear before the socket does; virsh needs the
      # socket, so wait for it separately.
      TIMEOUT=10
      while [[ ! -e /var/run/libvirt/libvirt-sock ]]; do
        if [[ ${TIMEOUT} -gt 0 ]]; then
          let TIMEOUT-=1
          sleep 1
        else
          echo "ERROR: libvirt did not start in time (socket missing)"
          exit 1
        fi
      done

      # Define a libvirt ceph secret with uuid $2 for cephx user $1, then
      # load the base64 keyring $3 into it.
      function create_virsh_libvirt_secret {
        sec_user=$1
        sec_uuid=$2
        sec_ceph_keyring=$3
        # NOTE(review): the usage name below carries a stray '.' before
        # ' secret'. The name is only a label (consumers match on the
        # uuid), so this looks cosmetic — confirm before changing it.
        cat > ${tmpsecret} <<EOF
    <secret ephemeral='no' private='no'>
      <uuid>${sec_uuid}</uuid>
      <usage type='ceph'>
        <name>client.${sec_user}. secret</name>
      </usage>
    </secret>
    EOF
        virsh secret-define --file ${tmpsecret}
        virsh secret-set-value --secret "${sec_uuid}" --base64 "${sec_ceph_keyring}"
      }

      # If only the user name was provided, extract the key from its
      # keyring file ("key = <base64>" -> third field).
      if [ -z "${CEPH_CINDER_KEYRING}" ] && [ -n "${CEPH_CINDER_USER}" ] ; then
        CEPH_CINDER_KEYRING=$(awk '/key/{print $3}' /etc/ceph/ceph.client.${CEPH_CINDER_USER}.keyring)
      fi
      if [ -n "${CEPH_CINDER_USER}" ] ; then
        create_virsh_libvirt_secret ${CEPH_CINDER_USER} ${LIBVIRT_CEPH_CINDER_SECRET_UUID} ${CEPH_CINDER_KEYRING}
      fi

      # Optional second secret for an external ceph cluster.
      if [ -n "${LIBVIRT_EXTERNAL_CEPH_CINDER_SECRET_UUID}" ] ; then
        EXTERNAL_CEPH_CINDER_KEYRING=$(cat /tmp/external-ceph-client-keyring)
        create_virsh_libvirt_secret ${EXTERNAL_CEPH_CINDER_USER} ${LIBVIRT_EXTERNAL_CEPH_CINDER_SECRET_UUID} ${EXTERNAL_CEPH_CINDER_KEYRING}
      fi

      # Explicit cleanup now; the EXIT trap would otherwise only fire
      # when the long-running daemon below terminates.
      cleanup

      # stop libvirtd; we needed it up to create secrets
      LIBVIRTD_PID=$(cat /var/run/libvirtd.pid)
      kill $LIBVIRTD_PID
      # Block until the old daemon has fully exited before restarting.
      tail --pid=$LIBVIRTD_PID -f /dev/null

    fi

    # NOTE(vsaienko): changing CGROUP is required as restart of the pod will cause domains restarts
    # Launch the long-lived libvirtd in the osh-libvirt cgroup, in the
    # foreground, as this container's main process.
    cgexec -g ${CGROUPS%,}:/osh-libvirt systemd-run --scope --slice=system libvirtd --listen
  wait-for-libvirt.sh: |
    #!/bin/bash

    set -xe

    # NOTE(mnaser): We use this script in the postStart hook of the libvirt
    #               container to ensure that the libvirt daemon is running
    #               before we start the exporter.
    # Poll once per second until virsh can talk to the daemon.
    while ! virsh list --all; do
        echo "Waiting for libvirt to be ready..."
        sleep 1
    done
kind: ConfigMap
metadata:
  annotations:
    meta.helm.sh/release-name: libvirt
    meta.helm.sh/release-namespace: openstack
  creationTimestamp: "2026-04-27T21:28:26Z"
  labels:
    app.kubernetes.io/managed-by: Helm
  name: libvirt-libvirt-default-bin
  namespace: openstack
  resourceVersion: "14867"
  uid: 48a065f7-1db4-4dc3-8ac1-771fbba03ec0
