From 11f450b018fc6424610b39bbef11ceb19a624710 Mon Sep 17 00:00:00 2001
From: Jing Zhang
Date: Sat, 15 Apr 2023 20:18:18 +0800
Subject: [PATCH] Refactor spec and remove BuildRequires: mold and xmlstarlet

Signed-off-by: Jing Zhang
---
 ceph.spec | 1604 ++++++++++++++++++++++++-----------------------------
 1 file changed, 726 insertions(+), 878 deletions(-)

diff --git a/ceph.spec b/ceph.spec
index 5d2b14d..f0052bf 100644
--- a/ceph.spec
+++ b/ceph.spec
@@ -1,9 +1,12 @@
-%define anolis_release 1
+%define anolis_release 2
 %global _hardened_build 1
+%bcond_with cmake_verbose_logging
+%bcond_with cephfs_java
+%bcond_with jaeger
 %bcond_with make_check
+%bcond_with system_arrow
 %bcond_with zbd
-%bcond_with cmake_verbose_logging
 %bcond_without ceph_test_package
 %bcond_without tcmalloc
 %bcond_without rbd_ssd_cache
@@ -18,24 +21,20 @@
 %else
 %bcond_without system_pmdk
 %endif
-%bcond_without selinux
-%bcond_with cephfs_java
 %bcond_without amqp_endpoint
 %bcond_without kafka_endpoint
 %bcond_without lttng
 %bcond_without libradosstriper
-%bcond_without ocf
-%global luarocks_package_name luarocks
 %bcond_without lua_packages
-%global _remote_tarball_prefix https://download.ceph.com/tarballs/
-
-%bcond_with jaeger
+%bcond_without ocf
+%bcond_without selinux
 %bcond_without cephfs_shell
-%bcond_with system_arrow
 %bcond_without system_utf8proc
-%global weak_deps 1
+%bcond_without weak_deps
+
+%global luarocks_package_name luarocks
+%global _remote_tarball_prefix https://download.ceph.com/tarballs/
 %if %{with selinux}
-# get selinux policy version
 %global _selinux_policy_version 0.0.0
 %endif
@@ -45,7 +44,6 @@
 %{!?python3_version_nodots: %global python3_version_nodots 3}
 %{!?python3_version: %global python3_version 3}
-# use multi-threaded xz compression: xz level 7 using ncpus threads
 %global _source_payload w7T%{_smp_build_ncpus}.xzdio
 %global _binary_payload w7T%{_smp_build_ncpus}.xzdio
@@ -57,23 +55,14 @@
 echo $jobs
 )
 %if 0%{?_smp_ncpus_max} == 0
-# 3.0 GiB mem per job
-# SUSE distros use limit_build in the place of smp_limit_mem_per_job, please
-# be sure to update it (in the build section, below) as well when changing this
-# number.
%global _smp_ncpus_max %{smp_limit_mem_per_job 3000000} %endif -################################################################################# -# main package definition -################################################################################# Name: ceph Version: 17.2.5 Release: %{anolis_release}%{?dist} Epoch: 2 - Summary: User space components of the Ceph file system -#License: LGPL-2.1 and LGPL-3.0 and CC-BY-SA-3.0 and GPL-2.0 and BSL-1.0 and BSD-3-Clause and MIT License: (LGPLv2+ or LGPLv3) and CC-BY-SA-3.0 and GPLv2 and Boost and BSD and MIT URL: http://ceph.com/ Source0: https://download.ceph.com/tarballs/ceph-%{version}.tar.gz @@ -94,72 +83,29 @@ Patch0025: 0025-selinux-prepare-for-anon-inode-controls-enablement.patch %if %{without bundled_boost} Patch1001: ceph-17.2.0-pybind-boost-1.74.patch Patch1002: ceph-17.2.5-boost-1.81.patch -Patch1003: ceph-17.2.0-deprecated-boost.patch -%endif -################################################################################# -# dependencies that apply across all distro families -################################################################################# -Requires: ceph-osd = %{EVR} -Requires: ceph-mds = %{EVR} -Requires: ceph-mgr = %{EVR} -Requires: ceph-mon = %{EVR} +Patch1003: %{name}-17.2.0-deprecated-boost.patch +%endif +Requires: %{name}-osd = %{EVR} %{name}-mds = %{EVR} %{name}-mgr = %{EVR} %{name}-mon = %{EVR} Requires(post): binutils -%if 0%{with cephfs_java} -BuildRequires: java-devel -BuildRequires: sharutils +%if 0%{with %{name}fs_java} +BuildRequires: java-devel sharutils %endif %if 0%{with selinux} -BuildRequires: checkpolicy -BuildRequires: selinux-policy-devel -%endif -BuildRequires: gperf -BuildRequires: cmake > 3.5 -BuildRequires: fuse3-devel -BuildRequires: gcc-c++ -BuildRequires: libatomic -%ifarch x86_64 aarch64 -BuildRequires: mold +BuildRequires: checkpolicy selinux-policy-devel %endif +BuildRequires: gperf cmake > 3.5 fuse3-devel gcc-c++ libatomic %if 0%{with tcmalloc} -# libprofiler did not build on ppc64le until 2.7.90 -BuildRequires: gperftools-devel >= 2.7.90 -BuildRequires: libunwind-devel -BuildRequires: gperftools-devel >= 2.6.1 -%endif -BuildRequires: libaio-devel -BuildRequires: libblkid-devel >= 2.17 -BuildRequires: cryptsetup-devel -BuildRequires: libcurl-devel -BuildRequires: libcap-ng-devel -BuildRequires: fmt-devel >= 6.2.1 -BuildRequires: liburing-devel -BuildRequires: pkgconfig(libudev) -BuildRequires: libnl3-devel -BuildRequires: liboath-devel -BuildRequires: libtool -BuildRequires: libxml2-devel -BuildRequires: libzstd-devel -BuildRequires: ninja-build -BuildRequires: ncurses-devel -BuildRequires: libicu-devel -BuildRequires: patch -BuildRequires: perl -BuildRequires: pkgconfig -BuildRequires: procps -BuildRequires: python%{python3_pkgversion} -BuildRequires: python%{python3_pkgversion}-devel -BuildRequires: python%{python3_pkgversion}-setuptools -BuildRequires: python%{python3_pkgversion}-Cython -BuildRequires: snappy-devel -BuildRequires: sqlite-devel -BuildRequires: sudo -BuildRequires: pkgconfig(udev) -BuildRequires: valgrind-devel -BuildRequires: which -BuildRequires: xfsprogs-devel -BuildRequires: xmlstarlet -BuildRequires: nasm -BuildRequires: lua-devel +BuildRequires: gperftools-devel >= 2.7.90 libunwind-devel gperftools-devel >= 2.6.1 +%endif +BuildRequires: libaio-devel libblkid-devel >= 2.17 cryptsetup-devel libcurl-devel +BuildRequires: libcap-ng-devel fmt-devel >= 6.2.1 liburing-devel pkgconfig(libudev) +BuildRequires: libnl3-devel liboath-devel libtool libxml2-devel 
libzstd-devel +BuildRequires: ninja-build ncurses-devel libicu-devel patch perl procps +BuildRequires: python%{python3_pkgversion} python%{python3_pkgversion}-devel +BuildRequires: python%{python3_pkgversion}-setuptools python%{python3_pkgversion}-Cython +BuildRequires: snappy-devel sqlite-devel sudo +BuildRequires: pkgconfig(udev) pkgconfig +BuildRequires: valgrind-devel which xfsprogs-devel nasm lua-devel xmlstarlet %if 0%{with jaeger} BuildRequires: yaml-cpp-devel >= 0.6 %endif @@ -173,72 +119,48 @@ BuildRequires: librdkafka-devel BuildRequires: %{luarocks_package_name} %endif %if 0%{with make_check} -BuildRequires: hostname -BuildRequires: jq -BuildRequires: libuuid-devel +BuildRequires: hostname jq libuuid-devel socat BuildRequires: python%{python3_pkgversion}-bcrypt BuildRequires: python%{python3_pkgversion}-pecan BuildRequires: python%{python3_pkgversion}-requests BuildRequires: python%{python3_pkgversion}-dateutil BuildRequires: python%{python3_pkgversion}-coverage BuildRequires: python%{python3_pkgversion}-pyOpenSSL -BuildRequires: socat %endif %if 0%{with zbd} BuildRequires: libzbd-devel %endif -BuildRequires: thrift-devel >= 0.13.0 -BuildRequires: re2-devel +BuildRequires: thrift-devel >= 0.13.0 re2-devel %if 0%{with jaeger} -BuildRequires: bison -BuildRequires: flex -BuildRequires: json-devel -BuildRequires: libevent-devel +BuildRequires: bison flex json-devel libevent-devel %endif %if 0%{with system_pmdk} -BuildRequires: libpmem-devel -BuildRequires: libpmemobj-devel +BuildRequires: libpmem-devel libpmemobj-devel %endif %if 0%{with system_arrow} -BuildRequires: arrow-devel -BuildRequires: parquet-devel +BuildRequires: arrow-devel parquet-devel %else BuildRequires: xsimd-devel %endif %if 0%{with system_utf8proc} BuildRequires: utf8proc-devel %endif -################################################################################# -# distro-conditional dependencies -################################################################################# + Requires: systemd -BuildRequires: boost-devel -BuildRequires: boost-random -BuildRequires: nss-devel -BuildRequires: keyutils-libs-devel -BuildRequires: libibverbs-devel -BuildRequires: librdmacm-devel -BuildRequires: ninja-build -BuildRequires: openldap-devel -BuildRequires: openssl-devel -BuildRequires: CUnit-devel +BuildRequires: boost-devel boost-random nss-devel ninja-build +BuildRequires: keyutils-libs-devel libibverbs-devel librdmacm-devel +BuildRequires: openldap-devel openssl-devel CUnit-devel BuildRequires: python%{python3_pkgversion}-devel BuildRequires: python%{python3_pkgversion}-prettytable BuildRequires: python%{python3_pkgversion}-pyyaml BuildRequires: python%{python3_pkgversion}-sphinx -BuildRequires: lz4-devel >= 1.7 -BuildRequires: golang +BuildRequires: lz4-devel >= 1.7 golang %if 0%{with make_check} -BuildRequires: golang -BuildRequires: golang-github-prometheus -BuildRequires: libtool-ltdl-devel -BuildRequires: xmlsec1 -BuildRequires: xmlsec1-devel +BuildRequires: golang golang-github-prometheus libtool-ltdl-devel +BuildRequires: xmlsec1 xmlsec1-devel xmlsec1-openssl xmlsec1-openssl-devel %ifarch x86_64 BuildRequires: xmlsec1-nss %endif -BuildRequires: xmlsec1-openssl -BuildRequires: xmlsec1-openssl-devel BuildRequires: python%{python3_pkgversion}-cherrypy BuildRequires: python%{python3_pkgversion}-jwt BuildRequires: python%{python3_pkgversion}-routes @@ -246,13 +168,10 @@ BuildRequires: python%{python3_pkgversion}-scipy BuildRequires: python%{python3_pkgversion}-werkzeug BuildRequires: 
python%{python3_pkgversion}-pyOpenSSL %endif -# lttng and babeltrace for rbd-replay-prep %if %{with lttng} -BuildRequires: lttng-ust-devel -BuildRequires: libbabeltrace-devel +BuildRequires: lttng-ust-devel libbabeltrace-devel %endif BuildRequires: expat-devel -#hardened-cc1 BuildRequires: /usr/bin/pathfix.py %if %{with bundled_boost} Provides: bundled(boost) = 1.75.0 @@ -262,61 +181,59 @@ Provides: bundled(boost) = 1.75.0 Ceph is a massively scalable, open-source, distributed storage system that runs on commodity hardware and delivers object, block and file system storage. - -################################################################################# -# subpackages -################################################################################# %package base Summary: Ceph Base Package -Provides: ceph-test:/usr/bin/ceph-kvstore-tool -Requires: ceph-common = %{EVR} -Requires: librbd1 = %{EVR} -Requires: librados2 = %{EVR} -Requires: libcephfs2 = %{EVR} -Requires: librgw2 = %{EVR} +Provides: %{name}-test:/usr/bin/%{name}-kvstore-tool +Requires: %{name}-common = %{EVR} +Requires: librbd1 = %{EVR} librados2 = %{EVR} +Requires: lib%{name}fs2 = %{EVR} librgw2 = %{EVR} %if 0%{with selinux} -Requires: ceph-selinux = %{EVR} -%endif -Requires: findutils -Requires: grep -Requires: logrotate -Requires: parted -Requires: psmisc -Requires: util-linux -Requires: which -%if 0%{?weak_deps} -Recommends: chrony -Recommends: nvme-cli -Recommends: smartmontools +Requires: %{name}-selinux = %{EVR} +%endif +Requires: findutils grep logrotate parted psmisc util-linux which +%if %{with weak_deps} +Recommends: chrony nvme-cli smartmontools %endif %description base -Base is the package that includes all the files shared amongst ceph servers +Base is the package that includes all the files shared amongst %{name} servers -%package -n cephadm +%package -n %{name}adm Summary: Utility to bootstrap Ceph clusters BuildArch: noarch -Requires: lvm2 -Requires: python%{python3_pkgversion} -Requires: openssh-server -Requires: which -%if 0%{?weak_deps} +Requires: lvm2 python%{python3_pkgversion} openssh-server which +%if %{with weak_deps} Recommends: podman >= 2.0.2 %endif -%description -n cephadm + +%description -n %{name}adm Utility to bootstrap a Ceph cluster and manage Ceph daemons deployed with systemd and podman. -%package -n ceph-common +%package -n %{name}fs-mirror +Summary: Ceph daemon for mirroring CephFS snapshots +Requires: %{name}-base = %{EVR} +Requires: librados2 = %{EVR} +Requires: lib%{name}fs2 = %{EVR} +%description -n %{name}fs-mirror +Daemon for mirroring CephFS snapshots between Ceph clusters. + +%package -n %{name}fs-top +Summary: top(1) like utility for Ceph Filesystem +BuildArch: noarch +Requires: python%{python3_pkgversion}-rados +%description -n %{name}fs-top +This package provides a top(1) like utility to display Ceph Filesystem metrics +in realtime. 
+ +%package -n %{name}-common Summary: Ceph Common -Requires: librbd1 = %{EVR} -Requires: librados2 = %{EVR} -Requires: libcephfs2 = %{EVR} +Requires: librbd1 = %{EVR} librados2 = %{EVR} lib%{name}fs2 = %{EVR} Requires: python%{python3_pkgversion}-rados = %{EVR} Requires: python%{python3_pkgversion}-rbd = %{EVR} -Requires: python%{python3_pkgversion}-cephfs = %{EVR} +Requires: python%{python3_pkgversion}-%{name}fs = %{EVR} Requires: python%{python3_pkgversion}-rgw = %{EVR} -Requires: python%{python3_pkgversion}-ceph-argparse = %{EVR} -Requires: python%{python3_pkgversion}-ceph-common = %{EVR} +Requires: python%{python3_pkgversion}-%{name}-argparse = %{EVR} +Requires: python%{python3_pkgversion}-%{name}-common = %{EVR} %if 0%{with jaeger} Requires: libjaeger = %{EVR} %endif @@ -326,76 +243,110 @@ Requires: libradosstriper1 = %{EVR} %endif %{?systemd_requires} Requires: systemd-udev -%description -n ceph-common -Common utilities to mount and interact with a ceph storage cluster. + +%description -n %{name}-common +Common utilities to mount and interact with a %{name} storage cluster. Comprised of files that are common to Ceph clients and servers. +%package -n %{name}-exporter +Summary: Daemon for exposing perf counters as Prometheus metrics +Requires: %{name}-base = %{EVR} +%description -n %{name}-exporter +Daemon for exposing perf counters as Prometheus metrics + +%package fuse +Summary: Ceph fuse-based client +Requires: fuse +Requires: python%{python3_pkgversion} +%description fuse +FUSE based client for Ceph distributed network file system + +%package immutable-object-cache +Summary: Ceph daemon for immutable object cache +Requires: %{name}-base = %{EVR} +Requires: librados2 = %{EVR} +%description immutable-object-cache +Daemon for immutable object cache. + %package mds Summary: Ceph Metadata Server Daemon -Requires: ceph-base = %{EVR} +Requires: %{name}-base = %{EVR} %description mds -ceph-mds is the metadata server daemon for the Ceph distributed file system. -One or more instances of ceph-mds collectively manage the file system +%{name}-mds is the metadata server daemon for the Ceph distributed file system. +One or more instances of %{name}-mds collectively manage the file system namespace, coordinating access to the shared OSD cluster. -%package mon -Summary: Ceph Monitor Daemon -Provides: ceph-test:/usr/bin/ceph-monstore-tool -Requires: ceph-base = %{EVR} -%description mon -ceph-mon is the cluster monitor daemon for the Ceph distributed file -system. One or more instances of ceph-mon form a Paxos part-time -parliament cluster that provides extremely reliable and durable storage -of cluster membership, configuration, and state. - %package mgr Summary: Ceph Manager Daemon -Requires: ceph-base = %{EVR} -Requires: ceph-mgr-modules-core = %{EVR} -Requires: libcephsqlite = %{EVR} -%if 0%{?weak_deps} -Recommends: ceph-mgr-dashboard = %{EVR} -Recommends: ceph-mgr-diskprediction-local = %{EVR} -Recommends: ceph-mgr-k8sevents = %{EVR} -Recommends: ceph-mgr-cephadm = %{EVR} +Requires: %{name}-base = %{EVR} +Requires: %{name}-mgr-modules-core = %{EVR} +Requires: lib%{name}sqlite = %{EVR} +%if %{with weak_deps} +Recommends: %{name}-mgr-dashboard = %{EVR} +Recommends: %{name}-mgr-diskprediction-local = %{EVR} +Recommends: %{name}-mgr-k8sevents = %{EVR} +Recommends: %{name}-mgr-%{name}adm = %{EVR} Recommends: python%{python3_pkgversion}-influxdb %endif %description mgr -ceph-mgr enables python modules that provide services (such as the REST -module derived from Calamari) and expose CLI hooks. 
ceph-mgr gathers +%{name}-mgr enables python modules that provide services (such as the REST +module derived from Calamari) and expose CLI hooks. %{name}-mgr gathers the cluster maps, the daemon metadata, and performance counters, and exposes all these to the python modules. +%package mgr-%{name}adm +Summary: Ceph Manager module for %{name}adm-based orchestration +BuildArch: noarch +Requires: %{name}-mgr = %{EVR} +Requires: python%{python3_pkgversion}-asyncssh +Requires: python%{python3_pkgversion}-natsort +Requires: %{name}adm = %{EVR} +Requires: openssh-clients +Requires: python%{python3_pkgversion}-cherrypy +Requires: python%{python3_pkgversion}-jinja2 +%description mgr-%{name}adm +%{name}-mgr-%{name}adm is a %{name}-mgr module for orchestration functions using +the integrated %{name}adm deployment tool management operations. + %package mgr-dashboard Summary: Ceph Dashboard BuildArch: noarch -Requires: ceph-mgr = %{EVR} -Requires: ceph-grafana-dashboards = %{EVR} -Requires: ceph-prometheus-alerts = %{EVR} +Requires: %{name}-mgr = %{EVR} +Requires: %{name}-grafana-dashboards = %{EVR} +Requires: %{name}-prometheus-alerts = %{EVR} Requires: python%{python3_pkgversion}-cherrypy Requires: python%{python3_pkgversion}-jwt Requires: python%{python3_pkgversion}-routes Requires: python%{python3_pkgversion}-werkzeug -%if 0%{?weak_deps} +%if %{with weak_deps} Recommends: python%{python3_pkgversion}-saml %endif %description mgr-dashboard -ceph-mgr-dashboard is a manager module, providing a web-based application +%{name}-mgr-dashboard is a manager module, providing a web-based application to monitor and manage many aspects of a Ceph cluster and related components. -See the Dashboard documentation at http://docs.ceph.com/ for details and a +See the Dashboard documentation at http://docs.%{name}.com/ for details and a detailed feature overview. %package mgr-diskprediction-local Summary: Ceph Manager module for predicting disk failures BuildArch: noarch -Requires: ceph-mgr = %{EVR} +Requires: %{name}-mgr = %{EVR} Requires: python%{python3_pkgversion}-numpy Requires: python%{python3_pkgversion}-scikit-learn Requires: python3-scipy %description mgr-diskprediction-local -ceph-mgr-diskprediction-local is a ceph-mgr module that tries to predict +%{name}-mgr-diskprediction-local is a %{name}-mgr module that tries to predict disk failures using local algorithms and machine-learning databases. +%package mgr-k8sevents +BuildArch: noarch +Summary: Ceph Manager module to orchestrate %{name}-events to kubernetes' events API +Requires: %{name}-mgr = %{EVR} +Requires: python%{python3_pkgversion}-kubernetes +%description mgr-k8sevents +%{name}-mgr-k8sevents is a %{name}-mgr module that sends every %{name}-events +to kubernetes events API + %package mgr-modules-core Summary: Ceph Manager modules which are always enabled BuildArch: noarch @@ -407,66 +358,50 @@ Requires: python%{python3_pkgversion}-dateutil Requires: python%{python3_pkgversion}-cherrypy Requires: python%{python3_pkgversion}-pyyaml Requires: python%{python3_pkgversion}-werkzeug -%if 0%{?weak_deps} -Recommends: ceph-mgr-rook = %{EVR} +%if %{with weak_deps} +Recommends: %{name}-mgr-rook = %{EVR} %endif %description mgr-modules-core -ceph-mgr-modules-core provides a set of modules which are always -enabled by ceph-mgr. +%{name}-mgr-modules-core provides a set of modules which are always +enabled by %{name}-mgr. 
%package mgr-rook BuildArch: noarch Summary: Ceph Manager module for Rook-based orchestration -Requires: ceph-mgr = %{EVR} +Requires: %{name}-mgr = %{EVR} Requires: python%{python3_pkgversion}-kubernetes Requires: python%{python3_pkgversion}-jsonpatch %description mgr-rook -ceph-mgr-rook is a ceph-mgr module for orchestration functions using +%{name}-mgr-rook is a %{name}-mgr module for orchestration functions using a Rook backend. -%package mgr-k8sevents -BuildArch: noarch -Summary: Ceph Manager module to orchestrate ceph-events to kubernetes' events API -Requires: ceph-mgr = %{EVR} -Requires: python%{python3_pkgversion}-kubernetes -%description mgr-k8sevents -ceph-mgr-k8sevents is a ceph-mgr module that sends every ceph-events -to kubernetes' events API - -%package mgr-cephadm -Summary: Ceph Manager module for cephadm-based orchestration -BuildArch: noarch -Requires: ceph-mgr = %{EVR} -Requires: python%{python3_pkgversion}-asyncssh -Requires: python%{python3_pkgversion}-natsort -Requires: cephadm = %{EVR} -Requires: openssh-clients -Requires: python%{python3_pkgversion}-cherrypy -Requires: python%{python3_pkgversion}-jinja2 -%description mgr-cephadm -ceph-mgr-cephadm is a ceph-mgr module for orchestration functions using -the integrated cephadm deployment tool management operations. - -%package fuse -Summary: Ceph fuse-based client -Requires: fuse -Requires: python%{python3_pkgversion} -%description fuse -FUSE based client for Ceph distributed network file system - -%package -n cephfs-mirror -Summary: Ceph daemon for mirroring CephFS snapshots -Requires: ceph-base = %{EVR} -Requires: librados2 = %{EVR} -Requires: libcephfs2 = %{EVR} -%description -n cephfs-mirror -Daemon for mirroring CephFS snapshots between Ceph clusters. +%package mon +Summary: Ceph Monitor Daemon +Provides: %{name}-test:/usr/bin/%{name}-monstore-tool +Requires: %{name}-base = %{EVR} +%description mon +%{name}-mon is the cluster monitor daemon for the Ceph distributed file +system. One or more instances of %{name}-mon form a Paxos part-time +parliament cluster that provides extremely reliable and durable storage +of cluster membership, configuration, and state. -%package -n ceph-exporter -Summary: Daemon for exposing perf counters as Prometheus metrics -Requires: ceph-base = %{EVR} -%description -n ceph-exporter -Daemon for exposing perf counters as Prometheus metrics +%package radosgw +Summary: Rados REST gateway +Requires: %{name}-base = %{EVR} +%if 0%{with selinux} +Requires: %{name}-selinux = %{EVR} +%endif +Requires: librados2 = %{EVR} +Requires: librgw2 = %{EVR} +Requires: mailcap +%if %{with weak_deps} +Recommends: gawk +%endif +%description radosgw +RADOS is a distributed object store used by the Ceph distributed +storage system. This package provides a REST gateway to the +object store that aims to implement a superset of Amazon's S3 +service as well as the OpenStack Object Storage ("Swift") API. %package -n rbd-fuse Summary: Ceph fuse-based client @@ -477,20 +412,13 @@ FUSE based client to map Ceph rbd images to files %package -n rbd-mirror Summary: Ceph daemon for mirroring RBD images -Requires: ceph-base = %{EVR} +Requires: %{name}-base = %{EVR} Requires: librados2 = %{EVR} Requires: librbd1 = %{EVR} %description -n rbd-mirror Daemon for mirroring RBD images between Ceph clusters, streaming changes asynchronously. 
-%package immutable-object-cache -Summary: Ceph daemon for immutable object cache -Requires: ceph-base = %{EVR} -Requires: librados2 = %{EVR} -%description immutable-object-cache -Daemon for immutable object cache. - %package -n rbd-nbd Summary: Ceph RBD client base on NBD Requires: librados2 = %{EVR} @@ -498,37 +426,11 @@ Requires: librbd1 = %{EVR} %description -n rbd-nbd NBD based client to map Ceph rbd images to local device -%package radosgw -Summary: Rados REST gateway -Requires: ceph-base = %{EVR} -%if 0%{with selinux} -Requires: ceph-selinux = %{EVR} -%endif -Requires: librados2 = %{EVR} -Requires: librgw2 = %{EVR} -Requires: mailcap -%if 0%{?weak_deps} -Recommends: gawk -%endif -%description radosgw -RADOS is a distributed object store used by the Ceph distributed -storage system. This package provides a REST gateway to the -object store that aims to implement a superset of Amazon's S3 -service as well as the OpenStack Object Storage ("Swift") API. - -%package -n cephfs-top -Summary: top(1) like utility for Ceph Filesystem -BuildArch: noarch -Requires: python%{python3_pkgversion}-rados -%description -n cephfs-top -This package provides a top(1) like utility to display Ceph Filesystem metrics -in realtime. - %if %{with ocf} %package resource-agents Summary: OCF-compliant resource agents for Ceph daemons BuildArch: noarch -Requires: ceph-base = %{EVR} +Requires: %{name}-base = %{EVR} Requires: resource-agents %description resource-agents Resource agents for monitoring and managing Ceph daemons @@ -538,22 +440,22 @@ managers such as Pacemaker. %package osd Summary: Ceph Object Storage Daemon -Provides: ceph-test:/usr/bin/ceph-osdomap-tool -Requires: ceph-base = %{EVR} +Provides: %{name}-test:/usr/bin/%{name}-osdomap-tool +Requires: %{name}-base = %{EVR} Requires: sudo Requires: libstoragemgmt -%if 0%{?weak_deps} -Recommends: ceph-volume = %{EVR} +%if %{with weak_deps} +Recommends: %{name}-volume = %{EVR} %endif %description osd -ceph-osd is the object storage daemon for the Ceph distributed file +%{name}-osd is the object storage daemon for the Ceph distributed file system. It is responsible for storing objects on a local file system and providing access to them over the network. %package volume Summary: Ceph OSD deployment and inspection tool BuildArch: noarch -Requires: ceph-osd = %{EVR} +Requires: %{name}-osd = %{EVR} Requires: cryptsetup Requires: e2fsprogs Requires: lvm2 @@ -561,7 +463,7 @@ Requires: parted Requires: util-linux Requires: xfsprogs Requires: python%{python3_pkgversion}-setuptools -Requires: python%{python3_pkgversion}-ceph-common = %{EVR} +Requires: python%{python3_pkgversion}-%{name}-common = %{EVR} %description volume This package contains a tool to deploy OSD with different devices like lvm or physical disks, and trying to follow a predictable, and robust @@ -569,7 +471,7 @@ way of preparing, activating, and starting the deployed OSD. %package -n librados2 Summary: RADOS distributed object store client library -Obsoletes: ceph-libs < %{EVR} +Obsoletes: %{name}-libs < %{EVR} %description -n librados2 RADOS is a reliable, autonomic distributed object storage cluster developed as part of the Ceph distributed storage system. This is a @@ -579,7 +481,7 @@ store using a simple file-like interface. 
%package -n librados-devel Summary: RADOS headers Requires: librados2 = %{EVR} -Obsoletes: ceph-devel < %{EVR} +Obsoletes: %{name}-devel < %{EVR} Provides: librados2-devel = %{EVR} Obsoletes: librados2-devel < %{EVR} %description -n librados-devel @@ -611,45 +513,23 @@ Obsoletes: librgw2-devel < %{EVR} This package contains libraries and headers needed to develop programs that use RADOS gateway client library. -%package -n python%{python3_pkgversion}-rgw -Summary: Python 3 libraries for the RADOS gateway -Requires: librgw2 = %{EVR} -Requires: python%{python3_pkgversion}-rados = %{EVR} -%{?python_provide:%python_provide python%{python3_pkgversion}-rgw} -Provides: python-rgw = %{EVR} -Obsoletes: python-rgw < %{EVR} -%description -n python%{python3_pkgversion}-rgw -This package contains Python 3 libraries for interacting with Ceph RADOS -gateway. - -%package -n python%{python3_pkgversion}-rados -Summary: Python 3 libraries for the RADOS object store -Requires: python%{python3_pkgversion} -Requires: librados2 = %{EVR} -%{?python_provide:%python_provide python%{python3_pkgversion}-rados} -Provides: python-rados = %{EVR} -Obsoletes: python-rados < %{EVR} -%description -n python%{python3_pkgversion}-rados -This package contains Python 3 libraries for interacting with Ceph RADOS -object store. - -%package -n libcephsqlite +%package -n lib%{name}sqlite Summary: SQLite3 VFS for Ceph Requires: librados2 = %{EVR} -%description -n libcephsqlite +%description -n lib%{name}sqlite A SQLite3 VFS for storing and manipulating databases stored on Ceph's RADOS distributed object store. -%package -n libcephsqlite-devel +%package -n lib%{name}sqlite-devel Summary: SQLite3 VFS for Ceph headers Requires: sqlite-devel -Requires: libcephsqlite = %{EVR} +Requires: lib%{name}sqlite = %{EVR} Requires: librados-devel = %{EVR} Requires: libradospp-devel = %{EVR} -Obsoletes: ceph-devel < %{EVR} -Provides: libcephsqlite-devel = %{EVR} -Obsoletes: libcephsqlite-devel < %{EVR} -%description -n libcephsqlite-devel +Obsoletes: %{name}-devel < %{EVR} +Provides: lib%{name}sqlite-devel = %{EVR} +Obsoletes: lib%{name}sqlite-devel < %{EVR} +%description -n lib%{name}sqlite-devel A SQLite3 VFS for storing and manipulating databases stored on Ceph's RADOS distributed object store. @@ -667,7 +547,7 @@ Summary: RADOS striping interface headers Requires: libradosstriper1 = %{EVR} Requires: librados-devel = %{EVR} Requires: libradospp-devel = %{EVR} -Obsoletes: ceph-devel < %{EVR} +Obsoletes: %{name}-devel < %{EVR} Provides: libradosstriper1-devel = %{EVR} Obsoletes: libradosstriper1-devel < %{EVR} %description -n libradosstriper-devel @@ -678,7 +558,7 @@ that use RADOS striping interface. %package -n librbd1 Summary: RADOS block device client library Requires: librados2 = %{EVR} -Obsoletes: ceph-libs < %{EVR} +Obsoletes: %{name}-libs < %{EVR} %description -n librbd1 RBD is a block device striped across multiple distributed objects in RADOS, a reliable, autonomic distributed object storage cluster @@ -690,128 +570,150 @@ Summary: RADOS block device headers Requires: librbd1 = %{EVR} Requires: librados-devel = %{EVR} Requires: libradospp-devel = %{EVR} -Obsoletes: ceph-devel < %{EVR} +Obsoletes: %{name}-devel < %{EVR} Provides: librbd1-devel = %{EVR} Obsoletes: librbd1-devel < %{EVR} %description -n librbd-devel This package contains libraries and headers needed to develop programs that use RADOS block device. 
-%package -n python%{python3_pkgversion}-rbd -Summary: Python 3 libraries for the RADOS block device -Requires: librbd1 = %{EVR} -Requires: python%{python3_pkgversion}-rados = %{EVR} -%{?python_provide:%python_provide python%{python3_pkgversion}-rbd} -Provides: python-rbd = %{EVR} -Obsoletes: python-rbd < %{EVR} -%description -n python%{python3_pkgversion}-rbd -This package contains Python 3 libraries for interacting with Ceph RADOS -block device. - -%package -n libcephfs2 +%package -n lib%{name}fs2 Summary: Ceph distributed file system client library -Obsoletes: libcephfs1 < %{EVR} -Obsoletes: ceph-libs < %{EVR} -Obsoletes: ceph-libcephfs < %{EVR} -%description -n libcephfs2 +Obsoletes: lib%{name}fs1 < %{EVR} +Obsoletes: %{name}-libs < %{EVR} +Obsoletes: %{name}-lib%{name}fs < %{EVR} +%description -n lib%{name}fs2 Ceph is a distributed network file system designed to provide excellent performance, reliability, and scalability. This is a shared library allowing applications to access a Ceph distributed file system via a POSIX-like interface. -%package -n libcephfs-devel +%package -n lib%{name}fs-devel Summary: Ceph distributed file system headers -Requires: libcephfs2 = %{EVR} +Requires: lib%{name}fs2 = %{EVR} Requires: librados-devel = %{EVR} -Obsoletes: ceph-devel < %{EVR} -Provides: libcephfs2-devel = %{EVR} -Obsoletes: libcephfs2-devel < %{EVR} -%description -n libcephfs-devel +Obsoletes: %{name}-devel < %{EVR} +Provides: lib%{name}fs2-devel = %{EVR} +Obsoletes: lib%{name}fs2-devel < %{EVR} +%description -n lib%{name}fs-devel This package contains libraries and headers needed to develop programs that use Ceph distributed file system. -%package -n python%{python3_pkgversion}-cephfs +%package -n python%{python3_pkgversion}-%{name}fs Summary: Python 3 libraries for Ceph distributed file system -Requires: libcephfs2 = %{EVR} +Requires: lib%{name}fs2 = %{EVR} Requires: python%{python3_pkgversion}-rados = %{EVR} -Requires: python%{python3_pkgversion}-ceph-argparse = %{EVR} -%{?python_provide:%python_provide python%{python3_pkgversion}-cephfs} -Provides: python-cephfs = %{EVR} -Obsoletes: python-cephfs < %{EVR} -%description -n python%{python3_pkgversion}-cephfs +Requires: python%{python3_pkgversion}-%{name}-argparse = %{EVR} +%{?python_provide:%python_provide python%{python3_pkgversion}-%{name}fs} +Provides: python-%{name}fs = %{EVR} +Obsoletes: python-%{name}fs < %{EVR} +%description -n python%{python3_pkgversion}-%{name}fs This package contains Python 3 libraries for interacting with Ceph distributed file system. -%package -n python%{python3_pkgversion}-ceph-argparse +%package -n python%{python3_pkgversion}-%{name}-argparse Summary: Python 3 utility libraries for Ceph CLI -%{?python_provide:%python_provide python%{python3_pkgversion}-ceph-argparse} -%description -n python%{python3_pkgversion}-ceph-argparse +%{?python_provide:%python_provide python%{python3_pkgversion}-%{name}-argparse} +%description -n python%{python3_pkgversion}-%{name}-argparse This package contains types and routines for Python 3 used by the Ceph CLI as well as the RESTful interface. These have to do with querying the daemons for command-description information, validating user command input against those descriptions, and submitting the command to the appropriate daemon. 
-%package -n python%{python3_pkgversion}-ceph-common +%package -n python%{python3_pkgversion}-%{name}-common Summary: Python 3 utility libraries for Ceph Requires: python%{python3_pkgversion}-pyyaml -%{?python_provide:%python_provide python%{python3_pkgversion}-ceph-common} -%description -n python%{python3_pkgversion}-ceph-common +%{?python_provide:%python_provide python%{python3_pkgversion}-%{name}-common} +%description -n python%{python3_pkgversion}-%{name}-common This package contains data structures, classes and functions used by Ceph. -It also contains utilities used for the cephadm orchestrator. +It also contains utilities used for the %{name}adm orchestrator. -%if 0%{with cephfs_shell} -%package -n cephfs-shell +%package -n python%{python3_pkgversion}-rados +Summary: Python 3 libraries for the RADOS object store +Requires: python%{python3_pkgversion} +Requires: librados2 = %{EVR} +%{?python_provide:%python_provide python%{python3_pkgversion}-rados} +Provides: python-rados = %{EVR} +Obsoletes: python-rados < %{EVR} +%description -n python%{python3_pkgversion}-rados +This package contains Python 3 libraries for interacting with Ceph RADOS +object store. + +%package -n python%{python3_pkgversion}-rbd +Summary: Python 3 libraries for the RADOS block device +Requires: librbd1 = %{EVR} +Requires: python%{python3_pkgversion}-rados = %{EVR} +%{?python_provide:%python_provide python%{python3_pkgversion}-rbd} +Provides: python-rbd = %{EVR} +Obsoletes: python-rbd < %{EVR} +%description -n python%{python3_pkgversion}-rbd +This package contains Python 3 libraries for interacting with Ceph RADOS +block device. + +%package -n python%{python3_pkgversion}-rgw +Summary: Python 3 libraries for the RADOS gateway +Requires: librgw2 = %{EVR} +Requires: python%{python3_pkgversion}-rados = %{EVR} +%{?python_provide:%python_provide python%{python3_pkgversion}-rgw} +Provides: python-rgw = %{EVR} +Obsoletes: python-rgw < %{EVR} +%description -n python%{python3_pkgversion}-rgw +This package contains Python 3 libraries for interacting with Ceph RADOS +gateway. + +%if 0%{with %{name}fs_shell} +%package -n %{name}fs-shell Summary: Interactive shell for Ceph file system Requires: python%{python3_pkgversion}-cmd2 Requires: python%{python3_pkgversion}-colorama -Requires: python%{python3_pkgversion}-cephfs -%description -n cephfs-shell +Requires: python%{python3_pkgversion}-%{name}fs +%description -n %{name}fs-shell This package contains an interactive tool that allows accessing a Ceph file system without mounting it by providing a nice pseudo-shell which works like an FTP client. %endif -%if 0%{with ceph_test_package} -%package -n ceph-test +%if 0%{with %{name}_test_package} +%package -n %{name}-test Summary: Ceph benchmarks and test tools -Requires: ceph-common = %{EVR} +Requires: %{name}-common = %{EVR} Requires: xmlstarlet Requires: jq Requires: socat BuildRequires: gtest-devel BuildRequires: gmock-devel -%description -n ceph-test +%description -n %{name}-test This package contains Ceph benchmarks and test tools. %endif -%if 0%{with cephfs_java} +%if 0%{with %{name}fs_java} -%package -n libcephfs_jni1 +%package -n lib%{name}fs_jni1 Summary: Java Native Interface library for CephFS Java bindings Requires: java -Requires: libcephfs2 = %{EVR} -%description -n libcephfs_jni1 +Requires: lib%{name}fs2 = %{EVR} +%description -n lib%{name}fs_jni1 This package contains the Java Native Interface library for CephFS Java bindings. 
-%package -n libcephfs_jni-devel +%package -n lib%{name}fs_jni-devel Summary: Development files for CephFS Java Native Interface library Requires: java -Requires: libcephfs_jni1 = %{EVR} -Obsoletes: ceph-devel < %{EVR} -Provides: libcephfs_jni1-devel = %{EVR} -Obsoletes: libcephfs_jni1-devel < %{EVR} -%description -n libcephfs_jni-devel +Requires: lib%{name}fs_jni1 = %{EVR} +Obsoletes: %{name}-devel < %{EVR} +Provides: lib%{name}fs_jni1-devel = %{EVR} +Obsoletes: lib%{name}fs_jni1-devel < %{EVR} +%description -n lib%{name}fs_jni-devel This package contains the development files for CephFS Java Native Interface library. -%package -n cephfs-java +%package -n %{name}fs-java Summary: Java libraries for the Ceph File System Requires: java -Requires: libcephfs_jni1 = %{EVR} +Requires: lib%{name}fs_jni1 = %{EVR} Requires: junit BuildRequires: junit -%description -n cephfs-java +%description -n %{name}fs-java This package contains the Java libraries for the Ceph File System. %endif @@ -827,9 +729,9 @@ class plugins. %package selinux Summary: SELinux support for Ceph MON, OSD and MDS -Requires: ceph-base = %{EVR} +Requires: %{name}-base = %{EVR} Requires: policycoreutils, libselinux-utils -Requires(post): ceph-base = %{EVR} +Requires(post): %{name}-base = %{EVR} Requires(post): selinux-policy-base >= %{_selinux_policy_version}, policycoreutils, gawk Requires(postun): policycoreutils %description selinux @@ -856,16 +758,12 @@ Group: System/Monitoring %description prometheus-alerts This package provides Ceph default alerts for Prometheus. -################################################################################# -# common -################################################################################# %prep %autosetup -p1 %build -%if 0%{with cephfs_java} -# Find jni.h +%if 0%{with %{name}fs_java} for i in /usr/{lib64,lib}/jvm/java/include{,/linux}; do [ -d $i ] && java_inc="$java_inc -I$i" done @@ -880,7 +778,6 @@ env | sort %{?!_vpath_builddir:%global _vpath_builddir %{_target_platform}} -# TODO: drop this step once we can use `cmake -B` %{cmake} \ -GNinja \ -DCMAKE_BUILD_TYPE=RelWithDebInfo \ @@ -890,10 +787,10 @@ env | sort -DWITH_MANPAGE:BOOL=ON \ -DWITH_PYTHON3:STRING=%{python3_version} \ -DWITH_MGR_DASHBOARD_FRONTEND:BOOL=OFF \ -%if 0%{without ceph_test_package} +%if 0%{without %{name}_test_package} -DWITH_TESTS:BOOL=OFF \ %endif -%if 0%{with cephfs_java} +%if 0%{with %{name}fs_java} -DWITH_CEPHFS_JAVA:BOOL=ON \ %endif %if 0%{with selinux} @@ -916,7 +813,7 @@ env | sort %else -DWITH_SYSTEM_BOOST:BOOL=ON \ %endif -%if 0%{with cephfs_shell} +%if 0%{with %{name}fs_shell} -DWITH_CEPHFS_SHELL:BOOL=ON \ %endif %if 0%{with libradosstriper} @@ -956,7 +853,7 @@ env | sort -DWITH_JAEGER:BOOL=ON \ %endif -DBOOST_J:STRING=%{_smp_build_ncpus} \ -%if 0%{with ceph_test_package} +%if 0%{with %{name}_test_package} -DWITH_SYSTEM_GTEST:BOOL=ON \ %endif -DWITH_SYSTEM_ZSTD:BOOL=ON \ @@ -966,9 +863,6 @@ env | sort %endif %if 0%{with system_utf8proc} -DWITH_SYSTEM_UTF8PROC:BOOL=ON \ -%endif -%ifarch x86_64 aarch64 - -DCMAKE_LINKER=%{_bindir}/ld.mold \ %endif -DWITH_GRAFANA:BOOL=ON @@ -984,159 +878,178 @@ export GCC_COLORS= %if 0%{with make_check} %check -# run in-tree unittests -# cd %{_vpath_builddir} -# ctest "$CEPH_MFLAGS_JOBS" %endif %install %cmake_install -# we have dropped sysvinit bits -rm -f %{buildroot}/%{_sysconfdir}/init.d/ceph +rm -f %{buildroot}/%{_sysconfdir}/init.d/%{name} -install -m 0644 -D src/etc-rbdmap %{buildroot}%{_sysconfdir}/ceph/rbdmap -install -m 0644 -D etc/sysconfig/ceph 
%{buildroot}%{_sysconfdir}/sysconfig/ceph -install -m 0644 -D systemd/ceph.tmpfiles.d %{buildroot}%{_tmpfilesdir}/ceph-common.conf -install -m 0644 -D systemd/50-ceph.preset %{buildroot}%{_presetdir}/50-ceph.preset +install -m 0644 -D src/etc-rbdmap %{buildroot}%{_sysconfdir}/%{name}/rbdmap +install -m 0644 -D etc/sysconfig/%{name} %{buildroot}%{_sysconfdir}/sysconfig/%{name} +install -m 0644 -D systemd/%{name}.tmpfiles.d %{buildroot}%{_tmpfilesdir}/%{name}-common.conf +install -m 0644 -D systemd/50-%{name}.preset %{buildroot}%{_presetdir}/50-%{name}.preset mkdir -p %{buildroot}%{_sbindir} -install -m 0644 -D src/logrotate.conf %{buildroot}%{_sysconfdir}/logrotate.d/ceph -chmod 0644 %{buildroot}%{_docdir}/ceph/sample.ceph.conf -install -m 0644 -D COPYING %{buildroot}%{_docdir}/ceph/COPYING -install -m 0644 -D etc/sysctl/90-ceph-osd.conf %{buildroot}%{_sysctldir}/90-ceph-osd.conf +install -m 0644 -D src/logrotate.conf %{buildroot}%{_sysconfdir}/logrotate.d/%{name} +chmod 0644 %{buildroot}%{_docdir}/%{name}/sample.%{name}.conf +install -m 0644 -D COPYING %{buildroot}%{_docdir}/%{name}/COPYING +install -m 0644 -D etc/sysctl/90-%{name}-osd.conf %{buildroot}%{_sysctldir}/90-%{name}-osd.conf install -m 0755 -D src/tools/rbd_nbd/rbd-nbd_quiesce %{buildroot}%{_libexecdir}/rbd-nbd/rbd-nbd_quiesce -install -m 0755 src/cephadm/cephadm %{buildroot}%{_sbindir}/cephadm -mkdir -p %{buildroot}%{_sharedstatedir}/cephadm -chmod 0700 %{buildroot}%{_sharedstatedir}/cephadm -mkdir -p %{buildroot}%{_sharedstatedir}/cephadm/.ssh -chmod 0700 %{buildroot}%{_sharedstatedir}/cephadm/.ssh -touch %{buildroot}%{_sharedstatedir}/cephadm/.ssh/authorized_keys -chmod 0600 %{buildroot}%{_sharedstatedir}/cephadm/.ssh/authorized_keys +install -m 0755 src/%{name}adm/%{name}adm %{buildroot}%{_sbindir}/%{name}adm +mkdir -p %{buildroot}%{_sharedstatedir}/%{name}adm +chmod 0700 %{buildroot}%{_sharedstatedir}/%{name}adm +mkdir -p %{buildroot}%{_sharedstatedir}/%{name}adm/.ssh +chmod 0700 %{buildroot}%{_sharedstatedir}/%{name}adm/.ssh +touch %{buildroot}%{_sharedstatedir}/%{name}adm/.ssh/authorized_keys +chmod 0600 %{buildroot}%{_sharedstatedir}/%{name}adm/.ssh/authorized_keys -# udev rules install -m 0644 -D udev/50-rbd.rules %{buildroot}%{_udevrulesdir}/50-rbd.rules -# sudoers.d -install -m 0440 -D sudoers.d/ceph-smartctl %{buildroot}%{_sysconfdir}/sudoers.d/ceph-smartctl +install -m 0440 -D sudoers.d/%{name}-smartctl %{buildroot}%{_sysconfdir}/sudoers.d/%{name}-smartctl pathfix.py -pni "%{__python3} %{py3_shbang_opts}" %{buildroot}%{_bindir}/* pathfix.py -pni "%{__python3} %{py3_shbang_opts}" %{buildroot}%{_sbindir}/* -#set up placeholder directories -mkdir -p %{buildroot}%{_sysconfdir}/ceph -mkdir -p %{buildroot}%{_localstatedir}/run/ceph -mkdir -p %{buildroot}%{_localstatedir}/log/ceph -mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/tmp -mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/mon -mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/osd -mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/mds -mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/mgr -mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/crash -mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/crash/posted -mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/radosgw -mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-osd -mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-mds -mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-rgw -mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-mgr -mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-rbd 
-mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-rbd-mirror - -# prometheus alerts -install -m 644 -D monitoring/ceph-mixin/prometheus_alerts.yml %{buildroot}/etc/prometheus/ceph/ceph_default_alerts.yml +mkdir -p %{buildroot}%{_sysconfdir}/%{name} +mkdir -p %{buildroot}%{_localstatedir}/run/%{name} +mkdir -p %{buildroot}%{_localstatedir}/log/%{name} +mkdir -p %{buildroot}%{_localstatedir}/lib/%{name}/tmp +mkdir -p %{buildroot}%{_localstatedir}/lib/%{name}/mon +mkdir -p %{buildroot}%{_localstatedir}/lib/%{name}/osd +mkdir -p %{buildroot}%{_localstatedir}/lib/%{name}/mds +mkdir -p %{buildroot}%{_localstatedir}/lib/%{name}/mgr +mkdir -p %{buildroot}%{_localstatedir}/lib/%{name}/crash +mkdir -p %{buildroot}%{_localstatedir}/lib/%{name}/crash/posted +mkdir -p %{buildroot}%{_localstatedir}/lib/%{name}/radosgw +mkdir -p %{buildroot}%{_localstatedir}/lib/%{name}/bootstrap-osd +mkdir -p %{buildroot}%{_localstatedir}/lib/%{name}/bootstrap-mds +mkdir -p %{buildroot}%{_localstatedir}/lib/%{name}/bootstrap-rgw +mkdir -p %{buildroot}%{_localstatedir}/lib/%{name}/bootstrap-mgr +mkdir -p %{buildroot}%{_localstatedir}/lib/%{name}/bootstrap-rbd +mkdir -p %{buildroot}%{_localstatedir}/lib/%{name}/bootstrap-rbd-mirror + +install -m 644 -D monitoring/%{name}-mixin/prometheus_alerts.yml %{buildroot}/etc/prometheus/%{name}/%{name}_default_alerts.yml %py_byte_compile %{__python3} %{buildroot}%{python3_sitelib} -################################################################################# -# files and systemd scriptlets -################################################################################# %files %files base -%{_bindir}/ceph-crash +%{_bindir}/%{name}-crash %{_bindir}/crushtool %{_bindir}/monmaptool %{_bindir}/osdmaptool -%{_bindir}/ceph-kvstore-tool -%{_bindir}/ceph-run -%{_presetdir}/50-ceph.preset -%{_sbindir}/ceph-create-keys -%dir %{_libexecdir}/ceph -%{_libexecdir}/ceph/ceph_common.sh +%{_bindir}/%{name}-kvstore-tool +%{_bindir}/%{name}-run +%{_presetdir}/50-%{name}.preset +%{_sbindir}/%{name}-create-keys +%dir %{_libexecdir}/%{name} +%{_libexecdir}/%{name}/%{name}_common.sh %dir %{_libdir}/rados-classes %{_libdir}/rados-classes/* -%dir %{_libdir}/ceph -%dir %{_libdir}/ceph/erasure-code -%{_libdir}/ceph/erasure-code/libec_*.so* -%dir %{_libdir}/ceph/compressor -%{_libdir}/ceph/compressor/libceph_*.so* -%{_unitdir}/ceph-crash.service -%dir %{_libdir}/ceph/crypto -%{_libdir}/ceph/crypto/libceph_*.so* +%dir %{_libdir}/%{name} +%dir %{_libdir}/%{name}/erasure-code +%{_libdir}/%{name}/erasure-code/libec_*.so* +%dir %{_libdir}/%{name}/compressor +%{_libdir}/%{name}/compressor/lib%{name}_*.so* +%{_unitdir}/%{name}-crash.service +%dir %{_libdir}/%{name}/crypto +%{_libdir}/%{name}/crypto/lib%{name}_*.so* %if %{with lttng} %{_libdir}/libos_tp.so* %{_libdir}/libosd_tp.so* %endif -%config(noreplace) %{_sysconfdir}/logrotate.d/ceph -%config(noreplace) %{_sysconfdir}/sysconfig/ceph -%{_unitdir}/ceph.target -%{_mandir}/man8/ceph-create-keys.8* -%{_mandir}/man8/ceph-run.8* +%config(noreplace) %{_sysconfdir}/logrotate.d/%{name} +%config(noreplace) %{_sysconfdir}/sysconfig/%{name} +%{_unitdir}/%{name}.target +%{_mandir}/man8/%{name}-create-keys.8* +%{_mandir}/man8/%{name}-run.8* %{_mandir}/man8/crushtool.8* %{_mandir}/man8/osdmaptool.8* %{_mandir}/man8/monmaptool.8* -%{_mandir}/man8/ceph-kvstore-tool.8* +%{_mandir}/man8/%{name}-kvstore-tool.8* #set up placeholder directories -%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/crash -%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/crash/posted 
-%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/tmp -%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-osd -%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-mds -%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rgw -%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-mgr -%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rbd -%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rbd-mirror -%{_sysconfdir}/sudoers.d/ceph-smartctl +%attr(750,%{name},%{name}) %dir %{_localstatedir}/lib/%{name}/crash +%attr(750,%{name},%{name}) %dir %{_localstatedir}/lib/%{name}/crash/posted +%attr(750,%{name},%{name}) %dir %{_localstatedir}/lib/%{name}/tmp +%attr(750,%{name},%{name}) %dir %{_localstatedir}/lib/%{name}/bootstrap-osd +%attr(750,%{name},%{name}) %dir %{_localstatedir}/lib/%{name}/bootstrap-mds +%attr(750,%{name},%{name}) %dir %{_localstatedir}/lib/%{name}/bootstrap-rgw +%attr(750,%{name},%{name}) %dir %{_localstatedir}/lib/%{name}/bootstrap-mgr +%attr(750,%{name},%{name}) %dir %{_localstatedir}/lib/%{name}/bootstrap-rbd +%attr(750,%{name},%{name}) %dir %{_localstatedir}/lib/%{name}/bootstrap-rbd-mirror +%{_sysconfdir}/sudoers.d/%{name}-smartctl %post base -%systemd_post ceph.target ceph-crash.service +%systemd_post %{name}.target %{name}-crash.service if [ $1 -eq 1 ] ; then -/usr/bin/systemctl start ceph.target ceph-crash.service >/dev/null 2>&1 || : +/usr/bin/systemctl start %{name}.target %{name}-crash.service >/dev/null 2>&1 || : fi %preun base -%systemd_preun ceph.target ceph-crash.service +%systemd_preun %{name}.target %{name}-crash.service %postun base -%systemd_postun ceph.target +%systemd_postun %{name}.target -%pre -n cephadm -getent group cephadm >/dev/null || groupadd -r cephadm -getent passwd cephadm >/dev/null || useradd -r -g cephadm -s /bin/bash -c "cephadm user for mgr/cephadm" -d %{_sharedstatedir}/cephadm cephadm +%pre -n %{name}adm +getent group %{name}adm >/dev/null || groupadd -r %{name}adm +getent passwd %{name}adm >/dev/null || useradd -r -g %{name}adm -s /bin/bash -c "%{name}adm user for mgr/%{name}adm" -d %{_sharedstatedir}/%{name}adm %{name}adm exit 0 -%postun -n cephadm -[ $1 -ne 0 ] || userdel cephadm || : +%postun -n %{name}adm +[ $1 -ne 0 ] || userdel %{name}adm || : + +%files -n %{name}adm +%{_sbindir}/%{name}adm +%{_mandir}/man8/%{name}adm.8* +%attr(0700,%{name}adm,%{name}adm) %dir %{_sharedstatedir}/%{name}adm +%attr(0700,%{name}adm,%{name}adm) %dir %{_sharedstatedir}/%{name}adm/.ssh +%config(noreplace) %attr(0600,%{name}adm,%{name}adm) %{_sharedstatedir}/%{name}adm/.ssh/authorized_keys + +%files -n %{name}fs-mirror +%{_bindir}/%{name}fs-mirror +%{_mandir}/man8/%{name}fs-mirror.8* +%{_unitdir}/%{name}fs-mirror@.service +%{_unitdir}/%{name}fs-mirror.target + +%post -n %{name}fs-mirror +%systemd_post %{name}fs-mirror@\*.service %{name}fs-mirror.target +if [ $1 -eq 1 ] ; then +/usr/bin/systemctl start %{name}fs-mirror.target >/dev/null 2>&1 || : +fi + +%preun -n %{name}fs-mirror +%systemd_preun %{name}fs-mirror@\*.service %{name}fs-mirror.target + +%postun -n %{name}fs-mirror +%systemd_postun %{name}fs-mirror@\*.service %{name}fs-mirror.target +if [ $1 -ge 1 ] ; then + SYSCONF_CEPH=%{_sysconfdir}/sysconfig/%{name} + if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then + source $SYSCONF_CEPH + fi + if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then + /usr/bin/systemctl try-restart %{name}fs-mirror@\*.service > /dev/null 2>&1 || : + fi +fi -%files -n cephadm -%{_sbindir}/cephadm 
-%{_mandir}/man8/cephadm.8* -%attr(0700,cephadm,cephadm) %dir %{_sharedstatedir}/cephadm -%attr(0700,cephadm,cephadm) %dir %{_sharedstatedir}/cephadm/.ssh -%config(noreplace) %attr(0600,cephadm,cephadm) %{_sharedstatedir}/cephadm/.ssh/authorized_keys +%files -n %{name}-exporter +%{_bindir}/%{name}-exporter %files common -%dir %{_docdir}/ceph -%doc %{_docdir}/ceph/sample.ceph.conf -%license %{_docdir}/ceph/COPYING -%{_bindir}/ceph -%{_bindir}/ceph-authtool -%{_bindir}/ceph-conf -%{_bindir}/ceph-dencoder -%{_bindir}/ceph-rbdnamer -%{_bindir}/ceph-syn -%{_bindir}/cephfs-data-scan -%{_bindir}/cephfs-journal-tool -%{_bindir}/cephfs-table-tool +%dir %{_docdir}/%{name} +%doc %{_docdir}/%{name}/sample.%{name}.conf +%license %{_docdir}/%{name}/COPYING +%{_bindir}/%{name} +%{_bindir}/%{name}-authtool +%{_bindir}/%{name}-conf +%{_bindir}/%{name}-dencoder +%{_bindir}/%{name}-rbdnamer +%{_bindir}/%{name}-syn +%{_bindir}/%{name}fs-data-scan +%{_bindir}/%{name}fs-journal-tool +%{_bindir}/%{name}fs-table-tool %{_bindir}/crushdiff %{_bindir}/rados %{_bindir}/radosgw-admin @@ -1144,24 +1057,24 @@ exit 0 %{_bindir}/rbd-replay %{_bindir}/rbd-replay-many %{_bindir}/rbdmap -%{_sbindir}/mount.ceph +%{_sbindir}/mount.%{name} %if %{with lttng} %{_bindir}/rbd-replay-prep %endif -%{_bindir}/ceph-post-file -%dir %{_libdir}/ceph/denc -%{_libdir}/ceph/denc/denc-mod-*.so -%{_tmpfilesdir}/ceph-common.conf -%{_mandir}/man8/ceph-authtool.8* -%{_mandir}/man8/ceph-conf.8* -%{_mandir}/man8/ceph-dencoder.8* -%{_mandir}/man8/ceph-diff-sorted.8* -%{_mandir}/man8/ceph-rbdnamer.8* -%{_mandir}/man8/ceph-syn.8* -%{_mandir}/man8/ceph-post-file.8* -%{_mandir}/man8/ceph.8* +%{_bindir}/%{name}-post-file +%dir %{_libdir}/%{name}/denc +%{_libdir}/%{name}/denc/denc-mod-*.so +%{_tmpfilesdir}/%{name}-common.conf +%{_mandir}/man8/%{name}-authtool.8* +%{_mandir}/man8/%{name}-conf.8* +%{_mandir}/man8/%{name}-dencoder.8* +%{_mandir}/man8/%{name}-diff-sorted.8* +%{_mandir}/man8/%{name}-rbdnamer.8* +%{_mandir}/man8/%{name}-syn.8* +%{_mandir}/man8/%{name}-post-file.8* +%{_mandir}/man8/%{name}.8* %{_mandir}/man8/crushdiff.8* -%{_mandir}/man8/mount.ceph.8* +%{_mandir}/man8/mount.%{name}.8* %{_mandir}/man8/rados.8* %{_mandir}/man8/radosgw-admin.8* %{_mandir}/man8/rbd.8* @@ -1170,268 +1083,231 @@ exit 0 %{_mandir}/man8/rbd-replay-many.8* %{_mandir}/man8/rbd-replay-prep.8* %{_mandir}/man8/rgw-orphan-list.8* -%dir %{_datadir}/ceph/ -%{_datadir}/ceph/known_hosts_drop.ceph.com -%{_datadir}/ceph/id_rsa_drop.ceph.com -%{_datadir}/ceph/id_rsa_drop.ceph.com.pub -%dir %{_sysconfdir}/ceph/ -%config %{_sysconfdir}/bash_completion.d/ceph +%dir %{_datadir}/%{name}/ +%{_datadir}/%{name}/known_hosts_drop.%{name}.com +%{_datadir}/%{name}/id_rsa_drop.%{name}.com +%{_datadir}/%{name}/id_rsa_drop.%{name}.com.pub +%dir %{_sysconfdir}/%{name}/ +%config %{_sysconfdir}/bash_completion.d/%{name} %config %{_sysconfdir}/bash_completion.d/rados %config %{_sysconfdir}/bash_completion.d/rbd %config %{_sysconfdir}/bash_completion.d/radosgw-admin -%config(noreplace) %{_sysconfdir}/ceph/rbdmap +%config(noreplace) %{_sysconfdir}/%{name}/rbdmap %{_unitdir}/rbdmap.service %{_udevrulesdir}/50-rbd.rules -%attr(3770,ceph,ceph) %dir %{_localstatedir}/log/ceph/ -%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/ +%attr(3770,%{name},%{name}) %dir %{_localstatedir}/log/%{name}/ +%attr(750,%{name},%{name}) %dir %{_localstatedir}/lib/%{name}/ %pre common CEPH_GROUP_ID=167 CEPH_USER_ID=167 -/usr/sbin/groupadd ceph -g $CEPH_GROUP_ID -o -r 2>/dev/null || : -/usr/sbin/useradd ceph -u 
$CEPH_USER_ID -o -r -g ceph -s /sbin/nologin -c "Ceph daemons" -d %{_localstatedir}/lib/ceph 2>/dev/null || : +/usr/sbin/groupadd %{name} -g $CEPH_GROUP_ID -o -r 2>/dev/null || : +/usr/sbin/useradd %{name} -u $CEPH_USER_ID -o -r -g %{name} -s /sbin/nologin -c "Ceph daemons" -d %{_localstatedir}/lib/%{name} 2>/dev/null || : exit 0 %post common -%tmpfiles_create %{_tmpfilesdir}/ceph-common.conf +%tmpfiles_create %{_tmpfilesdir}/%{name}-common.conf %postun common -# Package removal cleanup if [ "$1" -eq "0" ] ; then - rm -rf %{_localstatedir}/log/ceph - rm -rf %{_sysconfdir}/ceph + rm -rf %{_localstatedir}/log/%{name} + rm -rf %{_sysconfdir}/%{name} fi +%files fuse +%{_bindir}/%{name}-fuse +%{_mandir}/man8/%{name}-fuse.8* +%{_sbindir}/mount.fuse.%{name} +%{_mandir}/man8/mount.fuse.%{name}.8* +%{_unitdir}/%{name}-fuse@.service +%{_unitdir}/%{name}-fuse.target + %files mds -%{_bindir}/ceph-mds -%{_mandir}/man8/ceph-mds.8* -%{_unitdir}/ceph-mds@.service -%{_unitdir}/ceph-mds.target -%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/mds +%{_bindir}/%{name}-mds +%{_mandir}/man8/%{name}-mds.8* +%{_unitdir}/%{name}-mds@.service +%{_unitdir}/%{name}-mds.target +%attr(750,%{name},%{name}) %dir %{_localstatedir}/lib/%{name}/mds %post mds -%systemd_post ceph-mds@\*.service ceph-mds.target +%systemd_post %{name}-mds@\*.service %{name}-mds.target if [ $1 -eq 1 ] ; then -/usr/bin/systemctl start ceph-mds.target >/dev/null 2>&1 || : +/usr/bin/systemctl start %{name}-mds.target >/dev/null 2>&1 || : fi %preun mds -%systemd_preun ceph-mds@\*.service ceph-mds.target +%systemd_preun %{name}-mds@\*.service %{name}-mds.target %postun mds -%systemd_postun ceph-mds@\*.service ceph-mds.target +%systemd_postun %{name}-mds@\*.service %{name}-mds.target if [ $1 -ge 1 ] ; then # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to # "yes". In any case: if units are not running, do not touch them. - SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph + SYSCONF_CEPH=%{_sysconfdir}/sysconfig/%{name} if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then source $SYSCONF_CEPH fi if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then - /usr/bin/systemctl try-restart ceph-mds@\*.service > /dev/null 2>&1 || : + /usr/bin/systemctl try-restart %{name}-mds@\*.service > /dev/null 2>&1 || : fi fi %files mgr -%{_bindir}/ceph-mgr -%dir %{_datadir}/ceph/mgr -%{_datadir}/ceph/mgr/mgr_module.* -%{_datadir}/ceph/mgr/mgr_util.* -%{_datadir}/ceph/mgr/object_format.* -%{_unitdir}/ceph-mgr@.service -%{_unitdir}/ceph-mgr.target -%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/mgr +%{_bindir}/%{name}-mgr +%dir %{_datadir}/%{name}/mgr +%{_datadir}/%{name}/mgr/mgr_module.* +%{_datadir}/%{name}/mgr/mgr_util.* +%{_datadir}/%{name}/mgr/object_format.* +%{_unitdir}/%{name}-mgr@.service +%{_unitdir}/%{name}-mgr.target +%attr(750,%{name},%{name}) %dir %{_localstatedir}/lib/%{name}/mgr %post mgr -%systemd_post ceph-mgr@\*.service ceph-mgr.target +%systemd_post %{name}-mgr@\*.service %{name}-mgr.target if [ $1 -eq 1 ] ; then -/usr/bin/systemctl start ceph-mgr.target >/dev/null 2>&1 || : +/usr/bin/systemctl start %{name}-mgr.target >/dev/null 2>&1 || : fi %preun mgr -%systemd_preun ceph-mgr@\*.service ceph-mgr.target +%systemd_preun %{name}-mgr@\*.service %{name}-mgr.target %postun mgr -%systemd_postun ceph-mgr@\*.service ceph-mgr.target +%systemd_postun %{name}-mgr@\*.service %{name}-mgr.target if [ $1 -ge 1 ] ; then - # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to - # "yes". 
In any case: if units are not running, do not touch them. - SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph + SYSCONF_CEPH=%{_sysconfdir}/sysconfig/%{name} if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then source $SYSCONF_CEPH fi if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then - /usr/bin/systemctl try-restart ceph-mgr@\*.service > /dev/null 2>&1 || : + /usr/bin/systemctl try-restart %{name}-mgr@\*.service > /dev/null 2>&1 || : fi fi %files mgr-dashboard -%{_datadir}/ceph/mgr/dashboard +%{_datadir}/%{name}/mgr/dashboard %post mgr-dashboard if [ $1 -eq 1 ] ; then - /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || : + /usr/bin/systemctl try-restart %{name}-mgr.target >/dev/null 2>&1 || : fi %postun mgr-dashboard if [ $1 -eq 1 ] ; then - /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || : + /usr/bin/systemctl try-restart %{name}-mgr.target >/dev/null 2>&1 || : fi %files mgr-diskprediction-local -%{_datadir}/ceph/mgr/diskprediction_local +%{_datadir}/%{name}/mgr/diskprediction_local %post mgr-diskprediction-local if [ $1 -eq 1 ] ; then - /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || : + /usr/bin/systemctl try-restart %{name}-mgr.target >/dev/null 2>&1 || : fi %postun mgr-diskprediction-local if [ $1 -eq 1 ] ; then - /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || : + /usr/bin/systemctl try-restart %{name}-mgr.target >/dev/null 2>&1 || : fi -%files mgr-modules-core -%dir %{_datadir}/ceph/mgr -%{_datadir}/ceph/mgr/alerts -%{_datadir}/ceph/mgr/balancer -%{_datadir}/ceph/mgr/crash -%{_datadir}/ceph/mgr/devicehealth -%{_datadir}/ceph/mgr/influx -%{_datadir}/ceph/mgr/insights -%{_datadir}/ceph/mgr/iostat -%{_datadir}/ceph/mgr/localpool -%{_datadir}/ceph/mgr/mds_autoscaler -%{_datadir}/ceph/mgr/mirroring -%{_datadir}/ceph/mgr/nfs -%{_datadir}/ceph/mgr/orchestrator -%{_datadir}/ceph/mgr/osd_perf_query -%{_datadir}/ceph/mgr/osd_support -%{_datadir}/ceph/mgr/pg_autoscaler -%{_datadir}/ceph/mgr/progress -%{_datadir}/ceph/mgr/prometheus -%{_datadir}/ceph/mgr/rbd_support -%{_datadir}/ceph/mgr/restful -%{_datadir}/ceph/mgr/selftest -%{_datadir}/ceph/mgr/snap_schedule -%{_datadir}/ceph/mgr/stats -%{_datadir}/ceph/mgr/status -%{_datadir}/ceph/mgr/telegraf -%{_datadir}/ceph/mgr/telemetry -%{_datadir}/ceph/mgr/test_orchestrator -%{_datadir}/ceph/mgr/volumes -%{_datadir}/ceph/mgr/zabbix - -%files mgr-rook -%{_datadir}/ceph/mgr/rook +%files mgr-k8sevents +%{_datadir}/%{name}/mgr/k8sevents -%post mgr-rook +%post mgr-k8sevents if [ $1 -eq 1 ] ; then - /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || : + /usr/bin/systemctl try-restart %{name}-mgr.target >/dev/null 2>&1 || : fi -%postun mgr-rook +%postun mgr-k8sevents if [ $1 -eq 1 ] ; then - /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || : + /usr/bin/systemctl try-restart %{name}-mgr.target >/dev/null 2>&1 || : fi -%files mgr-k8sevents -%{_datadir}/ceph/mgr/k8sevents +%files mgr-modules-core +%dir %{_datadir}/%{name}/mgr +%{_datadir}/%{name}/mgr/alerts +%{_datadir}/%{name}/mgr/balancer +%{_datadir}/%{name}/mgr/crash +%{_datadir}/%{name}/mgr/devicehealth +%{_datadir}/%{name}/mgr/influx +%{_datadir}/%{name}/mgr/insights +%{_datadir}/%{name}/mgr/iostat +%{_datadir}/%{name}/mgr/localpool +%{_datadir}/%{name}/mgr/mds_autoscaler +%{_datadir}/%{name}/mgr/mirroring +%{_datadir}/%{name}/mgr/nfs +%{_datadir}/%{name}/mgr/orchestrator +%{_datadir}/%{name}/mgr/osd_perf_query +%{_datadir}/%{name}/mgr/osd_support +%{_datadir}/%{name}/mgr/pg_autoscaler 
+%{_datadir}/%{name}/mgr/progress +%{_datadir}/%{name}/mgr/prometheus +%{_datadir}/%{name}/mgr/rbd_support +%{_datadir}/%{name}/mgr/restful +%{_datadir}/%{name}/mgr/selftest +%{_datadir}/%{name}/mgr/snap_schedule +%{_datadir}/%{name}/mgr/stats +%{_datadir}/%{name}/mgr/status +%{_datadir}/%{name}/mgr/telegraf +%{_datadir}/%{name}/mgr/telemetry +%{_datadir}/%{name}/mgr/test_orchestrator +%{_datadir}/%{name}/mgr/volumes +%{_datadir}/%{name}/mgr/zabbix -%post mgr-k8sevents +%files mgr-rook +%{_datadir}/%{name}/mgr/rook + +%post mgr-rook if [ $1 -eq 1 ] ; then - /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || : + /usr/bin/systemctl try-restart %{name}-mgr.target >/dev/null 2>&1 || : fi -%postun mgr-k8sevents +%postun mgr-rook if [ $1 -eq 1 ] ; then - /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || : + /usr/bin/systemctl try-restart %{name}-mgr.target >/dev/null 2>&1 || : fi -%files mgr-cephadm -%{_datadir}/ceph/mgr/cephadm +%files mgr-%{name}adm +%{_datadir}/%{name}/mgr/%{name}adm -%post mgr-cephadm +%post mgr-%{name}adm if [ $1 -eq 1 ] ; then - /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || : + /usr/bin/systemctl try-restart %{name}-mgr.target >/dev/null 2>&1 || : fi -%postun mgr-cephadm +%postun mgr-%{name}adm if [ $1 -eq 1 ] ; then - /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || : + /usr/bin/systemctl try-restart %{name}-mgr.target >/dev/null 2>&1 || : fi %files mon -%{_bindir}/ceph-mon -%{_bindir}/ceph-monstore-tool -%{_mandir}/man8/ceph-mon.8* -%{_unitdir}/ceph-mon@.service -%{_unitdir}/ceph-mon.target -%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/mon +%{_bindir}/%{name}-mon +%{_bindir}/%{name}-monstore-tool +%{_mandir}/man8/%{name}-mon.8* +%{_unitdir}/%{name}-mon@.service +%{_unitdir}/%{name}-mon.target +%attr(750,%{name},%{name}) %dir %{_localstatedir}/lib/%{name}/mon %post mon -%systemd_post ceph-mon@\*.service ceph-mon.target +%systemd_post %{name}-mon@\*.service %{name}-mon.target if [ $1 -eq 1 ] ; then -/usr/bin/systemctl start ceph-mon.target >/dev/null 2>&1 || : +/usr/bin/systemctl start %{name}-mon.target >/dev/null 2>&1 || : fi %preun mon -%systemd_preun ceph-mon@\*.service ceph-mon.target +%systemd_preun %{name}-mon@\*.service %{name}-mon.target %postun mon -%systemd_postun ceph-mon@\*.service ceph-mon.target +%systemd_postun %{name}-mon@\*.service %{name}-mon.target if [ $1 -ge 1 ] ; then - # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to - # "yes". In any case: if units are not running, do not touch them. 
- SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph + SYSCONF_CEPH=%{_sysconfdir}/sysconfig/%{name} if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then source $SYSCONF_CEPH fi if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then - /usr/bin/systemctl try-restart ceph-mon@\*.service > /dev/null 2>&1 || : + /usr/bin/systemctl try-restart %{name}-mon@\*.service > /dev/null 2>&1 || : fi fi -%files fuse -%{_bindir}/ceph-fuse -%{_mandir}/man8/ceph-fuse.8* -%{_sbindir}/mount.fuse.ceph -%{_mandir}/man8/mount.fuse.ceph.8* -%{_unitdir}/ceph-fuse@.service -%{_unitdir}/ceph-fuse.target - -%files -n cephfs-mirror -%{_bindir}/cephfs-mirror -%{_mandir}/man8/cephfs-mirror.8* -%{_unitdir}/cephfs-mirror@.service -%{_unitdir}/cephfs-mirror.target - -%post -n cephfs-mirror -%systemd_post cephfs-mirror@\*.service cephfs-mirror.target -if [ $1 -eq 1 ] ; then -/usr/bin/systemctl start cephfs-mirror.target >/dev/null 2>&1 || : -fi - -%preun -n cephfs-mirror -%systemd_preun cephfs-mirror@\*.service cephfs-mirror.target - -%postun -n cephfs-mirror -%systemd_postun cephfs-mirror@\*.service cephfs-mirror.target -if [ $1 -ge 1 ] ; then - # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to - # "yes". In any case: if units are not running, do not touch them. - SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph - if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then - source $SYSCONF_CEPH - fi - if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then - /usr/bin/systemctl try-restart cephfs-mirror@\*.service > /dev/null 2>&1 || : - fi -fi - -%files -n ceph-exporter -%{_bindir}/ceph-exporter - %files -n rbd-fuse %{_bindir}/rbd-fuse %{_mandir}/man8/rbd-fuse.8* @@ -1439,69 +1315,59 @@ fi %files -n rbd-mirror %{_bindir}/rbd-mirror %{_mandir}/man8/rbd-mirror.8* -%{_unitdir}/ceph-rbd-mirror@.service -%{_unitdir}/ceph-rbd-mirror.target +%{_unitdir}/%{name}-rbd-mirror@.service +%{_unitdir}/%{name}-rbd-mirror.target %post -n rbd-mirror -%systemd_post ceph-rbd-mirror@\*.service ceph-rbd-mirror.target +%systemd_post %{name}-rbd-mirror@\*.service %{name}-rbd-mirror.target if [ $1 -eq 1 ] ; then -/usr/bin/systemctl start ceph-rbd-mirror.target >/dev/null 2>&1 || : +/usr/bin/systemctl start %{name}-rbd-mirror.target >/dev/null 2>&1 || : fi %preun -n rbd-mirror -%systemd_preun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target +%systemd_preun %{name}-rbd-mirror@\*.service %{name}-rbd-mirror.target %postun -n rbd-mirror -%systemd_postun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target +%systemd_postun %{name}-rbd-mirror@\*.service %{name}-rbd-mirror.target if [ $1 -ge 1 ] ; then - # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to - # "yes". In any case: if units are not running, do not touch them. 
- SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph + SYSCONF_CEPH=%{_sysconfdir}/sysconfig/%{name} if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then source $SYSCONF_CEPH fi if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then - /usr/bin/systemctl try-restart ceph-rbd-mirror@\*.service > /dev/null 2>&1 || : + /usr/bin/systemctl try-restart %{name}-rbd-mirror@\*.service > /dev/null 2>&1 || : fi fi %files immutable-object-cache -%{_bindir}/ceph-immutable-object-cache -%{_mandir}/man8/ceph-immutable-object-cache.8* -%{_unitdir}/ceph-immutable-object-cache@.service -%{_unitdir}/ceph-immutable-object-cache.target +%{_bindir}/%{name}-immutable-object-cache +%{_mandir}/man8/%{name}-immutable-object-cache.8* +%{_unitdir}/%{name}-immutable-object-cache@.service +%{_unitdir}/%{name}-immutable-object-cache.target %post immutable-object-cache -%systemd_post ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target +%systemd_post %{name}-immutable-object-cache@\*.service %{name}-immutable-object-cache.target if [ $1 -eq 1 ] ; then -/usr/bin/systemctl start ceph-immutable-object-cache.target >/dev/null 2>&1 || : +/usr/bin/systemctl start %{name}-immutable-object-cache.target >/dev/null 2>&1 || : fi %preun immutable-object-cache -%systemd_preun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target +%systemd_preun %{name}-immutable-object-cache@\*.service %{name}-immutable-object-cache.target %postun immutable-object-cache -%systemd_postun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target +%systemd_postun %{name}-immutable-object-cache@\*.service %{name}-immutable-object-cache.target if [ $1 -ge 1 ] ; then - # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to - # "yes". In any case: if units are not running, do not touch them. 
- SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph + SYSCONF_CEPH=%{_sysconfdir}/sysconfig/%{name} if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then source $SYSCONF_CEPH fi if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then - /usr/bin/systemctl try-restart ceph-immutable-object-cache@\*.service > /dev/null 2>&1 || : + /usr/bin/systemctl try-restart %{name}-immutable-object-cache@\*.service > /dev/null 2>&1 || : fi fi -%files -n rbd-nbd -%{_bindir}/rbd-nbd -%{_mandir}/man8/rbd-nbd.8* -%dir %{_libexecdir}/rbd-nbd -%{_libexecdir}/rbd-nbd/rbd-nbd_quiesce - %files radosgw -%{_bindir}/ceph-diff-sorted +%{_bindir}/%{name}-diff-sorted %{_bindir}/radosgw %{_bindir}/radosgw-token %{_bindir}/radosgw-es @@ -1511,106 +1377,105 @@ fi %{_bindir}/rgw-orphan-list %{_libdir}/libradosgw.so* %{_mandir}/man8/radosgw.8* -%dir %{_localstatedir}/lib/ceph/radosgw -%{_unitdir}/ceph-radosgw@.service -%{_unitdir}/ceph-radosgw.target +%dir %{_localstatedir}/lib/%{name}/radosgw +%{_unitdir}/%{name}-radosgw@.service +%{_unitdir}/%{name}-radosgw.target %post radosgw /sbin/ldconfig -%systemd_post ceph-radosgw@\*.service ceph-radosgw.target +%systemd_post %{name}-radosgw@\*.service %{name}-radosgw.target if [ $1 -eq 1 ] ; then -/usr/bin/systemctl start ceph-radosgw.target >/dev/null 2>&1 || : +/usr/bin/systemctl start %{name}-radosgw.target >/dev/null 2>&1 || : fi %preun radosgw -%systemd_preun ceph-radosgw@\*.service ceph-radosgw.target +%systemd_preun %{name}-radosgw@\*.service %{name}-radosgw.target %postun radosgw /sbin/ldconfig -%systemd_postun ceph-radosgw@\*.service ceph-radosgw.target +%systemd_postun %{name}-radosgw@\*.service %{name}-radosgw.target if [ $1 -ge 1 ] ; then - # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to - # "yes". In any case: if units are not running, do not touch them. 
-    SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
+    SYSCONF_CEPH=%{_sysconfdir}/sysconfig/%{name}
     if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
         source $SYSCONF_CEPH
     fi
     if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
-        /usr/bin/systemctl try-restart ceph-radosgw@\*.service > /dev/null 2>&1 || :
+        /usr/bin/systemctl try-restart %{name}-radosgw@\*.service > /dev/null 2>&1 || :
     fi
 fi
 
+%files -n rbd-nbd
+%{_bindir}/rbd-nbd
+%{_mandir}/man8/rbd-nbd.8*
+%dir %{_libexecdir}/rbd-nbd
+%{_libexecdir}/rbd-nbd/rbd-nbd_quiesce
+
 %files osd
-%{_bindir}/ceph-clsinfo
-%{_bindir}/ceph-bluestore-tool
-%{_bindir}/ceph-erasure-code-tool
-%{_bindir}/ceph-objectstore-tool
-%{_bindir}/ceph-osdomap-tool
-%{_bindir}/ceph-osd
-%{_libexecdir}/ceph/ceph-osd-prestart.sh
-%{_mandir}/man8/ceph-clsinfo.8*
-%{_mandir}/man8/ceph-osd.8*
-%{_mandir}/man8/ceph-bluestore-tool.8*
-%{_unitdir}/ceph-osd@.service
-%{_unitdir}/ceph-osd.target
-%attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/osd
-%config(noreplace) %{_sysctldir}/90-ceph-osd.conf
+%{_bindir}/%{name}-clsinfo
+%{_bindir}/%{name}-bluestore-tool
+%{_bindir}/%{name}-erasure-code-tool
+%{_bindir}/%{name}-objectstore-tool
+%{_bindir}/%{name}-osdomap-tool
+%{_bindir}/%{name}-osd
+%{_libexecdir}/%{name}/%{name}-osd-prestart.sh
+%{_mandir}/man8/%{name}-clsinfo.8*
+%{_mandir}/man8/%{name}-osd.8*
+%{_mandir}/man8/%{name}-bluestore-tool.8*
+%{_unitdir}/%{name}-osd@.service
+%{_unitdir}/%{name}-osd.target
+%attr(750,%{name},%{name}) %dir %{_localstatedir}/lib/%{name}/osd
+%config(noreplace) %{_sysctldir}/90-%{name}-osd.conf
 
 %post osd
-%systemd_post ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target
+%systemd_post %{name}-osd@\*.service %{name}-volume@\*.service %{name}-osd.target
 if [ $1 -eq 1 ] ; then
-/usr/bin/systemctl start ceph-osd.target >/dev/null 2>&1 || :
+/usr/bin/systemctl start %{name}-osd.target >/dev/null 2>&1 || :
 fi
 %if 0%{?sysctl_apply}
-    %sysctl_apply 90-ceph-osd.conf
+    %sysctl_apply 90-%{name}-osd.conf
 %else
-    /usr/lib/systemd/systemd-sysctl %{_sysctldir}/90-ceph-osd.conf > /dev/null 2>&1 || :
+    /usr/lib/systemd/systemd-sysctl %{_sysctldir}/90-%{name}-osd.conf > /dev/null 2>&1 || :
 %endif
 
 %preun osd
-%systemd_preun ceph-osd@\*.service ceph-osd.target
+%systemd_preun %{name}-osd@\*.service %{name}-osd.target
 
 %postun osd
-%systemd_postun ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target
+%systemd_postun %{name}-osd@\*.service %{name}-volume@\*.service %{name}-osd.target
 if [ $1 -ge 1 ] ; then
-    # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
-    # "yes". In any case: if units are not running, do not touch them.
- SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph + SYSCONF_CEPH=%{_sysconfdir}/sysconfig/%{name} if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then source $SYSCONF_CEPH fi if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then - /usr/bin/systemctl try-restart ceph-osd@\*.service ceph-volume@\*.service > /dev/null 2>&1 || : + /usr/bin/systemctl try-restart %{name}-osd@\*.service %{name}-volume@\*.service > /dev/null 2>&1 || : fi fi %files volume -%{_sbindir}/ceph-volume -%{_sbindir}/ceph-volume-systemd -%dir %{python3_sitelib}/ceph_volume -%{python3_sitelib}/ceph_volume/* -%{python3_sitelib}/ceph_volume-* -%{_mandir}/man8/ceph-volume.8* -%{_mandir}/man8/ceph-volume-systemd.8* -%{_unitdir}/ceph-volume@.service +%{_sbindir}/%{name}-volume +%{_sbindir}/%{name}-volume-systemd +%dir %{python3_sitelib}/%{name}_volume +%{python3_sitelib}/%{name}_volume/* +%{python3_sitelib}/%{name}_volume-* +%{_mandir}/man8/%{name}-volume.8* +%{_mandir}/man8/%{name}-volume-systemd.8* +%{_unitdir}/%{name}-volume@.service %post volume -%systemd_post ceph-volume@\*.service +%systemd_post %{name}-volume@\*.service %preun volume -%systemd_preun ceph-volume@\*.service +%systemd_preun %{name}-volume@\*.service %postun volume -%systemd_postun ceph-volume@\*.service +%systemd_postun %{name}-volume@\*.service if [ $1 -ge 1 ] ; then - # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to - # "yes". In any case: if units are not running, do not touch them. - SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph + SYSCONF_CEPH=%{_sysconfdir}/sysconfig/%{name} if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then source $SYSCONF_CEPH fi if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then - /usr/bin/systemctl try-restart ceph-volume@\*.service > /dev/null 2>&1 || : + /usr/bin/systemctl try-restart %{name}-volume@\*.service > /dev/null 2>&1 || : fi fi @@ -1619,19 +1484,19 @@ fi %files resource-agents %dir %{_prefix}/lib/ocf %dir %{_prefix}/lib/ocf/resource.d -%dir %{_prefix}/lib/ocf/resource.d/ceph -%attr(0755,-,-) %{_prefix}/lib/ocf/resource.d/ceph/rbd +%dir %{_prefix}/lib/ocf/resource.d/%{name} +%attr(0755,-,-) %{_prefix}/lib/ocf/resource.d/%{name}/rbd %endif %files -n librados2 %{_libdir}/librados.so.* -%dir %{_libdir}/ceph -%{_libdir}/ceph/libceph-common.so.* +%dir %{_libdir}/%{name} +%{_libdir}/%{name}/lib%{name}-common.so.* %if %{with lttng} %{_libdir}/librados_tp.so.* %endif -%dir %{_sysconfdir}/ceph +%dir %{_sysconfdir}/%{name} %post -n librados2 -p /sbin/ldconfig @@ -1663,15 +1528,15 @@ fi %{python3_sitearch}/rados.cpython*.so %{python3_sitearch}/rados-*.egg-info -%files -n libcephsqlite -%{_libdir}/libcephsqlite.so +%files -n lib%{name}sqlite +%{_libdir}/lib%{name}sqlite.so -%post -n libcephsqlite -p /sbin/ldconfig +%post -n lib%{name}sqlite -p /sbin/ldconfig -%postun -n libcephsqlite -p /sbin/ldconfig +%postun -n lib%{name}sqlite -p /sbin/ldconfig -%files -n libcephsqlite-devel -%{_includedir}/libcephsqlite.h +%files -n lib%{name}sqlite-devel +%{_includedir}/lib%{name}sqlite.h %if 0%{with libradosstriper} %files -n libradosstriper1 @@ -1693,8 +1558,8 @@ fi %if %{with lttng} %{_libdir}/librbd_tp.so.* %endif -%dir %{_libdir}/ceph/librbd -%{_libdir}/ceph/librbd/libceph_*.so* +%dir %{_libdir}/%{name}/librbd +%{_libdir}/%{name}/librbd/lib%{name}_*.so* %post -n librbd1 -p /sbin/ldconfig @@ -1731,97 +1596,97 @@ fi %{_libdir}/librgw_rados_tp.so %endif -%files -n python%{python3_pkgversion}-rgw -%{python3_sitearch}/rgw.cpython*.so -%{python3_sitearch}/rgw-*.egg-info +%files -n lib%{name}fs2 +%{_libdir}/lib%{name}fs.so.* 
+%dir %{_sysconfdir}/%{name} + +%post -n lib%{name}fs2 -p /sbin/ldconfig + +%postun -n lib%{name}fs2 -p /sbin/ldconfig + +%files -n lib%{name}fs-devel +%dir %{_includedir}/%{name}fs +%{_includedir}/%{name}fs/lib%{name}fs.h +%{_includedir}/%{name}fs/%{name}_ll_client.h +%dir %{_includedir}/%{name}fs/metrics +%{_includedir}/%{name}fs/metrics/Types.h +%{_libdir}/lib%{name}fs.so %files -n python%{python3_pkgversion}-rbd %{python3_sitearch}/rbd.cpython*.so %{python3_sitearch}/rbd-*.egg-info -%files -n libcephfs2 -%{_libdir}/libcephfs.so.* -%dir %{_sysconfdir}/ceph - -%post -n libcephfs2 -p /sbin/ldconfig - -%postun -n libcephfs2 -p /sbin/ldconfig - -%files -n libcephfs-devel -%dir %{_includedir}/cephfs -%{_includedir}/cephfs/libcephfs.h -%{_includedir}/cephfs/ceph_ll_client.h -%dir %{_includedir}/cephfs/metrics -%{_includedir}/cephfs/metrics/Types.h -%{_libdir}/libcephfs.so - -%files -n python%{python3_pkgversion}-cephfs -%{python3_sitearch}/cephfs.cpython*.so -%{python3_sitearch}/cephfs-*.egg-info - -%files -n python%{python3_pkgversion}-ceph-argparse -%{python3_sitelib}/ceph_argparse.py -%{python3_sitelib}/__pycache__/ceph_argparse.cpython*.py* -%{python3_sitelib}/ceph_daemon.py -%{python3_sitelib}/__pycache__/ceph_daemon.cpython*.py* - -%files -n python%{python3_pkgversion}-ceph-common -%{python3_sitelib}/ceph -%{python3_sitelib}/ceph-*.egg-info - -%if 0%{with cephfs_shell} -%files -n cephfs-shell -%{python3_sitelib}/cephfs_shell-*.egg-info -%{_bindir}/cephfs-shell -%{_mandir}/man8/cephfs-shell.8* -%endif - -%files -n cephfs-top -%{python3_sitelib}/cephfs_top-*.egg-info -%{_bindir}/cephfs-top -%{_mandir}/man8/cephfs-top.8* - -%if 0%{with ceph_test_package} -%files -n ceph-test -%{_bindir}/ceph-client-debug -%{_bindir}/ceph_bench_log -%{_bindir}/ceph_multi_stress_watch -%{_bindir}/ceph_erasure_code_benchmark -%{_bindir}/ceph_omapbench -%{_bindir}/ceph_objectstore_bench -%{_bindir}/ceph_perf_objectstore -%{_bindir}/ceph_perf_local -%{_bindir}/ceph_perf_msgr_client -%{_bindir}/ceph_perf_msgr_server -%{_bindir}/ceph_psim -%{_bindir}/ceph_radosacl -%{_bindir}/ceph_rgw_jsonparser -%{_bindir}/ceph_rgw_multiparser -%{_bindir}/ceph_scratchtool -%{_bindir}/ceph_scratchtoolpp -%{_bindir}/ceph_test_* -%{_bindir}/ceph-coverage -%{_bindir}/ceph-debugpack -%{_bindir}/ceph-dedup-tool -%{_mandir}/man8/ceph-debugpack.8* -%dir %{_libdir}/ceph -%{_libdir}/ceph/ceph-monstore-update-crush.sh -%endif - -%if 0%{with cephfs_java} -%files -n libcephfs_jni1 -%{_libdir}/libcephfs_jni.so.* - -%post -n libcephfs_jni1 -p /sbin/ldconfig - -%postun -n libcephfs_jni1 -p /sbin/ldconfig - -%files -n libcephfs_jni-devel -%{_libdir}/libcephfs_jni.so - -%files -n cephfs-java -%{_javadir}/libcephfs.jar -%{_javadir}/libcephfs-test.jar +%files -n python%{python3_pkgversion}-rgw +%{python3_sitearch}/rgw.cpython*.so +%{python3_sitearch}/rgw-*.egg-info + +%files -n python%{python3_pkgversion}-%{name}fs +%{python3_sitearch}/%{name}fs.cpython*.so +%{python3_sitearch}/%{name}fs-*.egg-info + +%files -n python%{python3_pkgversion}-%{name}-argparse +%{python3_sitelib}/%{name}_argparse.py +%{python3_sitelib}/__pycache__/%{name}_argparse.cpython*.py* +%{python3_sitelib}/%{name}_daemon.py +%{python3_sitelib}/__pycache__/%{name}_daemon.cpython*.py* + +%files -n python%{python3_pkgversion}-%{name}-common +%{python3_sitelib}/%{name} +%{python3_sitelib}/%{name}-*.egg-info + +%if 0%{with %{name}fs_shell} +%files -n %{name}fs-shell +%{python3_sitelib}/%{name}fs_shell-*.egg-info +%{_bindir}/%{name}fs-shell +%{_mandir}/man8/%{name}fs-shell.8* +%endif + 
+%files -n %{name}fs-top +%{python3_sitelib}/%{name}fs_top-*.egg-info +%{_bindir}/%{name}fs-top +%{_mandir}/man8/%{name}fs-top.8* + +%if 0%{with %{name}_test_package} +%files -n %{name}-test +%{_bindir}/%{name}-client-debug +%{_bindir}/%{name}_bench_log +%{_bindir}/%{name}_multi_stress_watch +%{_bindir}/%{name}_erasure_code_benchmark +%{_bindir}/%{name}_omapbench +%{_bindir}/%{name}_objectstore_bench +%{_bindir}/%{name}_perf_objectstore +%{_bindir}/%{name}_perf_local +%{_bindir}/%{name}_perf_msgr_client +%{_bindir}/%{name}_perf_msgr_server +%{_bindir}/%{name}_psim +%{_bindir}/%{name}_radosacl +%{_bindir}/%{name}_rgw_jsonparser +%{_bindir}/%{name}_rgw_multiparser +%{_bindir}/%{name}_scratchtool +%{_bindir}/%{name}_scratchtoolpp +%{_bindir}/%{name}_test_* +%{_bindir}/%{name}-coverage +%{_bindir}/%{name}-debugpack +%{_bindir}/%{name}-dedup-tool +%{_mandir}/man8/%{name}-debugpack.8* +%dir %{_libdir}/%{name} +%{_libdir}/%{name}/%{name}-monstore-update-crush.sh +%endif + +%if 0%{with %{name}fs_java} +%files -n lib%{name}fs_jni1 +%{_libdir}/lib%{name}fs_jni.so.* + +%post -n lib%{name}fs_jni1 -p /sbin/ldconfig + +%postun -n lib%{name}fs_jni1 -p /sbin/ldconfig + +%files -n lib%{name}fs_jni-devel +%{_libdir}/lib%{name}fs_jni.so + +%files -n %{name}fs-java +%{_javadir}/lib%{name}fs.jar +%{_javadir}/lib%{name}fs-test.jar %endif %files -n rados-objclass-devel @@ -1830,104 +1695,84 @@ fi %if 0%{with selinux} %files selinux -%attr(0600,root,root) %{_datadir}/selinux/packages/ceph.pp -%{_datadir}/selinux/devel/include/contrib/ceph.if -%{_mandir}/man8/ceph_selinux.8* +%attr(0600,root,root) %{_datadir}/selinux/packages/%{name}.pp +%{_datadir}/selinux/devel/include/contrib/%{name}.if +%{_mandir}/man8/%{name}_selinux.8* %post selinux -# backup file_contexts before update . /etc/selinux/config FILE_CONTEXT=/etc/selinux/${SELINUXTYPE}/contexts/files/file_contexts cp ${FILE_CONTEXT} ${FILE_CONTEXT}.pre -# Install the policy -/usr/sbin/semodule -i %{_datadir}/selinux/packages/ceph.pp +/usr/sbin/semodule -i %{_datadir}/selinux/packages/%{name}.pp -# Load the policy if SELinux is enabled if ! /usr/sbin/selinuxenabled; then - # Do not relabel if selinux is not enabled exit 0 fi if diff ${FILE_CONTEXT} ${FILE_CONTEXT}.pre > /dev/null 2>&1; then - # Do not relabel if file contexts did not change exit 0 fi -# Stop ceph.target while relabeling if CEPH_AUTO_RESTART_ON_UPGRADE=yes -SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph +SYSCONF_CEPH=%{_sysconfdir}/sysconfig/%{name} if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then source $SYSCONF_CEPH fi -# Check whether the daemons are running -/usr/bin/systemctl status ceph.target > /dev/null 2>&1 +/usr/bin/systemctl status %{name}.target > /dev/null 2>&1 STATUS=$? 
-# Stop the daemons if they were running
 if test $STATUS -eq 0; then
     if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
-        /usr/bin/systemctl stop ceph.target > /dev/null 2>&1
+        /usr/bin/systemctl stop %{name}.target > /dev/null 2>&1
     fi
 fi
 
-# Relabel the files fix for first package install
 /usr/sbin/fixfiles -C ${FILE_CONTEXT}.pre restore 2> /dev/null
 rm -f ${FILE_CONTEXT}.pre
-# The fixfiles command won't fix label for /var/run/ceph
-/usr/sbin/restorecon -R /var/run/ceph > /dev/null 2>&1
+/usr/sbin/restorecon -R /var/run/%{name} > /dev/null 2>&1
 
-# Start the daemons iff they were running before
 if test $STATUS -eq 0; then
     if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
-        /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || :
+        /usr/bin/systemctl start %{name}.target > /dev/null 2>&1 || :
     fi
 fi
 exit 0
 
 %postun selinux
 if [ $1 -eq 0 ]; then
-    # backup file_contexts before update
     . /etc/selinux/config
     FILE_CONTEXT=/etc/selinux/${SELINUXTYPE}/contexts/files/file_contexts
     cp ${FILE_CONTEXT} ${FILE_CONTEXT}.pre
 
-    # Remove the module
-    /usr/sbin/semodule -n -r ceph > /dev/null 2>&1
+    /usr/sbin/semodule -n -r %{name} > /dev/null 2>&1
 
-    # Reload the policy if SELinux is enabled
     if ! /usr/sbin/selinuxenabled ; then
-       # Do not relabel if SELinux is not enabled
        exit 0
     fi
 
-    # Stop ceph.target while relabeling if CEPH_AUTO_RESTART_ON_UPGRADE=yes
-    SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
+    SYSCONF_CEPH=%{_sysconfdir}/sysconfig/%{name}
    if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
        source $SYSCONF_CEPH
    fi
 
-    # Check whether the daemons are running
-    /usr/bin/systemctl status ceph.target > /dev/null 2>&1
+    /usr/bin/systemctl status %{name}.target > /dev/null 2>&1
     STATUS=$?
 
-    # Stop the daemons if they were running
     if test $STATUS -eq 0; then
         if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
-            /usr/bin/systemctl stop ceph.target > /dev/null 2>&1
+            /usr/bin/systemctl stop %{name}.target > /dev/null 2>&1
         fi
     fi
 
     /usr/sbin/fixfiles -C ${FILE_CONTEXT}.pre restore 2> /dev/null
     rm -f ${FILE_CONTEXT}.pre
-    # The fixfiles command won't fix label for /var/run/ceph
-    /usr/sbin/restorecon -R /var/run/ceph > /dev/null 2>&1
+    /usr/sbin/restorecon -R /var/run/%{name} > /dev/null 2>&1
 
-    # Start the daemons if they were running before
     if test $STATUS -eq 0; then
         if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
-            /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || :
+            /usr/bin/systemctl start %{name}.target > /dev/null 2>&1 || :
         fi
     fi
 fi
 exit 0
 
@@ -1935,14 +1780,17 @@ exit 0
 %endif
 
 %files grafana-dashboards
-%attr(0755,root,root) %dir %{_sysconfdir}/grafana/dashboards/ceph-dashboard
-%config %{_sysconfdir}/grafana/dashboards/ceph-dashboard/*
+%attr(0755,root,root) %dir %{_sysconfdir}/grafana/dashboards/%{name}-dashboard
+%config %{_sysconfdir}/grafana/dashboards/%{name}-dashboard/*
 
 %files prometheus-alerts
-%attr(0755,root,root) %dir %{_sysconfdir}/prometheus/ceph
-%config %{_sysconfdir}/prometheus/ceph/ceph_default_alerts.yml
+%attr(0755,root,root) %dir %{_sysconfdir}/prometheus/%{name}
+%config %{_sysconfdir}/prometheus/%{name}/%{name}_default_alerts.yml
 
 %changelog
+* Sat Apr 15 2023 Jing Zhang - 17.2.5-2
+- Refactor spec and remove BuildRequires: mold and xmlstarlet
+
 * Thu Mar 02 2023 Funda Wang - 17.2.5-1
 - New version 17.2.5
 - Build with bundled boost for now, 17.x does not support boost 1.81
-- 
Gitee