2 # spec file for package ceph
4 # Copyright (C) 2004-2019 The Ceph Project Developers. See COPYING file
5 # at the top-level directory of this distribution and at
6 # https://github.com/ceph/ceph/blob/master/COPYING
8 # All modifications and additions to the file contributed by third parties
9 # remain the property of their copyright owners, unless otherwise agreed
12 # This file is under the GNU Lesser General Public License, version 2.1
14 # Please submit bugfixes or comments via https://tracker.ceph.com/
17 #################################################################################
18 # conditional build section
20 # please read https://rpm.org/user_doc/conditional_builds.html for explanation of
22 #################################################################################
23 %bcond_with make_check
25 %bcond_with cmake_verbose_logging
26 %bcond_without ceph_test_package
30 %bcond_without tcmalloc
32 %bcond_with system_pmdk
33 %if 0%{?fedora} || 0%{?rhel}
34 %bcond_without selinux
35 %ifarch x86_64 ppc64le
36 %bcond_without rbd_rwl_cache
37 %bcond_without rbd_ssd_cache
39 %bcond_with rbd_rwl_cache
40 %bcond_with rbd_ssd_cache
43 %bcond_with cephfs_java
45 %bcond_without cephfs_java
47 %bcond_without amqp_endpoint
48 %bcond_without kafka_endpoint
50 %bcond_without libradosstriper
52 %global luarocks_package_name luarocks
53 %bcond_without lua_packages
54 %global _remote_tarball_prefix https://download.ceph.com/tarballs/
57 %bcond_with amqp_endpoint
58 %bcond_with cephfs_java
59 %bcond_with kafka_endpoint
60 %bcond_with libradosstriper
61 %ifarch x86_64 aarch64 ppc64le
63 %bcond_without rbd_rwl_cache
64 %bcond_without rbd_ssd_cache
67 %bcond_with rbd_rwl_cache
68 %bcond_with rbd_ssd_cache
72 # Compat macro for the _fillupdir macro introduced in Nov 2017
73 %if ! %{defined _fillupdir}
# Pre-Nov-2017 SUSE path for fillup templates.
# NOTE(review): the matching %endif is not visible in this chunk — confirm it follows.
74 %global _fillupdir /var/adm/fillup-templates
79 %bcond_without lua_packages
82 %global luarocks_package_name lua53-luarocks
85 %global luarocks_package_name lua54-luarocks
89 %bcond_with lua_packages
94 %if 0%{?fedora} || 0%{?suse_version} >= 1500
95 # distros that ship cmd2 and/or colorama
96 %bcond_without cephfs_shell
98 # distros that do _not_ ship cmd2/colorama
99 %bcond_with cephfs_shell
101 %if 0%{?fedora} || 0%{?suse_version} || 0%{?rhel} >= 8
105 # get selinux policy version
106 # Force policy version 0.0.0 for CentOS builds to avoid repository sync issues between RHEL and CentOS
108 %global _selinux_policy_version 0.0.0
110 %{!?_selinux_policy_version: %global _selinux_policy_version 0.0.0}
# Fallback definitions for build targets whose rpm macro set lacks these
# (each %global fires only when the macro is not already defined).
114 %{!?_udevrulesdir: %global _udevrulesdir /lib/udev/rules.d}
115 %{!?tmpfiles_create: %global tmpfiles_create systemd-tmpfiles --create}
116 %{!?python3_pkgversion: %global python3_pkgversion 3}
117 %{!?python3_version_nodots: %global python3_version_nodots 3}
118 %{!?python3_version: %global python3_version 3}
120 # disable dwz which compresses the debuginfo
121 %global _find_debuginfo_dwz_opts %{nil}
122 %if ! 0%{?suse_version}
123 # use multi-threaded xz compression: xz level 7 using ncpus threads
124 %global _source_payload w7T%{_smp_build_ncpus}.xzdio
125 %global _binary_payload w7T%{_smp_build_ncpus}.xzdio
# Cap the parallel-build job count by available memory:
# jobs = MemAvailable(kB) / kb_per_job, clamped to a minimum of 1.
# NOTE(review): this macro body appears truncated in this chunk — the
# kb_per_job assignment (from %1) and the closing echo/paren are not
# visible here; confirm against the full file.
128 %define smp_limit_mem_per_job() %( \
130 kb_total=$(head -3 /proc/meminfo | sed -n 's/MemAvailable:\\s*\\(.*\\) kB.*/\\1/p') \
131 jobs=$(( $kb_total / $kb_per_job )) \
132 [ $jobs -lt 1 ] && jobs=1 \
135 %if 0%{?_smp_ncpus_max} == 0
136 %if 0%{?__isa_bits} == 32
137 # 32-bit builds can use 3G memory max, which is not enough even for -j2
138 %global _smp_ncpus_max 1
140 # 3.0 GiB mem per job
141 %global _smp_ncpus_max %{smp_limit_mem_per_job 3000000}
145 #################################################################################
146 # main package definition
147 #################################################################################
149 Version: @PROJECT_VERSION@
150 Release: @RPM_RELEASE@%{?dist}
151 %if 0%{?fedora} || 0%{?rhel}
155 # define _epoch_prefix macro which will expand to the empty string if epoch is
157 %global _epoch_prefix %{?epoch:%{epoch}:}
159 Summary: User space components of the Ceph file system
160 License: LGPL-2.1 and LGPL-3.0 and CC-BY-SA-3.0 and GPL-2.0 and BSL-1.0 and BSD-3-Clause and MIT
161 %if 0%{?suse_version}
162 Group: System/Filesystems
164 URL: http://ceph.com/
165 Source0: %{?_remote_tarball_prefix}@TARBALL_BASENAME@.tar.bz2
166 %if 0%{?suse_version}
167 # _insert_obs_source_lines_here
168 ExclusiveArch: x86_64 aarch64 ppc64le s390x
170 #################################################################################
171 # dependencies that apply across all distro families
172 #################################################################################
173 Requires: ceph-osd = %{_epoch_prefix}%{version}-%{release}
174 Requires: ceph-mds = %{_epoch_prefix}%{version}-%{release}
175 Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release}
176 Requires: ceph-mon = %{_epoch_prefix}%{version}-%{release}
177 Requires(post): binutils
178 %if 0%{with cephfs_java}
179 BuildRequires: java-devel
180 BuildRequires: sharutils
183 BuildRequires: checkpolicy
184 BuildRequires: selinux-policy-devel
187 BuildRequires: cmake > 3.5
188 BuildRequires: cryptsetup
189 BuildRequires: fuse-devel
190 %if 0%{with seastar} && 0%{?rhel}
191 BuildRequires: gcc-toolset-9-gcc-c++ >= 9.2.1-2.3
193 BuildRequires: gcc-c++
196 %if 0%{with tcmalloc}
197 # libprofiler did not build on ppc64le until 2.7.90
198 %if 0%{?fedora} || 0%{?rhel} >= 8
199 BuildRequires: gperftools-devel >= 2.7.90
201 %if 0%{?rhel} && 0%{?rhel} < 8
202 BuildRequires: gperftools-devel >= 2.6.1
204 %if 0%{?suse_version}
205 BuildRequires: gperftools-devel >= 2.4
208 BuildRequires: leveldb-devel > 1.2
209 BuildRequires: libaio-devel
210 BuildRequires: libblkid-devel >= 2.17
211 BuildRequires: cryptsetup-devel
212 BuildRequires: libcurl-devel
213 BuildRequires: libcap-ng-devel
214 BuildRequires: fmt-devel >= 5.2.1
215 BuildRequires: pkgconfig(libudev)
216 BuildRequires: libnl3-devel
217 BuildRequires: liboath-devel
218 BuildRequires: libtool
219 BuildRequires: libxml2-devel
221 BuildRequires: ncurses-devel
222 BuildRequires: libicu-devel
223 BuildRequires: parted
226 BuildRequires: pkgconfig
227 BuildRequires: procps
228 BuildRequires: python%{python3_pkgversion}
229 BuildRequires: python%{python3_pkgversion}-devel
230 BuildRequires: python%{python3_pkgversion}-setuptools
231 BuildRequires: python%{python3_pkgversion}-Cython
232 BuildRequires: snappy-devel
233 BuildRequires: sqlite-devel
235 BuildRequires: pkgconfig(udev)
236 BuildRequires: util-linux
237 BuildRequires: valgrind-devel
239 BuildRequires: xfsprogs
240 BuildRequires: xfsprogs-devel
241 BuildRequires: xmlstarlet
243 BuildRequires: lua-devel
244 %if 0%{with amqp_endpoint}
245 BuildRequires: librabbitmq-devel
247 %if 0%{with kafka_endpoint}
248 BuildRequires: librdkafka-devel
250 %if 0%{with lua_packages}
251 BuildRequires: %{luarocks_package_name}
253 %if 0%{with make_check}
255 BuildRequires: libuuid-devel
256 BuildRequires: python%{python3_pkgversion}-bcrypt
257 BuildRequires: python%{python3_pkgversion}-nose
258 BuildRequires: python%{python3_pkgversion}-pecan
259 BuildRequires: python%{python3_pkgversion}-requests
260 BuildRequires: python%{python3_pkgversion}-dateutil
261 BuildRequires: python%{python3_pkgversion}-virtualenv
262 BuildRequires: python%{python3_pkgversion}-coverage
263 BuildRequires: python%{python3_pkgversion}-pyOpenSSL
267 BuildRequires: libzbd-devel
272 %if 0%{?fedora} || 0%{?rhel}
273 BuildRequires: json-devel
275 %if 0%{?suse_version}
276 BuildRequires: nlohmann_json-devel
278 BuildRequires: libevent-devel
279 BuildRequires: yaml-cpp-devel
281 %if 0%{with system_pmdk}
282 BuildRequires: libpmem-devel
283 BuildRequires: libpmemobj-devel
286 BuildRequires: c-ares-devel
287 BuildRequires: gnutls-devel
288 BuildRequires: hwloc-devel
289 BuildRequires: libpciaccess-devel
290 BuildRequires: lksctp-tools-devel
291 BuildRequires: protobuf-devel
293 BuildRequires: systemtap-sdt-devel
294 BuildRequires: yaml-cpp-devel
296 BuildRequires: libubsan
297 BuildRequires: libasan
298 BuildRequires: libatomic
301 BuildRequires: gcc-toolset-9-annobin
302 BuildRequires: gcc-toolset-9-libubsan-devel
303 BuildRequires: gcc-toolset-9-libasan-devel
304 BuildRequires: gcc-toolset-9-libatomic-devel
307 #################################################################################
308 # distro-conditional dependencies
309 #################################################################################
310 %if 0%{?suse_version}
311 BuildRequires: pkgconfig(systemd)
312 BuildRequires: systemd-rpm-macros
314 PreReq: %fillup_prereq
315 BuildRequires: fdupes
316 BuildRequires: memory-constraints
317 BuildRequires: net-tools
318 BuildRequires: libbz2-devel
319 BuildRequires: mozilla-nss-devel
320 BuildRequires: keyutils-devel
321 BuildRequires: libopenssl-devel
322 BuildRequires: lsb-release
323 BuildRequires: openldap2-devel
325 #BuildRequires: krb5-devel
326 BuildRequires: cunit-devel
327 BuildRequires: python%{python3_pkgversion}-PrettyTable
328 BuildRequires: python%{python3_pkgversion}-PyYAML
329 BuildRequires: python%{python3_pkgversion}-Sphinx
330 BuildRequires: rdma-core-devel
331 BuildRequires: liblz4-devel >= 1.7
332 # for prometheus-alerts
333 BuildRequires: golang-github-prometheus-prometheus
335 %if 0%{?fedora} || 0%{?rhel}
337 BuildRequires: boost-random
338 BuildRequires: nss-devel
339 BuildRequires: keyutils-libs-devel
340 BuildRequires: libibverbs-devel
341 BuildRequires: librdmacm-devel
342 BuildRequires: openldap-devel
343 #BuildRequires: krb5-devel
344 BuildRequires: openssl-devel
345 BuildRequires: CUnit-devel
346 BuildRequires: redhat-lsb-core
347 BuildRequires: python%{python3_pkgversion}-devel
348 BuildRequires: python%{python3_pkgversion}-prettytable
349 BuildRequires: python%{python3_pkgversion}-pyyaml
350 BuildRequires: python%{python3_pkgversion}-sphinx
351 BuildRequires: lz4-devel >= 1.7
353 # distro-conditional make check dependencies
354 %if 0%{with make_check}
355 %if 0%{?fedora} || 0%{?rhel}
356 BuildRequires: golang-github-prometheus
357 BuildRequires: libtool-ltdl-devel
358 BuildRequires: ninja-build
359 BuildRequires: xmlsec1
360 BuildRequires: xmlsec1-devel
362 BuildRequires: xmlsec1-nss
364 BuildRequires: xmlsec1-openssl
365 BuildRequires: xmlsec1-openssl-devel
366 BuildRequires: python%{python3_pkgversion}-cherrypy
367 BuildRequires: python%{python3_pkgversion}-jwt
368 BuildRequires: python%{python3_pkgversion}-routes
369 BuildRequires: python%{python3_pkgversion}-scipy
370 BuildRequires: python%{python3_pkgversion}-werkzeug
371 BuildRequires: python%{python3_pkgversion}-pyOpenSSL
373 %if 0%{?suse_version}
374 BuildRequires: golang-github-prometheus-prometheus
375 BuildRequires: libxmlsec1-1
376 BuildRequires: libxmlsec1-nss1
377 BuildRequires: libxmlsec1-openssl1
379 BuildRequires: python%{python3_pkgversion}-CherryPy
380 BuildRequires: python%{python3_pkgversion}-PyJWT
381 BuildRequires: python%{python3_pkgversion}-Routes
382 BuildRequires: python%{python3_pkgversion}-Werkzeug
383 BuildRequires: python%{python3_pkgversion}-numpy-devel
384 BuildRequires: xmlsec1-devel
385 BuildRequires: xmlsec1-openssl-devel
388 # lttng and babeltrace for rbd-replay-prep
390 %if 0%{?fedora} || 0%{?rhel}
391 BuildRequires: lttng-ust-devel
392 BuildRequires: libbabeltrace-devel
394 %if 0%{?suse_version}
395 BuildRequires: lttng-ust-devel
396 BuildRequires: babeltrace-devel
399 %if 0%{?suse_version}
400 BuildRequires: libexpat-devel
402 %if 0%{?rhel} || 0%{?fedora}
403 BuildRequires: expat-devel
406 %if 0%{?fedora} || 0%{?rhel}
407 BuildRequires: redhat-rpm-config
410 %if 0%{?fedora} || 0%{?rhel}
411 BuildRequires: cryptopp-devel
412 BuildRequires: numactl-devel
413 BuildRequires: protobuf-compiler
415 %if 0%{?suse_version}
416 BuildRequires: libcryptopp-devel
417 BuildRequires: libnuma-devel
421 BuildRequires: /usr/bin/pathfix.py
425 Ceph is a massively scalable, open-source, distributed storage system that runs
426 on commodity hardware and delivers object, block and file system storage.
429 #################################################################################
431 #################################################################################
433 Summary: Ceph Base Package
434 %if 0%{?suse_version}
435 Group: System/Filesystems
437 Provides: ceph-test:/usr/bin/ceph-kvstore-tool
438 Requires: ceph-common = %{_epoch_prefix}%{version}-%{release}
439 Requires: librbd1 = %{_epoch_prefix}%{version}-%{release}
440 Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
441 Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release}
442 Requires: librgw2 = %{_epoch_prefix}%{version}-%{release}
444 Requires: ceph-selinux = %{_epoch_prefix}%{version}-%{release}
453 Requires: python%{python3_pkgversion}-setuptools
457 %if 0%{?rhel} && 0%{?rhel} < 8
458 # The following is necessary due to tracker 36508 and can be removed once the
459 # associated upstream bugs are resolved.
460 %if 0%{with tcmalloc}
461 Requires: gperftools-libs >= 2.6.1
468 Base is the package that includes all the files shared amongst ceph servers
471 Summary: Utility to bootstrap Ceph clusters
474 Requires: python%{python3_pkgversion}
476 Recommends: podman >= 2.0.2
478 %description -n cephadm
479 Utility to bootstrap a Ceph cluster and manage Ceph daemons deployed
480 with systemd and podman.
482 %package -n ceph-common
484 %if 0%{?suse_version}
485 Group: System/Filesystems
487 Requires: librbd1 = %{_epoch_prefix}%{version}-%{release}
488 Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
489 Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release}
490 Requires: python%{python3_pkgversion}-rados = %{_epoch_prefix}%{version}-%{release}
491 Requires: python%{python3_pkgversion}-rbd = %{_epoch_prefix}%{version}-%{release}
492 Requires: python%{python3_pkgversion}-cephfs = %{_epoch_prefix}%{version}-%{release}
493 Requires: python%{python3_pkgversion}-rgw = %{_epoch_prefix}%{version}-%{release}
494 Requires: python%{python3_pkgversion}-ceph-argparse = %{_epoch_prefix}%{version}-%{release}
495 Requires: python%{python3_pkgversion}-ceph-common = %{_epoch_prefix}%{version}-%{release}
497 Requires: libjaeger = %{_epoch_prefix}%{version}-%{release}
499 %if 0%{?fedora} || 0%{?rhel}
500 Requires: python%{python3_pkgversion}-prettytable
502 %if 0%{?suse_version}
503 Requires: python%{python3_pkgversion}-PrettyTable
505 %if 0%{with libradosstriper}
506 Requires: libradosstriper1 = %{_epoch_prefix}%{version}-%{release}
509 %if 0%{?suse_version}
510 Requires(pre): pwdutils
512 %description -n ceph-common
513 Common utilities to mount and interact with a ceph storage cluster.
514 Comprised of files that are common to Ceph clients and servers.
517 Summary: Ceph Metadata Server Daemon
518 %if 0%{?suse_version}
519 Group: System/Filesystems
521 Requires: ceph-base = %{_epoch_prefix}%{version}-%{release}
523 ceph-mds is the metadata server daemon for the Ceph distributed file system.
524 One or more instances of ceph-mds collectively manage the file system
525 namespace, coordinating access to the shared OSD cluster.
528 Summary: Ceph Monitor Daemon
529 %if 0%{?suse_version}
530 Group: System/Filesystems
532 Provides: ceph-test:/usr/bin/ceph-monstore-tool
533 Requires: ceph-base = %{_epoch_prefix}%{version}-%{release}
536 %if 0%{?suse_version}
537 Requires: smartmontools
539 Recommends: smartmontools
543 Requires: libjaeger = %{_epoch_prefix}%{version}-%{release}
546 ceph-mon is the cluster monitor daemon for the Ceph distributed file
547 system. One or more instances of ceph-mon form a Paxos part-time
548 parliament cluster that provides extremely reliable and durable storage
549 of cluster membership, configuration, and state.
552 Summary: Ceph Manager Daemon
553 %if 0%{?suse_version}
554 Group: System/Filesystems
556 Requires: ceph-base = %{_epoch_prefix}%{version}-%{release}
557 Requires: ceph-mgr-modules-core = %{_epoch_prefix}%{version}-%{release}
558 Requires: libcephsqlite = %{_epoch_prefix}%{version}-%{release}
560 Recommends: ceph-mgr-dashboard = %{_epoch_prefix}%{version}-%{release}
561 Recommends: ceph-mgr-diskprediction-local = %{_epoch_prefix}%{version}-%{release}
562 Recommends: ceph-mgr-k8sevents = %{_epoch_prefix}%{version}-%{release}
563 Recommends: ceph-mgr-cephadm = %{_epoch_prefix}%{version}-%{release}
564 Recommends: python%{python3_pkgversion}-influxdb
567 ceph-mgr enables python modules that provide services (such as the REST
568 module derived from Calamari) and expose CLI hooks. ceph-mgr gathers
569 the cluster maps, the daemon metadata, and performance counters, and
570 exposes all these to the python modules.
572 %package mgr-dashboard
573 Summary: Ceph Dashboard
575 %if 0%{?suse_version}
576 Group: System/Filesystems
578 Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release}
579 Requires: ceph-grafana-dashboards = %{_epoch_prefix}%{version}-%{release}
580 Requires: ceph-prometheus-alerts = %{_epoch_prefix}%{version}-%{release}
581 %if 0%{?fedora} || 0%{?rhel}
582 Requires: python%{python3_pkgversion}-cherrypy
583 Requires: python%{python3_pkgversion}-jwt
584 Requires: python%{python3_pkgversion}-routes
585 Requires: python%{python3_pkgversion}-werkzeug
587 Recommends: python%{python3_pkgversion}-saml
590 %if 0%{?suse_version}
591 Requires: python%{python3_pkgversion}-CherryPy
592 Requires: python%{python3_pkgversion}-PyJWT
593 Requires: python%{python3_pkgversion}-Routes
594 Requires: python%{python3_pkgversion}-Werkzeug
595 Recommends: python%{python3_pkgversion}-python3-saml
597 %description mgr-dashboard
598 ceph-mgr-dashboard is a manager module, providing a web-based application
599 to monitor and manage many aspects of a Ceph cluster and related components.
600 See the Dashboard documentation at http://docs.ceph.com/ for details and a
601 detailed feature overview.
603 %package mgr-diskprediction-local
604 Summary: Ceph Manager module for predicting disk failures
606 %if 0%{?suse_version}
607 Group: System/Filesystems
609 Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release}
610 Requires: python%{python3_pkgversion}-numpy
611 %if 0%{?fedora} || 0%{?suse_version}
612 Requires: python%{python3_pkgversion}-scikit-learn
614 Requires: python3-scipy
615 %description mgr-diskprediction-local
616 ceph-mgr-diskprediction-local is a ceph-mgr module that tries to predict
617 disk failures using local algorithms and machine-learning databases.
619 %package mgr-modules-core
620 Summary: Ceph Manager modules which are always enabled
622 %if 0%{?suse_version}
623 Group: System/Filesystems
625 Requires: python%{python3_pkgversion}-bcrypt
626 Requires: python%{python3_pkgversion}-pecan
627 Requires: python%{python3_pkgversion}-pyOpenSSL
628 Requires: python%{python3_pkgversion}-requests
629 Requires: python%{python3_pkgversion}-dateutil
630 %if 0%{?fedora} || 0%{?rhel} >= 8
631 Requires: python%{python3_pkgversion}-cherrypy
632 Requires: python%{python3_pkgversion}-pyyaml
633 Requires: python%{python3_pkgversion}-werkzeug
635 %if 0%{?suse_version}
636 Requires: python%{python3_pkgversion}-CherryPy
637 Requires: python%{python3_pkgversion}-PyYAML
638 Requires: python%{python3_pkgversion}-Werkzeug
641 Recommends: ceph-mgr-rook = %{_epoch_prefix}%{version}-%{release}
643 %description mgr-modules-core
644 ceph-mgr-modules-core provides a set of modules which are always
649 Summary: Ceph Manager module for Rook-based orchestration
650 %if 0%{?suse_version}
651 Group: System/Filesystems
653 Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release}
654 Requires: python%{python3_pkgversion}-kubernetes
655 Requires: python%{python3_pkgversion}-jsonpatch
656 %description mgr-rook
657 ceph-mgr-rook is a ceph-mgr module for orchestration functions using
660 %package mgr-k8sevents
662 Summary: Ceph Manager module to orchestrate ceph-events to kubernetes' events API
663 %if 0%{?suse_version}
664 Group: System/Filesystems
666 Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release}
667 Requires: python%{python3_pkgversion}-kubernetes
668 %description mgr-k8sevents
669 ceph-mgr-k8sevents is a ceph-mgr module that sends every ceph-events
670 to kubernetes' events API
673 Summary: Ceph Manager module for cephadm-based orchestration
675 %if 0%{?suse_version}
676 Group: System/Filesystems
678 Requires: ceph-mgr = %{_epoch_prefix}%{version}-%{release}
679 Requires: python%{python3_pkgversion}-remoto
680 Requires: cephadm = %{_epoch_prefix}%{version}-%{release}
681 %if 0%{?suse_version}
683 Requires: python%{python3_pkgversion}-Jinja2
685 %if 0%{?rhel} || 0%{?fedora}
686 Requires: openssh-clients
687 Requires: python%{python3_pkgversion}-jinja2
689 %description mgr-cephadm
690 ceph-mgr-cephadm is a ceph-mgr module for orchestration functions using
691 the integrated cephadm deployment tool management operations.
694 Summary: Ceph fuse-based client
695 %if 0%{?suse_version}
696 Group: System/Filesystems
699 Requires: python%{python3_pkgversion}
701 FUSE-based client for the Ceph distributed network file system
703 %package -n cephfs-mirror
704 Summary: Ceph daemon for mirroring CephFS snapshots
705 %if 0%{?suse_version}
706 Group: System/Filesystems
708 Requires: ceph-base = %{_epoch_prefix}%{version}-%{release}
709 Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
710 Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release}
711 %description -n cephfs-mirror
712 Daemon for mirroring CephFS snapshots between Ceph clusters.
715 Summary: Ceph fuse-based client
716 %if 0%{?suse_version}
717 Group: System/Filesystems
719 Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
720 Requires: librbd1 = %{_epoch_prefix}%{version}-%{release}
721 %description -n rbd-fuse
722 FUSE-based client to map Ceph rbd images to files
724 %package -n rbd-mirror
725 Summary: Ceph daemon for mirroring RBD images
726 %if 0%{?suse_version}
727 Group: System/Filesystems
729 Requires: ceph-base = %{_epoch_prefix}%{version}-%{release}
730 Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
731 Requires: librbd1 = %{_epoch_prefix}%{version}-%{release}
732 %description -n rbd-mirror
733 Daemon for mirroring RBD images between Ceph clusters, streaming
734 changes asynchronously.
736 %package immutable-object-cache
737 Summary: Ceph daemon for immutable object cache
738 %if 0%{?suse_version}
739 Group: System/Filesystems
741 Requires: ceph-base = %{_epoch_prefix}%{version}-%{release}
742 Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
743 %description immutable-object-cache
744 Daemon for immutable object cache.
747 Summary: Ceph RBD client based on NBD
748 %if 0%{?suse_version}
749 Group: System/Filesystems
751 Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
752 Requires: librbd1 = %{_epoch_prefix}%{version}-%{release}
753 %description -n rbd-nbd
754 NBD-based client to map Ceph rbd images to a local device
757 Summary: Rados REST gateway
758 %if 0%{?suse_version}
759 Group: System/Filesystems
761 Requires: ceph-base = %{_epoch_prefix}%{version}-%{release}
763 Requires: ceph-selinux = %{_epoch_prefix}%{version}-%{release}
765 Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
766 Requires: librgw2 = %{_epoch_prefix}%{version}-%{release}
767 %if 0%{?rhel} || 0%{?fedora}
774 RADOS is a distributed object store used by the Ceph distributed
775 storage system. This package provides a REST gateway to the
776 object store that aims to implement a superset of Amazon's S3
777 service as well as the OpenStack Object Storage ("Swift") API.
779 %package -n cephfs-top
780 Summary: top(1) like utility for Ceph Filesystem
782 Requires: python%{python3_pkgversion}-rados
783 %description -n cephfs-top
784 This package provides a top(1) like utility to display Ceph Filesystem metrics
788 %package resource-agents
789 Summary: OCF-compliant resource agents for Ceph daemons
791 %if 0%{?suse_version}
792 Group: System/Filesystems
794 Requires: ceph-base = %{_epoch_prefix}%{version}
795 Requires: resource-agents
796 %description resource-agents
797 Resource agents for monitoring and managing Ceph daemons
798 under Open Cluster Framework (OCF) compliant resource
799 managers such as Pacemaker.
803 Summary: Ceph Object Storage Daemon
804 %if 0%{?suse_version}
805 Group: System/Filesystems
807 Provides: ceph-test:/usr/bin/ceph-osdomap-tool
808 Requires: ceph-base = %{_epoch_prefix}%{version}-%{release}
811 Requires: libstoragemgmt
812 Requires: python%{python3_pkgversion}-ceph-common = %{_epoch_prefix}%{version}-%{release}
815 %if 0%{?suse_version}
816 Requires: smartmontools
818 Recommends: smartmontools
822 ceph-osd is the object storage daemon for the Ceph distributed file
823 system. It is responsible for storing objects on a local file system
824 and providing access to them over the network.
828 Summary: Ceph Object Storage Daemon (crimson)
829 %if 0%{?suse_version}
830 Group: System/Filesystems
832 Requires: ceph-osd = %{_epoch_prefix}%{version}-%{release}
834 %description crimson-osd
835 crimson-osd is the object storage daemon for the Ceph distributed file
836 system. It is responsible for storing objects on a local file system
837 and providing access to them over the network.
840 %package -n librados2
841 Summary: RADOS distributed object store client library
842 %if 0%{?suse_version}
843 Group: System/Libraries
845 %if 0%{?rhel} || 0%{?fedora}
846 Obsoletes: ceph-libs < %{_epoch_prefix}%{version}-%{release}
848 %description -n librados2
849 RADOS is a reliable, autonomic distributed object storage cluster
850 developed as part of the Ceph distributed storage system. This is a
851 shared library allowing applications to access the distributed object
852 store using a simple file-like interface.
854 %package -n librados-devel
855 Summary: RADOS headers
856 %if 0%{?suse_version}
857 Group: Development/Libraries/C and C++
859 Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
860 Obsoletes: ceph-devel < %{_epoch_prefix}%{version}-%{release}
861 Provides: librados2-devel = %{_epoch_prefix}%{version}-%{release}
862 Obsoletes: librados2-devel < %{_epoch_prefix}%{version}-%{release}
863 %description -n librados-devel
864 This package contains C libraries and headers needed to develop programs
865 that use RADOS object store.
867 %package -n libradospp-devel
868 Summary: RADOS headers
869 %if 0%{?suse_version}
870 Group: Development/Libraries/C and C++
872 Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
873 Requires: librados-devel = %{_epoch_prefix}%{version}-%{release}
874 %description -n libradospp-devel
875 This package contains C++ libraries and headers needed to develop programs
876 that use RADOS object store.
879 Summary: RADOS gateway client library
880 %if 0%{?suse_version}
881 Group: System/Libraries
883 Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
884 %description -n librgw2
885 This package provides a library implementation of the RADOS gateway
886 (distributed object store with S3 and Swift personalities).
888 %package -n librgw-devel
889 Summary: RADOS gateway client library
890 %if 0%{?suse_version}
891 Group: Development/Libraries/C and C++
893 Requires: librados-devel = %{_epoch_prefix}%{version}-%{release}
894 Requires: librgw2 = %{_epoch_prefix}%{version}-%{release}
895 Provides: librgw2-devel = %{_epoch_prefix}%{version}-%{release}
896 Obsoletes: librgw2-devel < %{_epoch_prefix}%{version}-%{release}
897 %description -n librgw-devel
898 This package contains libraries and headers needed to develop programs
899 that use RADOS gateway client library.
901 %package -n python%{python3_pkgversion}-rgw
902 Summary: Python 3 libraries for the RADOS gateway
903 %if 0%{?suse_version}
904 Group: Development/Libraries/Python
906 Requires: librgw2 = %{_epoch_prefix}%{version}-%{release}
907 Requires: python%{python3_pkgversion}-rados = %{_epoch_prefix}%{version}-%{release}
908 %{?python_provide:%python_provide python%{python3_pkgversion}-rgw}
909 Provides: python-rgw = %{_epoch_prefix}%{version}-%{release}
910 Obsoletes: python-rgw < %{_epoch_prefix}%{version}-%{release}
911 %description -n python%{python3_pkgversion}-rgw
912 This package contains Python 3 libraries for interacting with Ceph RADOS
915 %package -n python%{python3_pkgversion}-rados
916 Summary: Python 3 libraries for the RADOS object store
917 %if 0%{?suse_version}
918 Group: Development/Libraries/Python
920 Requires: python%{python3_pkgversion}
921 Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
922 %{?python_provide:%python_provide python%{python3_pkgversion}-rados}
923 Provides: python-rados = %{_epoch_prefix}%{version}-%{release}
924 Obsoletes: python-rados < %{_epoch_prefix}%{version}-%{release}
925 %description -n python%{python3_pkgversion}-rados
926 This package contains Python 3 libraries for interacting with Ceph RADOS
929 %package -n libcephsqlite
930 Summary: SQLite3 VFS for Ceph
931 %if 0%{?suse_version}
932 Group: System/Libraries
934 Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
935 %description -n libcephsqlite
936 A SQLite3 VFS for storing and manipulating databases stored on Ceph's RADOS
937 distributed object store.
939 %package -n libcephsqlite-devel
940 Summary: SQLite3 VFS for Ceph headers
941 %if 0%{?suse_version}
942 Group: Development/Libraries/C and C++
944 Requires: sqlite-devel
945 Requires: libcephsqlite = %{_epoch_prefix}%{version}-%{release}
946 Requires: librados-devel = %{_epoch_prefix}%{version}-%{release}
947 Requires: libradospp-devel = %{_epoch_prefix}%{version}-%{release}
948 Obsoletes: ceph-devel < %{_epoch_prefix}%{version}-%{release}
949 Provides: libcephsqlite-devel = %{_epoch_prefix}%{version}-%{release}
950 Obsoletes: libcephsqlite-devel < %{_epoch_prefix}%{version}-%{release}
951 %description -n libcephsqlite-devel
952 A SQLite3 VFS for storing and manipulating databases stored on Ceph's RADOS
953 distributed object store.
955 %if 0%{with libradosstriper}
956 %package -n libradosstriper1
957 Summary: RADOS striping interface
958 %if 0%{?suse_version}
959 Group: System/Libraries
961 Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
962 %description -n libradosstriper1
963 Striping interface built on top of the rados library, allowing
964 to stripe bigger objects onto several standard rados objects using
965 an interface very similar to the rados one.
967 %package -n libradosstriper-devel
968 Summary: RADOS striping interface headers
969 %if 0%{?suse_version}
970 Group: Development/Libraries/C and C++
972 Requires: libradosstriper1 = %{_epoch_prefix}%{version}-%{release}
973 Requires: librados-devel = %{_epoch_prefix}%{version}-%{release}
974 Requires: libradospp-devel = %{_epoch_prefix}%{version}-%{release}
975 Obsoletes: ceph-devel < %{_epoch_prefix}%{version}-%{release}
976 Provides: libradosstriper1-devel = %{_epoch_prefix}%{version}-%{release}
977 Obsoletes: libradosstriper1-devel < %{_epoch_prefix}%{version}-%{release}
978 %description -n libradosstriper-devel
979 This package contains libraries and headers needed to develop programs
980 that use RADOS striping interface.
# librbd1 runtime library stanza.
# NOTE(review): the package header line for librbd1 is not visible here
# (elided between chunks) - these tags presumably belong to it; confirm.
984 Summary: RADOS block device client library
985 %if 0%{?suse_version}
986 Group: System/Libraries
988 Requires: librados2 = %{_epoch_prefix}%{version}-%{release}
989 %if 0%{?suse_version}
990 Requires(post): coreutils
992 %if 0%{?rhel} || 0%{?fedora}
993 Obsoletes: ceph-libs < %{_epoch_prefix}%{version}-%{release}
995 %description -n librbd1
996 RBD is a block device striped across multiple distributed objects in
997 RADOS, a reliable, autonomic distributed object storage cluster
998 developed as part of the Ceph distributed storage system. This is a
999 shared library allowing applications to manage these block devices.
# Development headers for librbd; renames librbd1-devel via Provides/Obsoletes.
1001 %package -n librbd-devel
1002 Summary: RADOS block device headers
1003 %if 0%{?suse_version}
1004 Group: Development/Libraries/C and C++
1006 Requires: librbd1 = %{_epoch_prefix}%{version}-%{release}
1007 Requires: librados-devel = %{_epoch_prefix}%{version}-%{release}
1008 Requires: libradospp-devel = %{_epoch_prefix}%{version}-%{release}
1009 Obsoletes: ceph-devel < %{_epoch_prefix}%{version}-%{release}
1010 Provides: librbd1-devel = %{_epoch_prefix}%{version}-%{release}
1011 Obsoletes: librbd1-devel < %{_epoch_prefix}%{version}-%{release}
1012 %description -n librbd-devel
1013 This package contains libraries and headers needed to develop programs
1014 that use RADOS block device.
# Python 3 binding for librbd; the python-rbd Provides/Obsoletes pair keeps
# the unversioned python2-era package name resolvable on upgrade.
1016 %package -n python%{python3_pkgversion}-rbd
1017 Summary: Python 3 libraries for the RADOS block device
1018 %if 0%{?suse_version}
1019 Group: Development/Libraries/Python
1021 Requires: librbd1 = %{_epoch_prefix}%{version}-%{release}
1022 Requires: python%{python3_pkgversion}-rados = %{_epoch_prefix}%{version}-%{release}
1023 %{?python_provide:%python_provide python%{python3_pkgversion}-rbd}
1024 Provides: python-rbd = %{_epoch_prefix}%{version}-%{release}
1025 Obsoletes: python-rbd < %{_epoch_prefix}%{version}-%{release}
1026 %description -n python%{python3_pkgversion}-rbd
1027 This package contains Python 3 libraries for interacting with Ceph RADOS
# libcephfs2 runtime library: POSIX-like client access to CephFS.
# Obsoletes lines retire the v1 soname package and (on RHEL/Fedora) the
# old monolithic ceph-libs / ceph-libcephfs packages.
1030 %package -n libcephfs2
1031 Summary: Ceph distributed file system client library
1032 %if 0%{?suse_version}
1033 Group: System/Libraries
1035 Obsoletes: libcephfs1 < %{_epoch_prefix}%{version}-%{release}
1036 %if 0%{?rhel} || 0%{?fedora}
1037 Obsoletes: ceph-libs < %{_epoch_prefix}%{version}-%{release}
1038 Obsoletes: ceph-libcephfs
1040 %description -n libcephfs2
1041 Ceph is a distributed network file system designed to provide excellent
1042 performance, reliability, and scalability. This is a shared library
1043 allowing applications to access a Ceph distributed file system via a
1044 POSIX-like interface.
# Development headers for libcephfs2.
1046 %package -n libcephfs-devel
1047 Summary: Ceph distributed file system headers
1048 %if 0%{?suse_version}
1049 Group: Development/Libraries/C and C++
1051 Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release}
1052 Requires: librados-devel = %{_epoch_prefix}%{version}-%{release}
1053 Obsoletes: ceph-devel < %{_epoch_prefix}%{version}-%{release}
1054 Provides: libcephfs2-devel = %{_epoch_prefix}%{version}-%{release}
1055 Obsoletes: libcephfs2-devel < %{_epoch_prefix}%{version}-%{release}
1056 %description -n libcephfs-devel
1057 This package contains libraries and headers needed to develop programs
1058 that use Ceph distributed file system.
# Bundled Jaeger/OpenTracing/Thrift tracing libraries; the explicit
# soname Provides let dependent packages resolve against the bundled copies.
1061 %package -n libjaeger
1062 Summary: Ceph distributed file system tracing library
1063 %if 0%{?suse_version}
1064 Group: System/Libraries
1066 Provides: libjaegertracing.so.0()(64bit)
1067 Provides: libopentracing.so.1()(64bit)
1068 Provides: libthrift.so.0.13.0()(64bit)
1069 %description -n libjaeger
1070 This package contains libraries needed to provide distributed
# Python 3 binding for libcephfs.
1074 %package -n python%{python3_pkgversion}-cephfs
1075 Summary: Python 3 libraries for Ceph distributed file system
1076 %if 0%{?suse_version}
1077 Group: Development/Libraries/Python
1079 Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release}
1080 Requires: python%{python3_pkgversion}-rados = %{_epoch_prefix}%{version}-%{release}
1081 Requires: python%{python3_pkgversion}-ceph-argparse = %{_epoch_prefix}%{version}-%{release}
1082 %{?python_provide:%python_provide python%{python3_pkgversion}-cephfs}
1083 Provides: python-cephfs = %{_epoch_prefix}%{version}-%{release}
1084 Obsoletes: python-cephfs < %{_epoch_prefix}%{version}-%{release}
1085 %description -n python%{python3_pkgversion}-cephfs
1086 This package contains Python 3 libraries for interacting with Ceph distributed
# Pure-Python helper library used by the CLI for command validation.
1089 %package -n python%{python3_pkgversion}-ceph-argparse
1090 Summary: Python 3 utility libraries for Ceph CLI
1091 %if 0%{?suse_version}
1092 Group: Development/Libraries/Python
1094 %{?python_provide:%python_provide python%{python3_pkgversion}-ceph-argparse}
1095 %description -n python%{python3_pkgversion}-ceph-argparse
1096 This package contains types and routines for Python 3 used by the Ceph CLI as
1097 well as the RESTful interface. These have to do with querying the daemons for
1098 command-description information, validating user command input against those
1099 descriptions, and submitting the command to the appropriate daemon.
# Shared Python utility code; note the PyYAML package name differs by
# distro family (lowercase on Fedora/RHEL 8+, CamelCase on SUSE).
1101 %package -n python%{python3_pkgversion}-ceph-common
1102 Summary: Python 3 utility libraries for Ceph
1103 %if 0%{?fedora} || 0%{?rhel} >= 8
1104 Requires: python%{python3_pkgversion}-pyyaml
1106 %if 0%{?suse_version}
1107 Requires: python%{python3_pkgversion}-PyYAML
1109 %if 0%{?suse_version}
1110 Group: Development/Libraries/Python
1112 %{?python_provide:%python_provide python%{python3_pkgversion}-ceph-common}
1113 %description -n python%{python3_pkgversion}-ceph-common
1114 This package contains data structures, classes and functions used by Ceph.
1115 It also contains utilities used for the cephadm orchestrator.
# Optional interactive CephFS shell; only built when the cephfs_shell
# bcond is enabled.
1117 %if 0%{with cephfs_shell}
1118 %package -n cephfs-shell
1119 Summary: Interactive shell for Ceph file system
1120 Requires: python%{python3_pkgversion}-cmd2
1121 Requires: python%{python3_pkgversion}-colorama
1122 Requires: python%{python3_pkgversion}-cephfs
1123 %description -n cephfs-shell
1124 This package contains an interactive tool that allows accessing a Ceph
1125 file system without mounting it by providing a nice pseudo-shell which
1126 works like an FTP client.
# Benchmark/test tools package, gated behind the ceph_test_package bcond
# (enabled by default per the head of this spec).
1129 %if 0%{with ceph_test_package}
1130 %package -n ceph-test
1131 Summary: Ceph benchmarks and test tools
1132 %if 0%{?suse_version}
1133 Group: System/Benchmark
1135 Requires: ceph-common = %{_epoch_prefix}%{version}-%{release}
1136 Requires: xmlstarlet
1139 %description -n ceph-test
1140 This package contains Ceph benchmarks and test tools.
# CephFS Java bindings: JNI shared library, its devel package, and the
# Java jar package. All gated behind the cephfs_java bcond.
1143 %if 0%{with cephfs_java}
1145 %package -n libcephfs_jni1
1146 Summary: Java Native Interface library for CephFS Java bindings
1147 %if 0%{?suse_version}
1148 Group: System/Libraries
1151 Requires: libcephfs2 = %{_epoch_prefix}%{version}-%{release}
1152 %description -n libcephfs_jni1
1153 This package contains the Java Native Interface library for CephFS Java
1156 %package -n libcephfs_jni-devel
1157 Summary: Development files for CephFS Java Native Interface library
1158 %if 0%{?suse_version}
1159 Group: Development/Libraries/Java
1162 Requires: libcephfs_jni1 = %{_epoch_prefix}%{version}-%{release}
1163 Obsoletes: ceph-devel < %{_epoch_prefix}%{version}-%{release}
1164 Provides: libcephfs_jni1-devel = %{_epoch_prefix}%{version}-%{release}
1165 Obsoletes: libcephfs_jni1-devel < %{_epoch_prefix}%{version}-%{release}
1166 %description -n libcephfs_jni-devel
1167 This package contains the development files for CephFS Java Native Interface
1170 %package -n cephfs-java
1171 Summary: Java libraries for the Ceph File System
1172 %if 0%{?suse_version}
1173 Group: System/Libraries
1176 Requires: libcephfs_jni1 = %{_epoch_prefix}%{version}-%{release}
# NOTE(review): junit as a BuildRequires inside a package stanza is unusual
# but deliberate upstream - it is needed to build/test the Java bindings.
1178 BuildRequires: junit
1179 %description -n cephfs-java
1180 This package contains the Java libraries for the Ceph File System.
# Development kit for writing RADOS object classes (cls plugins).
1184 %package -n rados-objclass-devel
1185 Summary: RADOS object class development kit
1186 %if 0%{?suse_version}
1187 Group: Development/Libraries/C and C++
1189 Requires: libradospp-devel = %{_epoch_prefix}%{version}-%{release}
1190 %description -n rados-objclass-devel
1191 This package contains libraries and headers needed to develop RADOS object
# SELinux policy subpackage (Fedora/RHEL only, per the bcond at the top of
# the spec). Requires(post) pins the tools needed for relabelling at install.
1194 %if 0%{with selinux}
1197 Summary: SELinux support for Ceph MON, OSD and MDS
1198 %if 0%{?suse_version}
1199 Group: System/Filesystems
1201 Requires: ceph-base = %{_epoch_prefix}%{version}-%{release}
1202 Requires: policycoreutils, libselinux-utils
1203 Requires(post): ceph-base = %{_epoch_prefix}%{version}-%{release}
1204 Requires(post): selinux-policy-base >= %{_selinux_policy_version}, policycoreutils, gawk
1205 Requires(postun): policycoreutils
1206 %description selinux
1207 This package contains SELinux support for Ceph MON, OSD and MDS. The package
1208 also performs file-system relabelling which can take a long time on heavily
1209 populated file-systems.
# Grafana dashboard JSON files for the Ceph Manager Dashboard integration.
1213 %package grafana-dashboards
1214 Summary: The set of Grafana dashboards for monitoring purposes
1216 %if 0%{?suse_version}
1217 Group: System/Filesystems
1219 %description grafana-dashboards
1220 This package provides a set of Grafana dashboards for monitoring of
1221 Ceph clusters. The dashboards require a Prometheus server setup
1222 collecting data from Ceph Manager "prometheus" module and Prometheus
1223 project "node_exporter" module. The dashboards are designed to be
1224 integrated with the Ceph Manager Dashboard web UI.
# Default Prometheus alerting rules shipped as data files.
1226 %package prometheus-alerts
1227 Summary: Prometheus alerts for a Ceph deployment
1229 Group: System/Monitoring
1230 %description prometheus-alerts
1231 This package provides Ceph default alerts for Prometheus.
1233 #################################################################################
1235 #################################################################################
# Unpack the release tarball (name templated at dist-tarball creation time)
# and apply patches with -p1.
1237 %autosetup -p1 -n @TARBALL_BASENAME@
1240 # LTO can be enabled as soon as the following GCC bug is fixed:
1241 # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=48200
1242 %define _lto_cflags %{nil}
# Crimson (seastar) on RHEL needs the newer gcc from the toolset SCL.
1244 %if 0%{with seastar} && 0%{?rhel}
1245 . /opt/rh/gcc-toolset-9/enable
# Collect JNI include paths for the Java bindings build; probes both
# lib64 and lib JVM locations and keeps whichever directories exist.
1248 %if 0%{with cephfs_java}
1250 for i in /usr/{lib64,lib}/jvm/java/include{,/linux}; do
1251 [ -d $i ] && java_inc="$java_inc -I$i"
# SUSE: cap build parallelism by per-job memory (2600 MB) to avoid OOM.
1255 %if 0%{?suse_version}
1256 %limit_build -m 2600
# Propagate RPM-mandated flags into the build environment.
1259 export CPPFLAGS="$java_inc"
1260 export CFLAGS="$RPM_OPT_FLAGS"
1261 export CXXFLAGS="$RPM_OPT_FLAGS"
1262 export LDFLAGS="$RPM_LD_FLAGS"
1264 %if 0%{with seastar}
1265 # seastar uses longjmp() to implement coroutine. and this annoys longjmp_chk()
1266 export CXXFLAGS=$(echo $RPM_OPT_FLAGS | sed -e 's/-Wp,-D_FORTIFY_SOURCE=2//g')
# Out-of-source cmake configure + build. The argument list below is a single
# backslash-continued command interleaved with build conditionals; do not
# insert anything between the -D lines. NOTE(review): the cmake invocation
# line itself is not visible in this chunk.
1271 %{?!_vpath_builddir:%global _vpath_builddir %{_target_platform}}
1273 # TODO: drop this step once we can use `cmake -B`
1274 mkdir -p %{_vpath_builddir}
1275 pushd %{_vpath_builddir}
1277 -DCMAKE_INSTALL_PREFIX=%{_prefix} \
1278 -DCMAKE_INSTALL_LIBDIR:PATH=%{_libdir} \
1279 -DCMAKE_INSTALL_LIBEXECDIR:PATH=%{_libexecdir} \
1280 -DCMAKE_INSTALL_LOCALSTATEDIR:PATH=%{_localstatedir} \
1281 -DCMAKE_INSTALL_SYSCONFDIR:PATH=%{_sysconfdir} \
1282 -DCMAKE_INSTALL_MANDIR:PATH=%{_mandir} \
1283 -DCMAKE_INSTALL_DOCDIR:PATH=%{_docdir}/ceph \
1284 -DCMAKE_INSTALL_INCLUDEDIR:PATH=%{_includedir} \
1285 -DCMAKE_INSTALL_SYSTEMD_SERVICEDIR:PATH=%{_unitdir} \
1286 -DWITH_MANPAGE:BOOL=ON \
1287 -DWITH_PYTHON3:STRING=%{python3_version} \
1288 -DWITH_MGR_DASHBOARD_FRONTEND:BOOL=OFF \
1289 %if 0%{without ceph_test_package}
1290 -DWITH_TESTS:BOOL=OFF \
1292 %if 0%{with cephfs_java}
1293 -DWITH_CEPHFS_JAVA:BOOL=ON \
1295 %if 0%{with selinux}
1296 -DWITH_SELINUX:BOOL=ON \
1299 -DWITH_LTTNG:BOOL=ON \
1300 -DWITH_BABELTRACE:BOOL=ON \
1302 -DWITH_LTTNG:BOOL=OFF \
1303 -DWITH_BABELTRACE:BOOL=OFF \
1305 $CEPH_EXTRA_CMAKE_ARGS \
1307 -DWITH_OCF:BOOL=ON \
1309 %if 0%{with cephfs_shell}
1310 -DWITH_CEPHFS_SHELL:BOOL=ON \
1312 %if 0%{with libradosstriper}
1313 -DWITH_LIBRADOSSTRIPER:BOOL=ON \
1315 -DWITH_LIBRADOSSTRIPER:BOOL=OFF \
1317 %if 0%{with amqp_endpoint}
1318 -DWITH_RADOSGW_AMQP_ENDPOINT:BOOL=ON \
1320 -DWITH_RADOSGW_AMQP_ENDPOINT:BOOL=OFF \
1322 %if 0%{with kafka_endpoint}
1323 -DWITH_RADOSGW_KAFKA_ENDPOINT:BOOL=ON \
1325 -DWITH_RADOSGW_KAFKA_ENDPOINT:BOOL=OFF \
1327 %if 0%{without lua_packages}
1328 -DWITH_RADOSGW_LUA_PACKAGES:BOOL=OFF \
1331 -DWITH_ZBD:BOOL=ON \
1333 %if 0%{with cmake_verbose_logging}
1334 -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON \
1336 %if 0%{with rbd_rwl_cache}
1337 -DWITH_RBD_RWL:BOOL=ON \
1339 %if 0%{with rbd_ssd_cache}
1340 -DWITH_RBD_SSD_CACHE:BOOL=ON \
1342 %if 0%{with system_pmdk}
1343 -DWITH_SYSTEM_PMDK:BOOL=ON \
1345 %if 0%{?suse_version}
1346 -DBOOST_J:STRING=%{jobs} \
1348 -DBOOST_J:STRING=%{_smp_build_ncpus} \
1350 -DWITH_GRAFANA:BOOL=ON
# Dump cmake logs when verbose logging was requested, to aid debugging
# configure failures in build logs.
1352 %if %{with cmake_verbose_logging}
1353 cat ./CMakeFiles/CMakeOutput.log
1354 cat ./CMakeFiles/CMakeError.log
1357 %if 0%{?suse_version}
# check section: run the in-tree unit tests only when make_check is enabled.
1365 %if 0%{with make_check}
1367 # run in-tree unittests
1368 pushd %{_vpath_builddir}
1369 ctest %{_smp_mflags}
# install section: stage built artifacts plus hand-installed config,
# systemd, sysctl, udev and sudoers files into the buildroot.
1375 pushd %{_vpath_builddir}
1377 # we have dropped sysvinit bits
1378 rm -f %{buildroot}/%{_sysconfdir}/init.d/ceph
# With crimson enabled, ship crimson-osd under the stock ceph-osd name.
1381 %if 0%{with seastar}
1382 # package crimson-osd with the name of ceph-osd
1383 install -m 0755 %{buildroot}%{_bindir}/crimson-osd %{buildroot}%{_bindir}/ceph-osd
1386 install -m 0644 -D src/etc-rbdmap %{buildroot}%{_sysconfdir}/ceph/rbdmap
# sysconfig file location differs: /etc/sysconfig on Fedora/RHEL,
# fillup template dir on SUSE (see _fillupdir compat macro in the header).
1387 %if 0%{?fedora} || 0%{?rhel}
1388 install -m 0644 -D etc/sysconfig/ceph %{buildroot}%{_sysconfdir}/sysconfig/ceph
1390 %if 0%{?suse_version}
1391 install -m 0644 -D etc/sysconfig/ceph %{buildroot}%{_fillupdir}/sysconfig.%{name}
1393 install -m 0644 -D systemd/ceph.tmpfiles.d %{buildroot}%{_tmpfilesdir}/ceph-common.conf
1394 install -m 0644 -D systemd/50-ceph.preset %{buildroot}%{_presetdir}/50-ceph.preset
1395 mkdir -p %{buildroot}%{_sbindir}
1396 install -m 0644 -D src/logrotate.conf %{buildroot}%{_sysconfdir}/logrotate.d/ceph
1397 chmod 0644 %{buildroot}%{_docdir}/ceph/sample.ceph.conf
1398 install -m 0644 -D COPYING %{buildroot}%{_docdir}/ceph/COPYING
1399 install -m 0644 -D etc/sysctl/90-ceph-osd.conf %{buildroot}%{_sysctldir}/90-ceph-osd.conf
1400 install -m 0755 -D src/tools/rbd_nbd/rbd-nbd_quiesce %{buildroot}%{_libexecdir}/rbd-nbd/rbd-nbd_quiesce
# cephadm home directory with a pre-created, tightly-permissioned .ssh
# (0700 dirs, 0600 authorized_keys) for the mgr/cephadm orchestrator.
1402 install -m 0755 src/cephadm/cephadm %{buildroot}%{_sbindir}/cephadm
1403 mkdir -p %{buildroot}%{_sharedstatedir}/cephadm
1404 chmod 0700 %{buildroot}%{_sharedstatedir}/cephadm
1405 mkdir -p %{buildroot}%{_sharedstatedir}/cephadm/.ssh
1406 chmod 0700 %{buildroot}%{_sharedstatedir}/cephadm/.ssh
1407 touch %{buildroot}%{_sharedstatedir}/cephadm/.ssh/authorized_keys
1408 chmod 0600 %{buildroot}%{_sharedstatedir}/cephadm/.ssh/authorized_keys
1410 # firewall templates and /sbin/mount.ceph symlink
# On non-usrmerged SUSE, mount(8) looks for helpers in /sbin.
1411 %if 0%{?suse_version} && !0%{?usrmerged}
1412 mkdir -p %{buildroot}/sbin
1413 ln -sf %{_sbindir}/mount.ceph %{buildroot}/sbin/mount.ceph
1417 install -m 0644 -D udev/50-rbd.rules %{buildroot}%{_udevrulesdir}/50-rbd.rules
1420 install -m 0440 -D sudoers.d/ceph-osd-smartctl %{buildroot}%{_sysconfdir}/sudoers.d/ceph-osd-smartctl
# Rewrite script shebangs to the packaged python3 interpreter.
1423 pathfix.py -pni "%{__python3} %{py3_shbang_opts}" %{buildroot}%{_bindir}/*
1424 pathfix.py -pni "%{__python3} %{py3_shbang_opts}" %{buildroot}%{_sbindir}/*
1427 #set up placeholder directories
1428 mkdir -p %{buildroot}%{_sysconfdir}/ceph
1429 mkdir -p %{buildroot}%{_localstatedir}/run/ceph
1430 mkdir -p %{buildroot}%{_localstatedir}/log/ceph
1431 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/tmp
1432 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/mon
1433 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/osd
1434 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/mds
1435 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/mgr
1436 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/crash
1437 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/crash/posted
1438 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/radosgw
1439 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-osd
1440 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-mds
1441 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-rgw
1442 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-mgr
1443 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-rbd
1444 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-rbd-mirror
# Prometheus default alert rules installed to a fixed /etc path.
1447 install -m 644 -D monitoring/prometheus/alerts/ceph_default_alerts.yml %{buildroot}/etc/prometheus/ceph/ceph_default_alerts.yml
# Byte-compile Python and deduplicate files; the mechanism differs per distro.
1449 %if 0%{?suse_version}
1450 # create __pycache__ directories and their contents
1451 %py3_compile %{buildroot}%{python3_sitelib}
1452 # hardlink duplicate files under /usr to save space
1453 %fdupes %{buildroot}%{_prefix}
1457 %py_byte_compile %{__python3} %{buildroot}%{python3_sitelib}
1463 #################################################################################
1464 # files and systemd scriptlets
1465 #################################################################################
# files list for ceph-base: core tools, plugin directories (rados classes,
# erasure code, compressor, crypto), logrotate/sysconfig, and the runtime
# state directories owned by the ceph user. NOTE(review): the files header
# line itself is not visible in this chunk.
1469 %{_bindir}/ceph-crash
1470 %{_bindir}/crushtool
1471 %{_bindir}/monmaptool
1472 %{_bindir}/osdmaptool
1473 %{_bindir}/ceph-kvstore-tool
1475 %{_presetdir}/50-ceph.preset
1476 %{_sbindir}/ceph-create-keys
1477 %dir %{_libexecdir}/ceph
1478 %{_libexecdir}/ceph/ceph_common.sh
1479 %dir %{_libdir}/rados-classes
1480 %{_libdir}/rados-classes/*
1481 %dir %{_libdir}/ceph
1482 %dir %{_libdir}/ceph/erasure-code
1483 %{_libdir}/ceph/erasure-code/libec_*.so*
1484 %dir %{_libdir}/ceph/compressor
1485 %{_libdir}/ceph/compressor/libceph_*.so*
1486 %{_unitdir}/ceph-crash.service
1487 %dir %{_libdir}/ceph/crypto
1488 %{_libdir}/ceph/crypto/libceph_*.so*
1490 %{_libdir}/libos_tp.so*
1491 %{_libdir}/libosd_tp.so*
# noreplace: keep the admin's edited logrotate/sysconfig files on upgrade.
1493 %config(noreplace) %{_sysconfdir}/logrotate.d/ceph
1494 %if 0%{?fedora} || 0%{?rhel}
1495 %config(noreplace) %{_sysconfdir}/sysconfig/ceph
1497 %if 0%{?suse_version}
1498 %{_fillupdir}/sysconfig.*
1500 %{_unitdir}/ceph.target
1501 %dir %{python3_sitelib}/ceph_volume
1502 %{python3_sitelib}/ceph_volume/*
1503 %{python3_sitelib}/ceph_volume-*
1504 %{_mandir}/man8/ceph-deploy.8*
1505 %{_mandir}/man8/ceph-create-keys.8*
1506 %{_mandir}/man8/ceph-run.8*
1507 %{_mandir}/man8/crushtool.8*
1508 %{_mandir}/man8/osdmaptool.8*
1509 %{_mandir}/man8/monmaptool.8*
1510 %{_mandir}/man8/ceph-kvstore-tool.8*
1511 #set up placeholder directories
# 750 ceph:ceph - state directories readable only by the daemon user/group.
1512 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/crash
1513 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/crash/posted
1514 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/tmp
1515 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-osd
1516 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-mds
1517 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rgw
1518 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-mgr
1519 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rbd
1520 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rbd-mirror
# post/preun/postun scriptlets for ceph-base: SUSE uses explicit systemctl
# preset on first install ($1 -eq 1), Fedora/RHEL use the systemd macros.
# NOTE(review): the scriptlet header lines are not visible in this chunk.
1524 %if 0%{?suse_version}
1526 if [ $1 -eq 1 ] ; then
1527 /usr/bin/systemctl preset ceph.target ceph-crash.service >/dev/null 2>&1 || :
1530 %if 0%{?fedora} || 0%{?rhel}
1531 %systemd_post ceph.target ceph-crash.service
# Start units only on fresh install, never on upgrade.
1533 if [ $1 -eq 1 ] ; then
1534 /usr/bin/systemctl start ceph.target ceph-crash.service >/dev/null 2>&1 || :
1538 %if 0%{?suse_version}
1539 %service_del_preun ceph.target ceph-crash.service
1541 %if 0%{?fedora} || 0%{?rhel}
1542 %systemd_preun ceph.target ceph-crash.service
1547 %systemd_postun ceph.target
# cephadm: create a dedicated system user/group with a login shell
# (the orchestrator runs commands over ssh as this user).
1550 getent group cephadm >/dev/null || groupadd -r cephadm
1551 getent passwd cephadm >/dev/null || useradd -r -g cephadm -s /bin/bash -c "cephadm user for mgr/cephadm" -d %{_sharedstatedir}/cephadm cephadm
# On non-SUSE, remove the cephadm account on package removal.
1554 %if ! 0%{?suse_version}
1556 userdel -r cephadm || true
# cephadm files: home dir and ssh material stay owned by cephadm, 0700/0600.
1562 %{_mandir}/man8/cephadm.8*
1563 %attr(0700,cephadm,cephadm) %dir %{_sharedstatedir}/cephadm
1564 %attr(0700,cephadm,cephadm) %dir %{_sharedstatedir}/cephadm/.ssh
1565 %attr(0600,cephadm,cephadm) %{_sharedstatedir}/cephadm/.ssh/authorized_keys
# files list for ceph-common: client CLIs, man pages, bash completion,
# drop.ceph.com upload keys, and shared log/lib directories.
# NOTE(review): the files header line is not visible in this chunk.
1568 %dir %{_docdir}/ceph
1569 %doc %{_docdir}/ceph/sample.ceph.conf
1570 %license %{_docdir}/ceph/COPYING
1572 %{_bindir}/ceph-authtool
1573 %{_bindir}/ceph-conf
1574 %{_bindir}/ceph-dencoder
1575 %{_bindir}/ceph-rbdnamer
1577 %{_bindir}/cephfs-data-scan
1578 %{_bindir}/cephfs-journal-tool
1579 %{_bindir}/cephfs-table-tool
1581 %{_bindir}/radosgw-admin
1583 %{_bindir}/rbd-replay
1584 %{_bindir}/rbd-replay-many
1586 %{_sbindir}/mount.ceph
# Extra /sbin symlink only on non-usrmerged SUSE (created in install).
1587 %if 0%{?suse_version} && !0%{?usrmerged}
1591 %{_bindir}/rbd-replay-prep
1593 %{_bindir}/ceph-post-file
1594 %dir %{_libdir}/ceph/denc
1595 %{_libdir}/ceph/denc/denc-mod-*.so
1596 %{_tmpfilesdir}/ceph-common.conf
1597 %{_mandir}/man8/ceph-authtool.8*
1598 %{_mandir}/man8/ceph-conf.8*
1599 %{_mandir}/man8/ceph-dencoder.8*
1600 %{_mandir}/man8/ceph-diff-sorted.8*
1601 %{_mandir}/man8/ceph-rbdnamer.8*
1602 %{_mandir}/man8/ceph-syn.8*
1603 %{_mandir}/man8/ceph-post-file.8*
1604 %{_mandir}/man8/ceph.8*
1605 %{_mandir}/man8/mount.ceph.8*
1606 %{_mandir}/man8/rados.8*
1607 %{_mandir}/man8/radosgw-admin.8*
1608 %{_mandir}/man8/rbd.8*
1609 %{_mandir}/man8/rbdmap.8*
1610 %{_mandir}/man8/rbd-replay.8*
1611 %{_mandir}/man8/rbd-replay-many.8*
1612 %{_mandir}/man8/rbd-replay-prep.8*
1613 %{_mandir}/man8/rgw-orphan-list.8*
# Credentials used by ceph-post-file to upload logs to drop.ceph.com.
1614 %dir %{_datadir}/ceph/
1615 %{_datadir}/ceph/known_hosts_drop.ceph.com
1616 %{_datadir}/ceph/id_rsa_drop.ceph.com
1617 %{_datadir}/ceph/id_rsa_drop.ceph.com.pub
1618 %dir %{_sysconfdir}/ceph/
1619 %config %{_sysconfdir}/bash_completion.d/ceph
1620 %config %{_sysconfdir}/bash_completion.d/rados
1621 %config %{_sysconfdir}/bash_completion.d/rbd
1622 %config %{_sysconfdir}/bash_completion.d/radosgw-admin
1623 %config(noreplace) %{_sysconfdir}/ceph/rbdmap
1624 %{_unitdir}/rbdmap.service
1625 %dir %{_udevrulesdir}
1626 %{_udevrulesdir}/50-rbd.rules
# 3770: group-writable with setgid so all daemons can write logs as group ceph.
1627 %attr(3770,ceph,ceph) %dir %{_localstatedir}/log/ceph/
1628 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/
# pre scriptlet (ceph-common): create the ceph system user and group.
# Fedora/RHEL pin fixed uid/gid when free; SUSE falls back to dynamic ids
# if the requested ones are taken. NOTE(review): the scriptlet header and
# the id variable assignments are not visible in this chunk.
1633 %if 0%{?rhel} || 0%{?fedora}
1634 /usr/sbin/groupadd ceph -g $CEPH_GROUP_ID -o -r 2>/dev/null || :
1635 /usr/sbin/useradd ceph -u $CEPH_USER_ID -o -r -g ceph -s /sbin/nologin -c "Ceph daemons" -d %{_localstatedir}/lib/ceph 2>/dev/null || :
1637 %if 0%{?suse_version}
1638 if ! getent group ceph >/dev/null ; then
1639 CEPH_GROUP_ID_OPTION=""
1640 getent group $CEPH_GROUP_ID >/dev/null || CEPH_GROUP_ID_OPTION="-g $CEPH_GROUP_ID"
1641 groupadd ceph $CEPH_GROUP_ID_OPTION -r 2>/dev/null || :
1643 if ! getent passwd ceph >/dev/null ; then
1644 CEPH_USER_ID_OPTION=""
1645 getent passwd $CEPH_USER_ID >/dev/null || CEPH_USER_ID_OPTION="-u $CEPH_USER_ID"
1646 useradd ceph $CEPH_USER_ID_OPTION -r -g ceph -s /sbin/nologin 2>/dev/null || :
# Normalize comment/home fields for a pre-existing ceph account.
1648 usermod -c "Ceph storage service" \
1649 -d %{_localstatedir}/lib/ceph \
# post: create tmpfiles-managed runtime directories for ceph-common.
1657 %tmpfiles_create %{_tmpfilesdir}/ceph-common.conf
1660 # Package removal cleanup
# $1 -eq 0 means final removal (not an upgrade): drop logs and config dir.
1661 if [ "$1" -eq "0" ] ; then
1662 rm -rf %{_localstatedir}/log/ceph
1663 rm -rf %{_sysconfdir}/ceph
# ceph-mds: files plus the standard per-daemon scriptlet pattern
# (preset on SUSE / systemd macros on Fedora-RHEL; start target on fresh
# install; conditional try-restart on upgrade).
1668 %{_mandir}/man8/ceph-mds.8*
1669 %{_unitdir}/ceph-mds@.service
1670 %{_unitdir}/ceph-mds.target
1671 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/mds
1674 %if 0%{?suse_version}
1675 if [ $1 -eq 1 ] ; then
1676 /usr/bin/systemctl preset ceph-mds@\*.service ceph-mds.target >/dev/null 2>&1 || :
1679 %if 0%{?fedora} || 0%{?rhel}
1680 %systemd_post ceph-mds@\*.service ceph-mds.target
1682 if [ $1 -eq 1 ] ; then
1683 /usr/bin/systemctl start ceph-mds.target >/dev/null 2>&1 || :
1687 %if 0%{?suse_version}
1688 %service_del_preun ceph-mds@\*.service ceph-mds.target
1690 %if 0%{?fedora} || 0%{?rhel}
1691 %systemd_preun ceph-mds@\*.service ceph-mds.target
1695 %systemd_postun ceph-mds@\*.service ceph-mds.target
# Upgrade-only ($1 -ge 1): restart daemons only if the admin opted in
# via CEPH_AUTO_RESTART_ON_UPGRADE in the sysconfig file.
1696 if [ $1 -ge 1 ] ; then
1697 # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
1698 # "yes". In any case: if units are not running, do not touch them.
1699 SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
1700 if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
1701 source $SYSCONF_CEPH
1703 if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
1704 /usr/bin/systemctl try-restart ceph-mds@\*.service > /dev/null 2>&1 || :
# ceph-mgr: core mgr module files and the same per-daemon scriptlet
# pattern used for mds/mon (preset or systemd macros, start on fresh
# install, opt-in restart on upgrade).
1710 %dir %{_datadir}/ceph/mgr
1711 %{_datadir}/ceph/mgr/mgr_module.*
1712 %{_datadir}/ceph/mgr/mgr_util.*
1713 %{_unitdir}/ceph-mgr@.service
1714 %{_unitdir}/ceph-mgr.target
1715 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/mgr
1718 %if 0%{?suse_version}
1719 if [ $1 -eq 1 ] ; then
1720 /usr/bin/systemctl preset ceph-mgr@\*.service ceph-mgr.target >/dev/null 2>&1 || :
1723 %if 0%{?fedora} || 0%{?rhel}
1724 %systemd_post ceph-mgr@\*.service ceph-mgr.target
1726 if [ $1 -eq 1 ] ; then
1727 /usr/bin/systemctl start ceph-mgr.target >/dev/null 2>&1 || :
1731 %if 0%{?suse_version}
1732 %service_del_preun ceph-mgr@\*.service ceph-mgr.target
1734 %if 0%{?fedora} || 0%{?rhel}
1735 %systemd_preun ceph-mgr@\*.service ceph-mgr.target
1739 %systemd_postun ceph-mgr@\*.service ceph-mgr.target
1740 if [ $1 -ge 1 ] ; then
1741 # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
1742 # "yes". In any case: if units are not running, do not touch them.
1743 SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
1744 if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
1745 source $SYSCONF_CEPH
1747 if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
1748 /usr/bin/systemctl try-restart ceph-mgr@\*.service > /dev/null 2>&1 || :
# mgr module subpackages. Each follows the same shape: a files entry for
# its module directory under datadir/ceph/mgr, plus post/postun scriptlets
# that try-restart ceph-mgr.target on first install so the mgr picks up
# the new module.
1752 %files mgr-dashboard
1753 %{_datadir}/ceph/mgr/dashboard
1756 if [ $1 -eq 1 ] ; then
1757 /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
1760 %postun mgr-dashboard
1761 if [ $1 -eq 1 ] ; then
1762 /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
1765 %files mgr-diskprediction-local
1766 %{_datadir}/ceph/mgr/diskprediction_local
1768 %post mgr-diskprediction-local
1769 if [ $1 -eq 1 ] ; then
1770 /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
1773 %postun mgr-diskprediction-local
1774 if [ $1 -eq 1 ] ; then
1775 /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
# mgr-modules-core bundles the always-on mgr modules in one package.
1778 %files mgr-modules-core
1779 %dir %{_datadir}/ceph/mgr
1780 %{_datadir}/ceph/mgr/alerts
1781 %{_datadir}/ceph/mgr/balancer
1782 %{_datadir}/ceph/mgr/crash
1783 %{_datadir}/ceph/mgr/devicehealth
1784 %{_datadir}/ceph/mgr/influx
1785 %{_datadir}/ceph/mgr/insights
1786 %{_datadir}/ceph/mgr/iostat
1787 %{_datadir}/ceph/mgr/localpool
1788 %{_datadir}/ceph/mgr/mds_autoscaler
1789 %{_datadir}/ceph/mgr/mirroring
1790 %{_datadir}/ceph/mgr/nfs
1791 %{_datadir}/ceph/mgr/orchestrator
1792 %{_datadir}/ceph/mgr/osd_perf_query
1793 %{_datadir}/ceph/mgr/osd_support
1794 %{_datadir}/ceph/mgr/pg_autoscaler
1795 %{_datadir}/ceph/mgr/progress
1796 %{_datadir}/ceph/mgr/prometheus
1797 %{_datadir}/ceph/mgr/rbd_support
1798 %{_datadir}/ceph/mgr/restful
1799 %{_datadir}/ceph/mgr/selftest
1800 %{_datadir}/ceph/mgr/snap_schedule
1801 %{_datadir}/ceph/mgr/stats
1802 %{_datadir}/ceph/mgr/status
1803 %{_datadir}/ceph/mgr/telegraf
1804 %{_datadir}/ceph/mgr/telemetry
1805 %{_datadir}/ceph/mgr/test_orchestrator
1806 %{_datadir}/ceph/mgr/volumes
1807 %{_datadir}/ceph/mgr/zabbix
# rook / k8sevents / cephadm mgr modules follow the same restart pattern;
# their files/post headers are partially elided in this chunk.
1810 %{_datadir}/ceph/mgr/rook
1813 if [ $1 -eq 1 ] ; then
1814 /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
1818 if [ $1 -eq 1 ] ; then
1819 /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
1822 %files mgr-k8sevents
1823 %{_datadir}/ceph/mgr/k8sevents
1826 if [ $1 -eq 1 ] ; then
1827 /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
1830 %postun mgr-k8sevents
1831 if [ $1 -eq 1 ] ; then
1832 /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
1836 %{_datadir}/ceph/mgr/cephadm
1839 if [ $1 -eq 1 ] ; then
1840 /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
1844 if [ $1 -eq 1 ] ; then
1845 /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
# ceph-mon: files plus the standard per-daemon scriptlet pattern.
1850 %{_bindir}/ceph-monstore-tool
1851 %{_mandir}/man8/ceph-mon.8*
1852 %{_unitdir}/ceph-mon@.service
1853 %{_unitdir}/ceph-mon.target
1854 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/mon
1857 %if 0%{?suse_version}
1858 if [ $1 -eq 1 ] ; then
1859 /usr/bin/systemctl preset ceph-mon@\*.service ceph-mon.target >/dev/null 2>&1 || :
1862 %if 0%{?fedora} || 0%{?rhel}
1863 %systemd_post ceph-mon@\*.service ceph-mon.target
1865 if [ $1 -eq 1 ] ; then
1866 /usr/bin/systemctl start ceph-mon.target >/dev/null 2>&1 || :
1870 %if 0%{?suse_version}
1871 %service_del_preun ceph-mon@\*.service ceph-mon.target
1873 %if 0%{?fedora} || 0%{?rhel}
1874 %systemd_preun ceph-mon@\*.service ceph-mon.target
1878 %systemd_postun ceph-mon@\*.service ceph-mon.target
# Opt-in restart on upgrade, same convention as the other daemons.
1879 if [ $1 -ge 1 ] ; then
1880 # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
1881 # "yes". In any case: if units are not running, do not touch them.
1882 SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
1883 if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
1884 source $SYSCONF_CEPH
1886 if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
1887 /usr/bin/systemctl try-restart ceph-mon@\*.service > /dev/null 2>&1 || :
# ceph-fuse files: FUSE client binary, mount helper, units.
1892 %{_bindir}/ceph-fuse
1893 %{_mandir}/man8/ceph-fuse.8*
1894 %{_sbindir}/mount.fuse.ceph
1895 %{_mandir}/man8/mount.fuse.ceph.8*
1896 %{_unitdir}/ceph-fuse@.service
1897 %{_unitdir}/ceph-fuse.target
# cephfs-mirror daemon: files plus the standard per-daemon scriptlets.
1899 %files -n cephfs-mirror
1900 %{_bindir}/cephfs-mirror
1901 %{_mandir}/man8/cephfs-mirror.8*
1902 %{_unitdir}/cephfs-mirror@.service
1903 %{_unitdir}/cephfs-mirror.target
1905 %post -n cephfs-mirror
1906 %if 0%{?suse_version}
1907 if [ $1 -eq 1 ] ; then
1908 /usr/bin/systemctl preset cephfs-mirror@\*.service cephfs-mirror.target >/dev/null 2>&1 || :
1911 %if 0%{?fedora} || 0%{?rhel}
1912 %systemd_post cephfs-mirror@\*.service cephfs-mirror.target
1914 if [ $1 -eq 1 ] ; then
1915 /usr/bin/systemctl start cephfs-mirror.target >/dev/null 2>&1 || :
1918 %preun -n cephfs-mirror
1919 %if 0%{?suse_version}
1920 %service_del_preun cephfs-mirror@\*.service cephfs-mirror.target
1922 %if 0%{?fedora} || 0%{?rhel}
1923 %systemd_preun cephfs-mirror@\*.service cephfs-mirror.target
1926 %postun -n cephfs-mirror
1927 %systemd_postun cephfs-mirror@\*.service cephfs-mirror.target
1928 if [ $1 -ge 1 ] ; then
1929 # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
1930 # "yes". In any case: if units are not running, do not touch them.
1931 SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
1932 if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
1933 source $SYSCONF_CEPH
1935 if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
1936 /usr/bin/systemctl try-restart cephfs-mirror@\*.service > /dev/null 2>&1 || :
# rbd-fuse man page (its files header is elided above), then the
# rbd-mirror daemon package with the standard per-daemon scriptlets.
1942 %{_mandir}/man8/rbd-fuse.8*
1944 %files -n rbd-mirror
1945 %{_bindir}/rbd-mirror
1946 %{_mandir}/man8/rbd-mirror.8*
1947 %{_unitdir}/ceph-rbd-mirror@.service
1948 %{_unitdir}/ceph-rbd-mirror.target
1951 %if 0%{?suse_version}
1952 if [ $1 -eq 1 ] ; then
1953 /usr/bin/systemctl preset ceph-rbd-mirror@\*.service ceph-rbd-mirror.target >/dev/null 2>&1 || :
1956 %if 0%{?fedora} || 0%{?rhel}
1957 %systemd_post ceph-rbd-mirror@\*.service ceph-rbd-mirror.target
1959 if [ $1 -eq 1 ] ; then
1960 /usr/bin/systemctl start ceph-rbd-mirror.target >/dev/null 2>&1 || :
1963 %preun -n rbd-mirror
1964 %if 0%{?suse_version}
1965 %service_del_preun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target
1967 %if 0%{?fedora} || 0%{?rhel}
1968 %systemd_preun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target
1971 %postun -n rbd-mirror
1972 %systemd_postun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target
1973 if [ $1 -ge 1 ] ; then
1974 # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
1975 # "yes". In any case: if units are not running, do not touch them.
1976 SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
1977 if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
1978 source $SYSCONF_CEPH
1980 if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
1981 /usr/bin/systemctl try-restart ceph-rbd-mirror@\*.service > /dev/null 2>&1 || :
# ceph-immutable-object-cache subpackage: payload plus install/remove scriptlets.
1985 %files immutable-object-cache
1986 %{_bindir}/ceph-immutable-object-cache
1987 %{_mandir}/man8/ceph-immutable-object-cache.8*
1988 %{_unitdir}/ceph-immutable-object-cache@.service
1989 %{_unitdir}/ceph-immutable-object-cache.target
# post: on fresh install ($1 == 1) apply distro unit presets; on Fedora/RHEL
# additionally start the target. Errors are ignored (|| :) so rpm never fails.
1991 %post immutable-object-cache
1992 %if 0%{?suse_version}
1993 if [ $1 -eq 1 ] ; then
1994 /usr/bin/systemctl preset ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target >/dev/null 2>&1 || :
1997 %if 0%{?fedora} || 0%{?rhel}
1998 %systemd_post ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target
2000 if [ $1 -eq 1 ] ; then
2001 /usr/bin/systemctl start ceph-immutable-object-cache.target >/dev/null 2>&1 || :
# preun: disable/stop units before removal (per-distro macro).
2004 %preun immutable-object-cache
2005 %if 0%{?suse_version}
2006 %service_del_preun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target
2008 %if 0%{?fedora} || 0%{?rhel}
2009 %systemd_preun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target
# postun: FIRST_ARG may already be exported by SUSE scriptlet wrappers;
# otherwise fall back to $1 (the remaining-install count). Unlike the sibling
# postun scriptlets above/below, this one tests FIRST_ARG rather than $1.
2012 %postun immutable-object-cache
2013 test -n "$FIRST_ARG" || FIRST_ARG=$1
2014 %systemd_postun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target
2015 if [ $FIRST_ARG -ge 1 ] ; then
2016 # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
2017 # "yes". In any case: if units are not running, do not touch them.
2018 SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
2019 if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
2020 source $SYSCONF_CEPH
2022 if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
2023 /usr/bin/systemctl try-restart ceph-immutable-object-cache@\*.service > /dev/null 2>&1 || :
# Fragment of the rbd-nbd files list; the files header for this subpackage
# is outside this excerpt.
2029 %{_mandir}/man8/rbd-nbd.8*
2030 %dir %{_libexecdir}/rbd-nbd
2031 %{_libexecdir}/rbd-nbd/rbd-nbd_quiesce
# radosgw subpackage payload (the files header precedes this excerpt):
# gateway binaries, orphan/gap-list tools, shared library, man page, state
# directory, and systemd units.
2034 %{_bindir}/ceph-diff-sorted
2036 %{_bindir}/radosgw-token
2037 %{_bindir}/radosgw-es
2038 %{_bindir}/radosgw-object-expirer
2039 %{_bindir}/rgw-gap-list
2040 %{_bindir}/rgw-gap-list-comparator
2041 %{_bindir}/rgw-orphan-list
2042 %{_libdir}/libradosgw.so*
2043 %{_mandir}/man8/radosgw.8*
2044 %dir %{_localstatedir}/lib/ceph/radosgw
2045 %{_unitdir}/ceph-radosgw@.service
2046 %{_unitdir}/ceph-radosgw.target
# post: fresh install ($1 == 1) -> preset units (SUSE) / systemd_post and
# start target (Fedora/RHEL); failures are ignored.
2050 %if 0%{?suse_version}
2051 if [ $1 -eq 1 ] ; then
2052 /usr/bin/systemctl preset ceph-radosgw@\*.service ceph-radosgw.target >/dev/null 2>&1 || :
2055 %if 0%{?fedora} || 0%{?rhel}
2056 %systemd_post ceph-radosgw@\*.service ceph-radosgw.target
2058 if [ $1 -eq 1 ] ; then
2059 /usr/bin/systemctl start ceph-radosgw.target >/dev/null 2>&1 || :
# preun: disable/stop units before removal (per-distro macro).
2063 %if 0%{?suse_version}
2064 %service_del_preun ceph-radosgw@\*.service ceph-radosgw.target
2066 %if 0%{?fedora} || 0%{?rhel}
2067 %systemd_preun ceph-radosgw@\*.service ceph-radosgw.target
# postun: on upgrade ($1 >= 1) optionally try-restart running units, gated
# by CEPH_AUTO_RESTART_ON_UPGRADE=yes in the sysconfig file.
2072 %systemd_postun ceph-radosgw@\*.service ceph-radosgw.target
2073 if [ $1 -ge 1 ] ; then
2074 # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
2075 # "yes". In any case: if units are not running, do not touch them.
2076 SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
2077 if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
2078 source $SYSCONF_CEPH
2080 if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
2081 /usr/bin/systemctl try-restart ceph-radosgw@\*.service > /dev/null 2>&1 || :
# ceph-osd subpackage payload (files header precedes this excerpt): OSD
# tools, ceph-volume, man pages, systemd units, OSD state dir (0750,
# ceph:ceph), sysctl tuning and a sudoers drop-in for smartctl.
2086 %{_bindir}/ceph-clsinfo
2087 %{_bindir}/ceph-bluestore-tool
2088 %{_bindir}/ceph-erasure-code-tool
2089 %{_bindir}/ceph-objectstore-tool
2090 %{_bindir}/ceph-osdomap-tool
2092 %{_libexecdir}/ceph/ceph-osd-prestart.sh
2093 %{_sbindir}/ceph-volume
2094 %{_sbindir}/ceph-volume-systemd
2095 %{_mandir}/man8/ceph-clsinfo.8*
2096 %{_mandir}/man8/ceph-osd.8*
2097 %{_mandir}/man8/ceph-bluestore-tool.8*
2098 %{_mandir}/man8/ceph-volume.8*
2099 %{_mandir}/man8/ceph-volume-systemd.8*
2100 %{_unitdir}/ceph-osd@.service
2101 %{_unitdir}/ceph-osd.target
2102 %{_unitdir}/ceph-volume@.service
2103 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/osd
2104 %config(noreplace) %{_sysctldir}/90-ceph-osd.conf
2105 %{_sysconfdir}/sudoers.d/ceph-osd-smartctl
# post: fresh install ($1 == 1) -> preset units (SUSE) / systemd_post and
# start target (Fedora/RHEL), then apply the shipped sysctl settings,
# either via the sysctl_apply macro or by invoking systemd-sysctl directly.
2108 %if 0%{?suse_version}
2109 if [ $1 -eq 1 ] ; then
2110 /usr/bin/systemctl preset ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target >/dev/null 2>&1 || :
2113 %if 0%{?fedora} || 0%{?rhel}
2114 %systemd_post ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target
2116 if [ $1 -eq 1 ] ; then
2117 /usr/bin/systemctl start ceph-osd.target >/dev/null 2>&1 || :
2119 %if 0%{?sysctl_apply}
2120 %sysctl_apply 90-ceph-osd.conf
2122 /usr/lib/systemd/systemd-sysctl %{_sysctldir}/90-ceph-osd.conf > /dev/null 2>&1 || :
# preun: disable/stop OSD and ceph-volume units before removal.
2126 %if 0%{?suse_version}
2127 %service_del_preun ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target
2129 %if 0%{?fedora} || 0%{?rhel}
2130 %systemd_preun ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target
# postun: on upgrade ($1 >= 1) optionally try-restart running units, gated
# by CEPH_AUTO_RESTART_ON_UPGRADE=yes in the sysconfig file.
2134 %systemd_postun ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target
2135 if [ $1 -ge 1 ] ; then
2136 # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
2137 # "yes". In any case: if units are not running, do not touch them.
2138 SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
2139 if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
2140 source $SYSCONF_CEPH
2142 if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
2143 /usr/bin/systemctl try-restart ceph-osd@\*.service ceph-volume@\*.service > /dev/null 2>&1 || :
# crimson-osd binary, shipped only when built with the seastar option.
2147 %if 0%{with seastar}
2149 %{_bindir}/crimson-osd
# OCF resource agent for pacemaker-managed rbd; installed executable (0755).
2154 %files resource-agents
2155 %dir %{_prefix}/lib/ocf
2156 %dir %{_prefix}/lib/ocf/resource.d
2157 %dir %{_prefix}/lib/ocf/resource.d/ceph
2158 %attr(0755,-,-) %{_prefix}/lib/ocf/resource.d/ceph/rbd
# librados2 runtime payload (files header precedes this excerpt): versioned
# shared objects plus the ceph config directory. ldconfig is run on
# install/removal so the linker cache stays current.
2163 %{_libdir}/librados.so.*
2164 %dir %{_libdir}/ceph
2165 %{_libdir}/ceph/libceph-common.so.*
2167 %{_libdir}/librados_tp.so.*
2169 %dir %{_sysconfdir}/ceph
2171 %post -n librados2 -p /sbin/ldconfig
2173 %postun -n librados2 -p /sbin/ldconfig
# librados-devel: C headers, unversioned .so symlinks, librados-config tool.
2175 %files -n librados-devel
2176 %dir %{_includedir}/rados
2177 %{_includedir}/rados/librados.h
2178 %{_includedir}/rados/rados_types.h
2179 %{_libdir}/librados.so
2181 %{_libdir}/librados_tp.so
2183 %{_bindir}/librados-config
2184 %{_mandir}/man8/librados-config.8*
# libradospp-devel: C++ headers for librados (shares the rados include dir
# with librados-devel, hence the duplicate dir entry).
2186 %files -n libradospp-devel
2187 %dir %{_includedir}/rados
2188 %{_includedir}/rados/buffer.h
2189 %{_includedir}/rados/buffer_fwd.h
2190 %{_includedir}/rados/crc32c.h
2191 %{_includedir}/rados/inline_memory.h
2192 %{_includedir}/rados/librados.hpp
2193 %{_includedir}/rados/librados_fwd.hpp
2194 %{_includedir}/rados/page.h
2195 %{_includedir}/rados/rados_types.hpp
# Python 3 rados binding: compiled extension plus egg-info metadata.
2197 %files -n python%{python3_pkgversion}-rados
2198 %{python3_sitearch}/rados.cpython*.so
2199 %{python3_sitearch}/rados-*.egg-info
# libcephsqlite runtime library and its devel header; ldconfig on
# install/removal keeps the linker cache current.
2201 %files -n libcephsqlite
2202 %{_libdir}/libcephsqlite.so
2204 %post -n libcephsqlite -p /sbin/ldconfig
2206 %postun -n libcephsqlite -p /sbin/ldconfig
2208 %files -n libcephsqlite-devel
2209 %{_includedir}/libcephsqlite.h
# libradosstriper runtime + devel subpackages, built only when the
# libradosstriper bcond is enabled (see the conditional build section).
2211 %if 0%{with libradosstriper}
2212 %files -n libradosstriper1
2213 %{_libdir}/libradosstriper.so.*
2215 %post -n libradosstriper1 -p /sbin/ldconfig
2217 %postun -n libradosstriper1 -p /sbin/ldconfig
2219 %files -n libradosstriper-devel
2220 %dir %{_includedir}/radosstriper
2221 %{_includedir}/radosstriper/libradosstriper.h
2222 %{_includedir}/radosstriper/libradosstriper.hpp
2223 %{_libdir}/libradosstriper.so
# librbd1 runtime payload (files header precedes this excerpt), including
# the librbd plugin directory; ldconfig on install/removal.
2227 %{_libdir}/librbd.so.*
2229 %{_libdir}/librbd_tp.so.*
2231 %dir %{_libdir}/ceph/librbd
2232 %{_libdir}/ceph/librbd/libceph_*.so*
2234 %post -n librbd1 -p /sbin/ldconfig
2236 %postun -n librbd1 -p /sbin/ldconfig
# librbd-devel: headers and unversioned .so symlinks.
2238 %files -n librbd-devel
2239 %dir %{_includedir}/rbd
2240 %{_includedir}/rbd/librbd.h
2241 %{_includedir}/rbd/librbd.hpp
2242 %{_includedir}/rbd/features.h
2243 %{_libdir}/librbd.so
2245 %{_libdir}/librbd_tp.so
# librgw2 runtime payload (files header precedes this excerpt) with
# tracepoint libraries; ldconfig on install/removal.
2249 %{_libdir}/librgw.so.*
2251 %{_libdir}/librgw_op_tp.so.*
2252 %{_libdir}/librgw_rados_tp.so.*
2255 %post -n librgw2 -p /sbin/ldconfig
2257 %postun -n librgw2 -p /sbin/ldconfig
# librgw-devel: headers (installed under the rados include dir) and
# unversioned .so symlinks.
2259 %files -n librgw-devel
2260 %dir %{_includedir}/rados
2261 %{_includedir}/rados/librgw.h
2262 %{_includedir}/rados/rgw_file.h
2263 %{_libdir}/librgw.so
2265 %{_libdir}/librgw_op_tp.so
2266 %{_libdir}/librgw_rados_tp.so
# Python 3 rgw and rbd bindings: compiled extensions plus egg-info metadata.
2269 %files -n python%{python3_pkgversion}-rgw
2270 %{python3_sitearch}/rgw.cpython*.so
2271 %{python3_sitearch}/rgw-*.egg-info
2273 %files -n python%{python3_pkgversion}-rbd
2274 %{python3_sitearch}/rbd.cpython*.so
2275 %{python3_sitearch}/rbd-*.egg-info
# libcephfs2 runtime library and ceph config dir; ldconfig on install/removal.
2277 %files -n libcephfs2
2278 %{_libdir}/libcephfs.so.*
2279 %dir %{_sysconfdir}/ceph
2281 %post -n libcephfs2 -p /sbin/ldconfig
2283 %postun -n libcephfs2 -p /sbin/ldconfig
# libcephfs-devel: headers (including client metrics types) and .so symlink.
2285 %files -n libcephfs-devel
2286 %dir %{_includedir}/cephfs
2287 %{_includedir}/cephfs/libcephfs.h
2288 %{_includedir}/cephfs/ceph_ll_client.h
2289 %dir %{_includedir}/cephfs/metrics
2290 %{_includedir}/cephfs/metrics/Types.h
2291 %{_libdir}/libcephfs.so
# libjaeger: bundled tracing runtime libraries (opentracing, thrift,
# jaegertracing); ldconfig on install/removal.
2295 %{_libdir}/libopentracing.so.*
2296 %{_libdir}/libthrift.so.*
2297 %{_libdir}/libjaegertracing.so.*
2298 %post -n libjaeger -p /sbin/ldconfig
2299 %postun -n libjaeger -p /sbin/ldconfig
# Python 3 cephfs binding: compiled extension plus egg-info metadata.
2302 %files -n python%{python3_pkgversion}-cephfs
2303 %{python3_sitearch}/cephfs.cpython*.so
2304 %{python3_sitearch}/cephfs-*.egg-info
# Pure-python helper modules (argparse/daemon) with their bytecode caches.
2306 %files -n python%{python3_pkgversion}-ceph-argparse
2307 %{python3_sitelib}/ceph_argparse.py
2308 %{python3_sitelib}/__pycache__/ceph_argparse.cpython*.py*
2309 %{python3_sitelib}/ceph_daemon.py
2310 %{python3_sitelib}/__pycache__/ceph_daemon.cpython*.py*
2312 %files -n python%{python3_pkgversion}-ceph-common
2313 %{python3_sitelib}/ceph
2314 %{python3_sitelib}/ceph-*.egg-info
# cephfs-shell, only when built with the cephfs_shell option.
2316 %if 0%{with cephfs_shell}
2317 %files -n cephfs-shell
2318 %{python3_sitelib}/cephfs_shell-*.egg-info
2319 %{_bindir}/cephfs-shell
2320 %{_mandir}/man8/cephfs-shell.8*
# cephfs-top monitoring utility.
2323 %files -n cephfs-top
2324 %{python3_sitelib}/cephfs_top-*.egg-info
2325 %{_bindir}/cephfs-top
2326 %{_mandir}/man8/cephfs-top.8*
# ceph-test payload, shipped only when the ceph_test_package bcond is on:
# benchmark/stress binaries, the ceph_test_* glob, debug helpers, and the
# monstore crush-update script.
2328 %if 0%{with ceph_test_package}
2330 %{_bindir}/ceph-client-debug
2331 %{_bindir}/ceph_bench_log
2332 %{_bindir}/ceph_kvstorebench
2333 %{_bindir}/ceph_multi_stress_watch
2334 %{_bindir}/ceph_erasure_code_benchmark
2335 %{_bindir}/ceph_omapbench
2336 %{_bindir}/ceph_objectstore_bench
2337 %{_bindir}/ceph_perf_objectstore
2338 %{_bindir}/ceph_perf_local
2339 %{_bindir}/ceph_perf_msgr_client
2340 %{_bindir}/ceph_perf_msgr_server
2341 %{_bindir}/ceph_psim
2342 %{_bindir}/ceph_radosacl
2343 %{_bindir}/ceph_rgw_jsonparser
2344 %{_bindir}/ceph_rgw_multiparser
2345 %{_bindir}/ceph_scratchtool
2346 %{_bindir}/ceph_scratchtoolpp
2347 %{_bindir}/ceph_test_*
2348 %{_bindir}/ceph-coverage
2349 %{_bindir}/ceph-debugpack
2350 %{_bindir}/ceph-dedup-tool
# crimson-store-nbd only exists in seastar builds.
2351 %if 0%{with seastar}
2352 %{_bindir}/crimson-store-nbd
2354 %{_mandir}/man8/ceph-debugpack.8*
2355 %dir %{_libdir}/ceph
2356 %{_libdir}/ceph/ceph-monstore-update-crush.sh
# Java bindings, shipped only when built with the cephfs_java option:
# JNI runtime library (+ldconfig), its devel symlink, and the jars.
2359 %if 0%{with cephfs_java}
2360 %files -n libcephfs_jni1
2361 %{_libdir}/libcephfs_jni.so.*
2363 %post -n libcephfs_jni1 -p /sbin/ldconfig
2365 %postun -n libcephfs_jni1 -p /sbin/ldconfig
2367 %files -n libcephfs_jni-devel
2368 %{_libdir}/libcephfs_jni.so
2370 %files -n cephfs-java
2371 %{_javadir}/libcephfs.jar
2372 %{_javadir}/libcephfs-test.jar
# Header for writing RADOS object classes.
2375 %files -n rados-objclass-devel
2376 %dir %{_includedir}/rados
2377 %{_includedir}/rados/objclass.h
# SELinux subpackage, built only with the selinux bcond: compiled policy
# module (root-only, 0600), interface file, and man page.
2379 %if 0%{with selinux}
2381 %attr(0600,root,root) %{_datadir}/selinux/packages/ceph.pp
2382 %{_datadir}/selinux/devel/include/contrib/ceph.if
2383 %{_mandir}/man8/ceph_selinux.8*
# post scriptlet body (the section header precedes this excerpt): install the
# policy module, and if SELinux is enabled and file contexts changed, stop
# running ceph daemons, relabel, then restart only what was running.
# NOTE(review): early-exit branches of the "if ! selinuxenabled" and diff
# checks are not visible here (line numbering is non-contiguous).
2386 # backup file_contexts before update
2387 . /etc/selinux/config
2388 FILE_CONTEXT=/etc/selinux/${SELINUXTYPE}/contexts/files/file_contexts
2389 cp ${FILE_CONTEXT} ${FILE_CONTEXT}.pre
2391 # Install the policy
2392 /usr/sbin/semodule -i %{_datadir}/selinux/packages/ceph.pp
2394 # Load the policy if SELinux is enabled
2395 if ! /usr/sbin/selinuxenabled; then
2396 # Do not relabel if selinux is not enabled
2400 if diff ${FILE_CONTEXT} ${FILE_CONTEXT}.pre > /dev/null 2>&1; then
2401 # Do not relabel if file contexts did not change
2405 # Check whether the daemons are running
2406 /usr/bin/systemctl status ceph.target > /dev/null 2>&1
2409 # Stop the daemons if they were running
2410 if test $STATUS -eq 0; then
2411 /usr/bin/systemctl stop ceph.target > /dev/null 2>&1
2414 # Relabel the files fix for first package install
2415 /usr/sbin/fixfiles -C ${FILE_CONTEXT}.pre restore 2> /dev/null
2417 rm -f ${FILE_CONTEXT}.pre
2418 # The fixfiles command won't fix label for /var/run/ceph
2419 /usr/sbin/restorecon -R /var/run/ceph > /dev/null 2>&1
2421 # Start the daemons iff they were running before
2422 if test $STATUS -eq 0; then
2423 /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || :
# postun scriptlet body for ceph-selinux (the section header precedes this
# excerpt). Runs only on full removal ($1 == 0): remove the policy module,
# then -- if SELinux is enabled -- stop running ceph daemons, restore the
# default labels from the pre-removal context backup, and restart only
# what was running. STATUS is presumably captured from the systemctl status
# exit code on a line not visible in this excerpt -- confirm in full file.
2428 if [ $1 -eq 0 ]; then
2429 # backup file_contexts before update
2430 . /etc/selinux/config
2431 FILE_CONTEXT=/etc/selinux/${SELINUXTYPE}/contexts/files/file_contexts
2432 cp ${FILE_CONTEXT} ${FILE_CONTEXT}.pre
2435 /usr/sbin/semodule -n -r ceph > /dev/null 2>&1
2437 # Reload the policy if SELinux is enabled
2438 if ! /usr/sbin/selinuxenabled ; then
2439 # Do not relabel if SELinux is not enabled
2443 # Check whether the daemons are running
2444 /usr/bin/systemctl status ceph.target > /dev/null 2>&1
2447 # Stop the daemons if they were running
2448 if test $STATUS -eq 0; then
2449 /usr/bin/systemctl stop ceph.target > /dev/null 2>&1
2452 /usr/sbin/fixfiles -C ${FILE_CONTEXT}.pre restore 2> /dev/null
2453 rm -f ${FILE_CONTEXT}.pre
2454 # The fixfiles command won't fix label for /var/run/ceph
2455 /usr/sbin/restorecon -R /var/run/ceph > /dev/null 2>&1
2457 # Start the daemons if they were running before
2458 if test $STATUS -eq 0; then
2459 /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || :
# Grafana dashboard JSON files; on SUSE the parent directories are owned
# explicitly (SUSE grafana packaging does not own them).
2465 %files grafana-dashboards
2466 %if 0%{?suse_version}
2467 %attr(0755,root,root) %dir %{_sysconfdir}/grafana
2468 %attr(0755,root,root) %dir %{_sysconfdir}/grafana/dashboards
2470 %attr(0755,root,root) %dir %{_sysconfdir}/grafana/dashboards/ceph-dashboard
2471 %config %{_sysconfdir}/grafana/dashboards/ceph-dashboard/*
2472 %doc monitoring/grafana/dashboards/README
2473 %doc monitoring/grafana/README.md
# Prometheus alert rules, with the same SUSE directory-ownership pattern.
2475 %files prometheus-alerts
2476 %if 0%{?suse_version}
2477 %attr(0755,root,root) %dir %{_sysconfdir}/prometheus
2479 %attr(0755,root,root) %dir %{_sysconfdir}/prometheus/ceph
2480 %config %{_sysconfdir}/prometheus/ceph/ceph_default_alerts.yml