#
# spec file for package ceph
#
# Copyright (C) 2004-2019 The Ceph Project Developers. See COPYING file
# at the top-level directory of this distribution and at
# https://github.com/ceph/ceph/blob/master/COPYING
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
# upon.
#
# This file is under the GNU Lesser General Public License, version 2.1
#
# Please submit bugfixes or comments via http://tracker.ceph.com/
#

#################################################################################
# conditional build section
#
# please read http://rpm.org/user_doc/conditional_builds.html for an explanation
# of bcond syntax!
#################################################################################
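# Informational reminder (comment only, not build logic): %bcond_without foo
# builds *with* foo by default and is disabled with `rpmbuild --without foo`,
# while %bcond_with foo builds *without* foo by default and is enabled with
# `rpmbuild --with foo`. An illustrative invocation:
#
#   rpmbuild -ba ceph.spec --with make_check --without tcmalloc
#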
%bcond_with make_check
%bcond_with zbd
%bcond_with cmake_verbose_logging
%bcond_without ceph_test_package
%ifarch s390
%bcond_with tcmalloc
%else
%bcond_without tcmalloc
%endif
%bcond_with system_pmdk
%if 0%{?fedora} || 0%{?rhel}
%bcond_without selinux
%ifarch x86_64 ppc64le
%bcond_without rbd_rwl_cache
%bcond_without rbd_ssd_cache
%else
%bcond_with rbd_rwl_cache
%bcond_with rbd_ssd_cache
%endif
%if 0%{?rhel} >= 8
%bcond_with cephfs_java
%else
%bcond_without cephfs_java
%endif
%bcond_without amqp_endpoint
%bcond_without kafka_endpoint
%bcond_without lttng
%bcond_without libradosstriper
%bcond_without ocf
%global luarocks_package_name luarocks
%bcond_without lua_packages
%global _remote_tarball_prefix https://download.ceph.com/tarballs/
%endif
%if 0%{?suse_version}
%bcond_with amqp_endpoint
%bcond_with cephfs_java
%bcond_with kafka_endpoint
%bcond_with libradosstriper
%ifarch x86_64 aarch64 ppc64le
%bcond_without lttng
%bcond_without rbd_rwl_cache
%bcond_without rbd_ssd_cache
%else
%bcond_with lttng
%bcond_with rbd_rwl_cache
%bcond_with rbd_ssd_cache
%endif
%bcond_with ocf
%bcond_with selinux
#Compat macro for _fillupdir macro introduced in Nov 2017
%if ! %{defined _fillupdir}
%global _fillupdir /var/adm/fillup-templates
%endif
#luarocks
%if 0%{?is_opensuse}
# openSUSE
%bcond_without lua_packages
%if 0%{?sle_version}
# openSUSE Leap
%global luarocks_package_name lua53-luarocks
%else
# openSUSE Tumbleweed
%global luarocks_package_name lua54-luarocks
%endif
%else
# SLE
%bcond_with lua_packages
%endif
%endif
%bcond_with seastar
%bcond_with jaeger
%if 0%{?fedora} || 0%{?suse_version} >= 1500
# distros that ship cmd2 and/or colorama
%bcond_without cephfs_shell
%else
# distros that do _not_ ship cmd2/colorama
%bcond_with cephfs_shell
%endif
%if 0%{?fedora} || 0%{?suse_version} || 0%{?rhel} >= 8
%global weak_deps 1
%endif
%if %{with selinux}
# get selinux policy version
# Force 0.0.0 policy version for centos builds to avoid repository sync issues between rhel and centos
%if 0%{?centos}
%global _selinux_policy_version 0.0.0
%else
%{!?_selinux_policy_version: %global _selinux_policy_version 0.0.0}
%endif
%endif

%{!?_udevrulesdir: %global _udevrulesdir /lib/udev/rules.d}
%{!?tmpfiles_create: %global tmpfiles_create systemd-tmpfiles --create}
%{!?python3_pkgversion: %global python3_pkgversion 3}
%{!?python3_version_nodots: %global python3_version_nodots 3}
%{!?python3_version: %global python3_version 3}

# disable dwz which compresses the debuginfo
%global _find_debuginfo_dwz_opts %{nil}
%if ! 0%{?suse_version}
# use multi-threaded xz compression: xz level 7 using ncpus threads
%global _source_payload w7T%{_smp_build_ncpus}.xzdio
%global _binary_payload w7T%{_smp_build_ncpus}.xzdio
%endif
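# For example, on an 8-core builder the payload macros above expand to
# "w7T8.xzdio", i.e. xz compression level 7 with 8 worker threads.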

%define smp_limit_mem_per_job() %( \
  kb_per_job=%1 \
  kb_total=$(head -3 /proc/meminfo | sed -n 's/MemAvailable:\\s*\\(.*\\) kB.*/\\1/p') \
  jobs=$(( $kb_total / $kb_per_job )) \
  [ $jobs -lt 1 ] && jobs=1 \
  echo $jobs )
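# Rough worked example (illustrative numbers only): with about 12 GiB of
# MemAvailable, %{smp_limit_mem_per_job 3000000} yields 12000000/3000000 = 4,
# i.e. at most 4 parallel jobs, and never fewer than 1.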

%if 0%{?_smp_ncpus_max} == 0
%if 0%{?__isa_bits} == 32
# 32-bit builds can use 3G memory max, which is not enough even for -j2
%global _smp_ncpus_max 1
%else
# 3.0 GiB mem per job
%global _smp_ncpus_max %{smp_limit_mem_per_job 3000000}
%endif
%endif

#################################################################################
# main package definition
#################################################################################
Name:           ceph
Version:        @PROJECT_VERSION@
Release:        @RPM_RELEASE@%{?dist}
%if 0%{?fedora} || 0%{?rhel}
Epoch:          2
%endif

# define _epoch_prefix macro which will expand to the empty string if epoch is
# undefined
%global _epoch_prefix %{?epoch:%{epoch}:}
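# For instance, on Fedora/RHEL (Epoch: 2) versioned dependencies expand to
# "2:%{version}-%{release}"; on distros without an Epoch the prefix expands
# to nothing, leaving plain "%{version}-%{release}".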

Summary:        User space components of the Ceph file system
License:        LGPL-2.1 and LGPL-3.0 and CC-BY-SA-3.0 and GPL-2.0 and BSL-1.0 and BSD-3-Clause and MIT
%if 0%{?suse_version}
Group:          System/Filesystems
%endif
URL:            http://ceph.com/
Source0:        %{?_remote_tarball_prefix}@TARBALL_BASENAME@.tar.bz2
%if 0%{?suse_version}
# _insert_obs_source_lines_here
ExclusiveArch:  x86_64 aarch64 ppc64le s390x
%endif
#################################################################################
# dependencies that apply across all distro families
#################################################################################
Requires:       ceph-osd = %{_epoch_prefix}%{version}-%{release}
Requires:       ceph-mds = %{_epoch_prefix}%{version}-%{release}
Requires:       ceph-mgr = %{_epoch_prefix}%{version}-%{release}
Requires:       ceph-mon = %{_epoch_prefix}%{version}-%{release}
Requires(post): binutils
%if 0%{with cephfs_java}
BuildRequires:  java-devel
BuildRequires:  sharutils
%endif
%if 0%{with selinux}
BuildRequires:  checkpolicy
BuildRequires:  selinux-policy-devel
%endif
BuildRequires:  gperf
BuildRequires:  cmake > 3.5
BuildRequires:  cryptsetup
BuildRequires:  fuse-devel
%if 0%{with seastar} && 0%{?rhel}
BuildRequires:  gcc-toolset-9-gcc-c++ >= 9.2.1-2.3
%else
BuildRequires:  gcc-c++
%endif
BuildRequires:  gdbm
%if 0%{with tcmalloc}
# libprofiler did not build on ppc64le until 2.7.90
%if 0%{?fedora} || 0%{?rhel} >= 8
BuildRequires:  gperftools-devel >= 2.7.90
%endif
%if 0%{?rhel} && 0%{?rhel} < 8
BuildRequires:  gperftools-devel >= 2.6.1
%endif
%if 0%{?suse_version}
BuildRequires:  gperftools-devel >= 2.4
%endif
%endif
BuildRequires:  leveldb-devel > 1.2
BuildRequires:  libaio-devel
BuildRequires:  libblkid-devel >= 2.17
BuildRequires:  cryptsetup-devel
BuildRequires:  libcurl-devel
BuildRequires:  libcap-ng-devel
BuildRequires:  fmt-devel >= 5.2.1
BuildRequires:  pkgconfig(libudev)
BuildRequires:  libnl3-devel
BuildRequires:  liboath-devel
BuildRequires:  libtool
BuildRequires:  libxml2-devel
BuildRequires:  make
BuildRequires:  ncurses-devel
BuildRequires:  libicu-devel
BuildRequires:  parted
BuildRequires:  patch
BuildRequires:  perl
BuildRequires:  pkgconfig
BuildRequires:  procps
BuildRequires:  python%{python3_pkgversion}
BuildRequires:  python%{python3_pkgversion}-devel
BuildRequires:  python%{python3_pkgversion}-setuptools
BuildRequires:  python%{python3_pkgversion}-Cython
BuildRequires:  snappy-devel
BuildRequires:  sqlite-devel
BuildRequires:  sudo
BuildRequires:  pkgconfig(udev)
BuildRequires:  util-linux
BuildRequires:  valgrind-devel
BuildRequires:  which
BuildRequires:  xfsprogs
BuildRequires:  xfsprogs-devel
BuildRequires:  xmlstarlet
BuildRequires:  nasm
BuildRequires:  lua-devel
%if 0%{with amqp_endpoint}
BuildRequires:  librabbitmq-devel
%endif
%if 0%{with kafka_endpoint}
BuildRequires:  librdkafka-devel
%endif
%if 0%{with lua_packages}
BuildRequires:  %{luarocks_package_name}
%endif
%if 0%{with make_check}
BuildRequires:  jq
BuildRequires:  libuuid-devel
BuildRequires:  python%{python3_pkgversion}-bcrypt
BuildRequires:  python%{python3_pkgversion}-nose
BuildRequires:  python%{python3_pkgversion}-pecan
BuildRequires:  python%{python3_pkgversion}-requests
BuildRequires:  python%{python3_pkgversion}-dateutil
BuildRequires:  python%{python3_pkgversion}-virtualenv
BuildRequires:  python%{python3_pkgversion}-coverage
BuildRequires:  python%{python3_pkgversion}-pyOpenSSL
BuildRequires:  socat
%endif
%if 0%{with zbd}
BuildRequires:  libzbd-devel
%endif
%if 0%{with jaeger}
BuildRequires:  bison
BuildRequires:  flex
%if 0%{?fedora} || 0%{?rhel}
BuildRequires:  json-devel
%endif
%if 0%{?suse_version}
BuildRequires:  nlohmann_json-devel
%endif
BuildRequires:  libevent-devel
BuildRequires:  yaml-cpp-devel
%endif
%if 0%{with system_pmdk}
BuildRequires:  libpmem-devel
BuildRequires:  libpmemobj-devel
%endif
%if 0%{with seastar}
BuildRequires:  c-ares-devel
BuildRequires:  gnutls-devel
BuildRequires:  hwloc-devel
BuildRequires:  libpciaccess-devel
BuildRequires:  lksctp-tools-devel
BuildRequires:  protobuf-devel
BuildRequires:  ragel
BuildRequires:  systemtap-sdt-devel
BuildRequires:  yaml-cpp-devel
%if 0%{?fedora}
BuildRequires:  libubsan
BuildRequires:  libasan
BuildRequires:  libatomic
%endif
%if 0%{?rhel}
BuildRequires:  gcc-toolset-9-annobin
BuildRequires:  gcc-toolset-9-libubsan-devel
BuildRequires:  gcc-toolset-9-libasan-devel
BuildRequires:  gcc-toolset-9-libatomic-devel
%endif
%endif
#################################################################################
# distro-conditional dependencies
#################################################################################
%if 0%{?suse_version}
BuildRequires:  pkgconfig(systemd)
BuildRequires:  systemd-rpm-macros
%{?systemd_requires}
PreReq:         %fillup_prereq
BuildRequires:  fdupes
BuildRequires:  memory-constraints
BuildRequires:  net-tools
BuildRequires:  libbz2-devel
BuildRequires:  mozilla-nss-devel
BuildRequires:  keyutils-devel
BuildRequires:  libopenssl-devel
BuildRequires:  lsb-release
BuildRequires:  openldap2-devel
#BuildRequires:  krb5
#BuildRequires:  krb5-devel
BuildRequires:  cunit-devel
BuildRequires:  python%{python3_pkgversion}-PrettyTable
BuildRequires:  python%{python3_pkgversion}-PyYAML
BuildRequires:  python%{python3_pkgversion}-Sphinx
BuildRequires:  rdma-core-devel
BuildRequires:  liblz4-devel >= 1.7
# for prometheus-alerts
BuildRequires:  golang-github-prometheus-prometheus
%endif
%if 0%{?fedora} || 0%{?rhel}
Requires:       systemd
BuildRequires:  boost-random
BuildRequires:  nss-devel
BuildRequires:  keyutils-libs-devel
BuildRequires:  libibverbs-devel
BuildRequires:  librdmacm-devel
BuildRequires:  openldap-devel
#BuildRequires:  krb5-devel
BuildRequires:  openssl-devel
BuildRequires:  CUnit-devel
BuildRequires:  redhat-lsb-core
BuildRequires:  python%{python3_pkgversion}-devel
BuildRequires:  python%{python3_pkgversion}-prettytable
BuildRequires:  python%{python3_pkgversion}-pyyaml
BuildRequires:  python%{python3_pkgversion}-sphinx
BuildRequires:  lz4-devel >= 1.7
%endif
# distro-conditional make check dependencies
%if 0%{with make_check}
%if 0%{?fedora} || 0%{?rhel}
BuildRequires:  golang-github-prometheus
BuildRequires:  libtool-ltdl-devel
BuildRequires:  ninja-build
BuildRequires:  xmlsec1
BuildRequires:  xmlsec1-devel
%ifarch x86_64
BuildRequires:  xmlsec1-nss
%endif
BuildRequires:  xmlsec1-openssl
BuildRequires:  xmlsec1-openssl-devel
BuildRequires:  python%{python3_pkgversion}-cherrypy
BuildRequires:  python%{python3_pkgversion}-jwt
BuildRequires:  python%{python3_pkgversion}-routes
BuildRequires:  python%{python3_pkgversion}-scipy
BuildRequires:  python%{python3_pkgversion}-werkzeug
BuildRequires:  python%{python3_pkgversion}-pyOpenSSL
%endif
%if 0%{?suse_version}
BuildRequires:  golang-github-prometheus-prometheus
BuildRequires:  libxmlsec1-1
BuildRequires:  libxmlsec1-nss1
BuildRequires:  libxmlsec1-openssl1
BuildRequires:  ninja
BuildRequires:  python%{python3_pkgversion}-CherryPy
BuildRequires:  python%{python3_pkgversion}-PyJWT
BuildRequires:  python%{python3_pkgversion}-Routes
BuildRequires:  python%{python3_pkgversion}-Werkzeug
BuildRequires:  python%{python3_pkgversion}-numpy-devel
BuildRequires:  xmlsec1-devel
BuildRequires:  xmlsec1-openssl-devel
%endif
%endif
# lttng and babeltrace for rbd-replay-prep
%if %{with lttng}
%if 0%{?fedora} || 0%{?rhel}
BuildRequires:  lttng-ust-devel
BuildRequires:  libbabeltrace-devel
%endif
%if 0%{?suse_version}
BuildRequires:  lttng-ust-devel
BuildRequires:  babeltrace-devel
%endif
%endif
%if 0%{?suse_version}
BuildRequires:  libexpat-devel
%endif
%if 0%{?rhel} || 0%{?fedora}
BuildRequires:  expat-devel
%endif
#hardened-cc1
%if 0%{?fedora} || 0%{?rhel}
BuildRequires:  redhat-rpm-config
%endif
%if 0%{with seastar}
%if 0%{?fedora} || 0%{?rhel}
BuildRequires:  cryptopp-devel
BuildRequires:  numactl-devel
BuildRequires:  protobuf-compiler
%endif
%if 0%{?suse_version}
BuildRequires:  libcryptopp-devel
BuildRequires:  libnuma-devel
%endif
%endif
%if 0%{?rhel} >= 8
BuildRequires:  /usr/bin/pathfix.py
%endif

%description
Ceph is a massively scalable, open-source, distributed storage system that runs
on commodity hardware and delivers object, block and file system storage.


#################################################################################
# subpackages
#################################################################################
%package base
Summary:       Ceph Base Package
%if 0%{?suse_version}
Group:         System/Filesystems
%endif
Provides:      ceph-test:/usr/bin/ceph-kvstore-tool
Requires:      ceph-common = %{_epoch_prefix}%{version}-%{release}
Requires:      librbd1 = %{_epoch_prefix}%{version}-%{release}
Requires:      librados2 = %{_epoch_prefix}%{version}-%{release}
Requires:      libcephfs2 = %{_epoch_prefix}%{version}-%{release}
Requires:      librgw2 = %{_epoch_prefix}%{version}-%{release}
%if 0%{with selinux}
Requires:      ceph-selinux = %{_epoch_prefix}%{version}-%{release}
%endif
Requires:      cryptsetup
Requires:      e2fsprogs
Requires:      findutils
Requires:      grep
Requires:      logrotate
Requires:      parted
Requires:      psmisc
Requires:      python%{python3_pkgversion}-setuptools
Requires:      util-linux
Requires:      xfsprogs
Requires:      which
%if 0%{?rhel} && 0%{?rhel} < 8
# The following is necessary due to tracker 36508 and can be removed once the
# associated upstream bugs are resolved.
%if 0%{with tcmalloc}
Requires:      gperftools-libs >= 2.6.1
%endif
%endif
%if 0%{?weak_deps}
Recommends:    chrony
%endif
%description base
Base is the package that includes all the files shared amongst ceph servers.

%package -n cephadm
Summary:        Utility to bootstrap Ceph clusters
BuildArch:      noarch
Requires:       lvm2
Requires:       python%{python3_pkgversion}
%if 0%{?weak_deps}
Recommends:     podman >= 2.0.2
%endif
%description -n cephadm
Utility to bootstrap a Ceph cluster and manage Ceph daemons deployed
with systemd and podman.

%package -n ceph-common
Summary:        Ceph Common
%if 0%{?suse_version}
Group:          System/Filesystems
%endif
Requires:       librbd1 = %{_epoch_prefix}%{version}-%{release}
Requires:       librados2 = %{_epoch_prefix}%{version}-%{release}
Requires:       libcephfs2 = %{_epoch_prefix}%{version}-%{release}
Requires:       python%{python3_pkgversion}-rados = %{_epoch_prefix}%{version}-%{release}
Requires:       python%{python3_pkgversion}-rbd = %{_epoch_prefix}%{version}-%{release}
Requires:       python%{python3_pkgversion}-cephfs = %{_epoch_prefix}%{version}-%{release}
Requires:       python%{python3_pkgversion}-rgw = %{_epoch_prefix}%{version}-%{release}
Requires:       python%{python3_pkgversion}-ceph-argparse = %{_epoch_prefix}%{version}-%{release}
Requires:       python%{python3_pkgversion}-ceph-common = %{_epoch_prefix}%{version}-%{release}
%if 0%{with jaeger}
Requires:       libjaeger = %{_epoch_prefix}%{version}-%{release}
%endif
%if 0%{?fedora} || 0%{?rhel}
Requires:       python%{python3_pkgversion}-prettytable
%endif
%if 0%{?suse_version}
Requires:       python%{python3_pkgversion}-PrettyTable
%endif
%if 0%{with libradosstriper}
Requires:       libradosstriper1 = %{_epoch_prefix}%{version}-%{release}
%endif
%{?systemd_requires}
%if 0%{?suse_version}
Requires(pre):  pwdutils
%endif
%description -n ceph-common
Common utilities to mount and interact with a ceph storage cluster.
Comprised of files that are common to Ceph clients and servers.

%package mds
Summary:        Ceph Metadata Server Daemon
%if 0%{?suse_version}
Group:          System/Filesystems
%endif
Requires:       ceph-base = %{_epoch_prefix}%{version}-%{release}
%description mds
ceph-mds is the metadata server daemon for the Ceph distributed file system.
One or more instances of ceph-mds collectively manage the file system
namespace, coordinating access to the shared OSD cluster.

%package mon
Summary:        Ceph Monitor Daemon
%if 0%{?suse_version}
Group:          System/Filesystems
%endif
Provides:       ceph-test:/usr/bin/ceph-monstore-tool
Requires:       ceph-base = %{_epoch_prefix}%{version}-%{release}
%if 0%{?weak_deps}
Recommends:     nvme-cli
%if 0%{?suse_version}
Requires:       smartmontools
%else
Recommends:     smartmontools
%endif
%endif
%if 0%{with jaeger}
Requires:       libjaeger = %{_epoch_prefix}%{version}-%{release}
%endif
%description mon
ceph-mon is the cluster monitor daemon for the Ceph distributed file
system. One or more instances of ceph-mon form a Paxos part-time
parliament cluster that provides extremely reliable and durable storage
of cluster membership, configuration, and state.

%package mgr
Summary:        Ceph Manager Daemon
%if 0%{?suse_version}
Group:          System/Filesystems
%endif
Requires:       ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires:       ceph-mgr-modules-core = %{_epoch_prefix}%{version}-%{release}
Requires:       libcephsqlite = %{_epoch_prefix}%{version}-%{release}
%if 0%{?weak_deps}
Recommends:     ceph-mgr-dashboard = %{_epoch_prefix}%{version}-%{release}
Recommends:     ceph-mgr-diskprediction-local = %{_epoch_prefix}%{version}-%{release}
Recommends:     ceph-mgr-k8sevents = %{_epoch_prefix}%{version}-%{release}
Recommends:     ceph-mgr-cephadm = %{_epoch_prefix}%{version}-%{release}
Recommends:     python%{python3_pkgversion}-influxdb
%endif
%description mgr
ceph-mgr enables python modules that provide services (such as the REST
module derived from Calamari) and expose CLI hooks.  ceph-mgr gathers
the cluster maps, the daemon metadata, and performance counters, and
exposes all these to the python modules.

%package mgr-dashboard
Summary:        Ceph Dashboard
BuildArch:      noarch
%if 0%{?suse_version}
Group:          System/Filesystems
%endif
Requires:       ceph-mgr = %{_epoch_prefix}%{version}-%{release}
Requires:       ceph-grafana-dashboards = %{_epoch_prefix}%{version}-%{release}
Requires:       ceph-prometheus-alerts = %{_epoch_prefix}%{version}-%{release}
%if 0%{?fedora} || 0%{?rhel}
Requires:       python%{python3_pkgversion}-cherrypy
Requires:       python%{python3_pkgversion}-jwt
Requires:       python%{python3_pkgversion}-routes
Requires:       python%{python3_pkgversion}-werkzeug
%if 0%{?weak_deps}
Recommends:     python%{python3_pkgversion}-saml
%endif
%endif
%if 0%{?suse_version}
Requires:       python%{python3_pkgversion}-CherryPy
Requires:       python%{python3_pkgversion}-PyJWT
Requires:       python%{python3_pkgversion}-Routes
Requires:       python%{python3_pkgversion}-Werkzeug
Recommends:     python%{python3_pkgversion}-python3-saml
%endif
%description mgr-dashboard
ceph-mgr-dashboard is a manager module, providing a web-based application
to monitor and manage many aspects of a Ceph cluster and related components.
See the Dashboard documentation at http://docs.ceph.com/ for details and a
full feature overview.

%package mgr-diskprediction-local
Summary:        Ceph Manager module for predicting disk failures
BuildArch:      noarch
%if 0%{?suse_version}
Group:          System/Filesystems
%endif
Requires:       ceph-mgr = %{_epoch_prefix}%{version}-%{release}
Requires:       python%{python3_pkgversion}-numpy
%if 0%{?fedora} || 0%{?suse_version}
Requires:       python%{python3_pkgversion}-scikit-learn
%endif
Requires:       python3-scipy
%description mgr-diskprediction-local
ceph-mgr-diskprediction-local is a ceph-mgr module that tries to predict
disk failures using local algorithms and machine-learning databases.

%package mgr-modules-core
Summary:        Ceph Manager modules which are always enabled
BuildArch:      noarch
%if 0%{?suse_version}
Group:          System/Filesystems
%endif
Requires:       python%{python3_pkgversion}-bcrypt
Requires:       python%{python3_pkgversion}-pecan
Requires:       python%{python3_pkgversion}-pyOpenSSL
Requires:       python%{python3_pkgversion}-requests
Requires:       python%{python3_pkgversion}-dateutil
%if 0%{?fedora} || 0%{?rhel} >= 8
Requires:       python%{python3_pkgversion}-cherrypy
Requires:       python%{python3_pkgversion}-pyyaml
Requires:       python%{python3_pkgversion}-werkzeug
%endif
%if 0%{?suse_version}
Requires:       python%{python3_pkgversion}-CherryPy
Requires:       python%{python3_pkgversion}-PyYAML
Requires:       python%{python3_pkgversion}-Werkzeug
%endif
%if 0%{?weak_deps}
Recommends:     ceph-mgr-rook = %{_epoch_prefix}%{version}-%{release}
%endif
%description mgr-modules-core
ceph-mgr-modules-core provides a set of modules which are always
enabled by ceph-mgr.

%package mgr-rook
BuildArch:      noarch
Summary:        Ceph Manager module for Rook-based orchestration
%if 0%{?suse_version}
Group:          System/Filesystems
%endif
Requires:       ceph-mgr = %{_epoch_prefix}%{version}-%{release}
Requires:       python%{python3_pkgversion}-kubernetes
Requires:       python%{python3_pkgversion}-jsonpatch
%description mgr-rook
ceph-mgr-rook is a ceph-mgr module for orchestration functions using
a Rook backend.

%package mgr-k8sevents
BuildArch:      noarch
Summary:        Ceph Manager module to forward Ceph events to the Kubernetes events API
%if 0%{?suse_version}
Group:          System/Filesystems
%endif
Requires:       ceph-mgr = %{_epoch_prefix}%{version}-%{release}
Requires:       python%{python3_pkgversion}-kubernetes
%description mgr-k8sevents
ceph-mgr-k8sevents is a ceph-mgr module that sends every Ceph event
to the Kubernetes events API.

%package mgr-cephadm
Summary:        Ceph Manager module for cephadm-based orchestration
BuildArch:      noarch
%if 0%{?suse_version}
Group:          System/Filesystems
%endif
Requires:       ceph-mgr = %{_epoch_prefix}%{version}-%{release}
Requires:       python%{python3_pkgversion}-remoto
Requires:       cephadm = %{_epoch_prefix}%{version}-%{release}
%if 0%{?suse_version}
Requires:       openssh
Requires:       python%{python3_pkgversion}-Jinja2
%endif
%if 0%{?rhel} || 0%{?fedora}
Requires:       openssh-clients
Requires:       python%{python3_pkgversion}-jinja2
%endif
%description mgr-cephadm
ceph-mgr-cephadm is a ceph-mgr module for orchestration functions using
the integrated cephadm deployment tool for management operations.

%package fuse
Summary:        Ceph fuse-based client
%if 0%{?suse_version}
Group:          System/Filesystems
%endif
Requires:       fuse
Requires:       python%{python3_pkgversion}
%description fuse
FUSE based client for Ceph distributed network file system

%package -n cephfs-mirror
Summary:        Ceph daemon for mirroring CephFS snapshots
%if 0%{?suse_version}
Group:          System/Filesystems
%endif
Requires:       ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires:       librados2 = %{_epoch_prefix}%{version}-%{release}
Requires:       libcephfs2 = %{_epoch_prefix}%{version}-%{release}
%description -n cephfs-mirror
Daemon for mirroring CephFS snapshots between Ceph clusters.

%package -n rbd-fuse
Summary:        Ceph fuse-based client
%if 0%{?suse_version}
Group:          System/Filesystems
%endif
Requires:       librados2 = %{_epoch_prefix}%{version}-%{release}
Requires:       librbd1 = %{_epoch_prefix}%{version}-%{release}
%description -n rbd-fuse
FUSE based client to map Ceph rbd images to files

%package -n rbd-mirror
Summary:        Ceph daemon for mirroring RBD images
%if 0%{?suse_version}
Group:          System/Filesystems
%endif
Requires:       ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires:       librados2 = %{_epoch_prefix}%{version}-%{release}
Requires:       librbd1 = %{_epoch_prefix}%{version}-%{release}
%description -n rbd-mirror
Daemon for mirroring RBD images between Ceph clusters, streaming
changes asynchronously.

%package immutable-object-cache
Summary:        Ceph daemon for immutable object cache
%if 0%{?suse_version}
Group:          System/Filesystems
%endif
Requires:       ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires:       librados2 = %{_epoch_prefix}%{version}-%{release}
%description immutable-object-cache
Daemon for immutable object cache.

%package -n rbd-nbd
Summary:        Ceph RBD client based on NBD
%if 0%{?suse_version}
Group:          System/Filesystems
%endif
Requires:       librados2 = %{_epoch_prefix}%{version}-%{release}
Requires:       librbd1 = %{_epoch_prefix}%{version}-%{release}
%description -n rbd-nbd
NBD based client to map Ceph rbd images to a local device

%package radosgw
Summary:        Rados REST gateway
%if 0%{?suse_version}
Group:          System/Filesystems
%endif
Requires:       ceph-base = %{_epoch_prefix}%{version}-%{release}
%if 0%{with selinux}
Requires:       ceph-selinux = %{_epoch_prefix}%{version}-%{release}
%endif
Requires:       librados2 = %{_epoch_prefix}%{version}-%{release}
Requires:       librgw2 = %{_epoch_prefix}%{version}-%{release}
%if 0%{?rhel} || 0%{?fedora}
Requires:       mailcap
%endif
%if 0%{?weak_deps}
Recommends:     gawk
%endif
%description radosgw
RADOS is a distributed object store used by the Ceph distributed
storage system.  This package provides a REST gateway to the
object store that aims to implement a superset of Amazon's S3
service as well as the OpenStack Object Storage ("Swift") API.

%package -n cephfs-top
Summary:    top(1) like utility for Ceph Filesystem
BuildArch:  noarch
Requires:   python%{python3_pkgversion}-rados
%description -n cephfs-top
This package provides a top(1) like utility to display Ceph Filesystem metrics
in real time.

%if %{with ocf}
%package resource-agents
Summary:        OCF-compliant resource agents for Ceph daemons
BuildArch:      noarch
%if 0%{?suse_version}
Group:          System/Filesystems
%endif
Requires:       ceph-base = %{_epoch_prefix}%{version}
Requires:       resource-agents
%description resource-agents
Resource agents for monitoring and managing Ceph daemons
under Open Cluster Framework (OCF) compliant resource
managers such as Pacemaker.
%endif

%package osd
Summary:        Ceph Object Storage Daemon
%if 0%{?suse_version}
Group:          System/Filesystems
%endif
Provides:       ceph-test:/usr/bin/ceph-osdomap-tool
Requires:       ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires:       lvm2
Requires:       sudo
Requires:       libstoragemgmt
Requires:       python%{python3_pkgversion}-ceph-common = %{_epoch_prefix}%{version}-%{release}
%if 0%{?weak_deps}
Recommends:     nvme-cli
%if 0%{?suse_version}
Requires:       smartmontools
%else
Recommends:     smartmontools
%endif
%endif
%description osd
ceph-osd is the object storage daemon for the Ceph distributed file
system.  It is responsible for storing objects on a local file system
and providing access to them over the network.

%if 0%{with seastar}
%package crimson-osd
Summary:        Ceph Object Storage Daemon (crimson)
%if 0%{?suse_version}
Group:          System/Filesystems
%endif
Requires:       ceph-osd = %{_epoch_prefix}%{version}-%{release}
Requires:       binutils
%description crimson-osd
crimson-osd is the object storage daemon for the Ceph distributed file
system.  It is responsible for storing objects on a local file system
and providing access to them over the network.
%endif

%package -n librados2
Summary:        RADOS distributed object store client library
%if 0%{?suse_version}
Group:          System/Libraries
%endif
%if 0%{?rhel} || 0%{?fedora}
Obsoletes:      ceph-libs < %{_epoch_prefix}%{version}-%{release}
%endif
%description -n librados2
RADOS is a reliable, autonomic distributed object storage cluster
developed as part of the Ceph distributed storage system. This is a
shared library allowing applications to access the distributed object
store using a simple file-like interface.

%package -n librados-devel
Summary:        RADOS headers
%if 0%{?suse_version}
Group:          Development/Libraries/C and C++
%endif
Requires:       librados2 = %{_epoch_prefix}%{version}-%{release}
Obsoletes:      ceph-devel < %{_epoch_prefix}%{version}-%{release}
Provides:       librados2-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes:      librados2-devel < %{_epoch_prefix}%{version}-%{release}
%description -n librados-devel
This package contains C libraries and headers needed to develop programs
that use the RADOS object store.

%package -n libradospp-devel
Summary:        RADOS headers
%if 0%{?suse_version}
Group:          Development/Libraries/C and C++
%endif
Requires:       librados2 = %{_epoch_prefix}%{version}-%{release}
Requires:       librados-devel = %{_epoch_prefix}%{version}-%{release}
%description -n libradospp-devel
This package contains C++ libraries and headers needed to develop programs
that use the RADOS object store.

%package -n librgw2
Summary:        RADOS gateway client library
%if 0%{?suse_version}
Group:          System/Libraries
%endif
Requires:       librados2 = %{_epoch_prefix}%{version}-%{release}
%description -n librgw2
This package provides a library implementation of the RADOS gateway
(distributed object store with S3 and Swift personalities).

%package -n librgw-devel
Summary:        RADOS gateway client library
%if 0%{?suse_version}
Group:          Development/Libraries/C and C++
%endif
Requires:       librados-devel = %{_epoch_prefix}%{version}-%{release}
Requires:       librgw2 = %{_epoch_prefix}%{version}-%{release}
Provides:       librgw2-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes:      librgw2-devel < %{_epoch_prefix}%{version}-%{release}
%description -n librgw-devel
This package contains libraries and headers needed to develop programs
that use the RADOS gateway client library.

%package -n python%{python3_pkgversion}-rgw
Summary:        Python 3 libraries for the RADOS gateway
%if 0%{?suse_version}
Group:          Development/Libraries/Python
%endif
Requires:       librgw2 = %{_epoch_prefix}%{version}-%{release}
Requires:       python%{python3_pkgversion}-rados = %{_epoch_prefix}%{version}-%{release}
%{?python_provide:%python_provide python%{python3_pkgversion}-rgw}
Provides:       python-rgw = %{_epoch_prefix}%{version}-%{release}
Obsoletes:      python-rgw < %{_epoch_prefix}%{version}-%{release}
%description -n python%{python3_pkgversion}-rgw
This package contains Python 3 libraries for interacting with Ceph RADOS
gateway.

%package -n python%{python3_pkgversion}-rados
Summary:        Python 3 libraries for the RADOS object store
%if 0%{?suse_version}
Group:          Development/Libraries/Python
%endif
Requires:       python%{python3_pkgversion}
Requires:       librados2 = %{_epoch_prefix}%{version}-%{release}
%{?python_provide:%python_provide python%{python3_pkgversion}-rados}
Provides:       python-rados = %{_epoch_prefix}%{version}-%{release}
Obsoletes:      python-rados < %{_epoch_prefix}%{version}-%{release}
%description -n python%{python3_pkgversion}-rados
This package contains Python 3 libraries for interacting with Ceph RADOS
object store.

%package -n libcephsqlite
Summary:        SQLite3 VFS for Ceph
%if 0%{?suse_version}
Group:          System/Libraries
%endif
Requires:       librados2 = %{_epoch_prefix}%{version}-%{release}
%description -n libcephsqlite
A SQLite3 VFS for storing and manipulating databases stored on Ceph's RADOS
distributed object store.

%package -n libcephsqlite-devel
Summary:        SQLite3 VFS for Ceph headers
%if 0%{?suse_version}
Group:          Development/Libraries/C and C++
%endif
Requires:       sqlite-devel
Requires:       libcephsqlite = %{_epoch_prefix}%{version}-%{release}
Requires:       librados-devel = %{_epoch_prefix}%{version}-%{release}
Requires:       libradospp-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes:      ceph-devel < %{_epoch_prefix}%{version}-%{release}
Provides:       libcephsqlite-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes:      libcephsqlite-devel < %{_epoch_prefix}%{version}-%{release}
%description -n libcephsqlite-devel
A SQLite3 VFS for storing and manipulating databases stored on Ceph's RADOS
distributed object store.

%if 0%{with libradosstriper}
%package -n libradosstriper1
Summary:        RADOS striping interface
%if 0%{?suse_version}
Group:          System/Libraries
%endif
Requires:       librados2 = %{_epoch_prefix}%{version}-%{release}
%description -n libradosstriper1
Striping interface built on top of the rados library, allowing bigger
objects to be striped onto several standard rados objects using an
interface very similar to the rados one.

%package -n libradosstriper-devel
Summary:        RADOS striping interface headers
%if 0%{?suse_version}
Group:          Development/Libraries/C and C++
%endif
Requires:       libradosstriper1 = %{_epoch_prefix}%{version}-%{release}
Requires:       librados-devel = %{_epoch_prefix}%{version}-%{release}
Requires:       libradospp-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes:      ceph-devel < %{_epoch_prefix}%{version}-%{release}
Provides:       libradosstriper1-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes:      libradosstriper1-devel < %{_epoch_prefix}%{version}-%{release}
%description -n libradosstriper-devel
This package contains libraries and headers needed to develop programs
that use the RADOS striping interface.
%endif

%package -n librbd1
Summary:        RADOS block device client library
%if 0%{?suse_version}
Group:          System/Libraries
%endif
Requires:       librados2 = %{_epoch_prefix}%{version}-%{release}
%if 0%{?suse_version}
Requires(post): coreutils
%endif
%if 0%{?rhel} || 0%{?fedora}
Obsoletes:      ceph-libs < %{_epoch_prefix}%{version}-%{release}
%endif
%description -n librbd1
RBD is a block device striped across multiple distributed objects in
RADOS, a reliable, autonomic distributed object storage cluster
developed as part of the Ceph distributed storage system. This is a
shared library allowing applications to manage these block devices.

%package -n librbd-devel
Summary:        RADOS block device headers
%if 0%{?suse_version}
Group:          Development/Libraries/C and C++
%endif
Requires:       librbd1 = %{_epoch_prefix}%{version}-%{release}
Requires:       librados-devel = %{_epoch_prefix}%{version}-%{release}
Requires:       libradospp-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes:      ceph-devel < %{_epoch_prefix}%{version}-%{release}
Provides:       librbd1-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes:      librbd1-devel < %{_epoch_prefix}%{version}-%{release}
%description -n librbd-devel
This package contains libraries and headers needed to develop programs
that use the RADOS block device.

%package -n python%{python3_pkgversion}-rbd
Summary:        Python 3 libraries for the RADOS block device
%if 0%{?suse_version}
Group:          Development/Libraries/Python
%endif
Requires:       librbd1 = %{_epoch_prefix}%{version}-%{release}
Requires:       python%{python3_pkgversion}-rados = %{_epoch_prefix}%{version}-%{release}
%{?python_provide:%python_provide python%{python3_pkgversion}-rbd}
Provides:       python-rbd = %{_epoch_prefix}%{version}-%{release}
Obsoletes:      python-rbd < %{_epoch_prefix}%{version}-%{release}
%description -n python%{python3_pkgversion}-rbd
This package contains Python 3 libraries for interacting with Ceph RADOS
block device.

%package -n libcephfs2
Summary:        Ceph distributed file system client library
%if 0%{?suse_version}
Group:          System/Libraries
%endif
Obsoletes:      libcephfs1 < %{_epoch_prefix}%{version}-%{release}
%if 0%{?rhel} || 0%{?fedora}
Obsoletes:      ceph-libs < %{_epoch_prefix}%{version}-%{release}
Obsoletes:      ceph-libcephfs
%endif
%description -n libcephfs2
Ceph is a distributed network file system designed to provide excellent
performance, reliability, and scalability. This is a shared library
allowing applications to access a Ceph distributed file system via a
POSIX-like interface.

%package -n libcephfs-devel
Summary:        Ceph distributed file system headers
%if 0%{?suse_version}
Group:          Development/Libraries/C and C++
%endif
Requires:       libcephfs2 = %{_epoch_prefix}%{version}-%{release}
Requires:       librados-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes:      ceph-devel < %{_epoch_prefix}%{version}-%{release}
Provides:       libcephfs2-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes:      libcephfs2-devel < %{_epoch_prefix}%{version}-%{release}
%description -n libcephfs-devel
This package contains libraries and headers needed to develop programs
that use the Ceph distributed file system.

%if 0%{with jaeger}
%package -n libjaeger
Summary:        Ceph distributed file system tracing library
%if 0%{?suse_version}
Group:          System/Libraries
%endif
Provides:       libjaegertracing.so.0()(64bit)
Provides:       libopentracing.so.1()(64bit)
Provides:       libthrift.so.0.13.0()(64bit)
%description -n libjaeger
This package contains libraries needed to provide distributed
tracing for Ceph.
%endif

%package -n python%{python3_pkgversion}-cephfs
Summary:        Python 3 libraries for Ceph distributed file system
%if 0%{?suse_version}
Group:          Development/Libraries/Python
%endif
Requires:       libcephfs2 = %{_epoch_prefix}%{version}-%{release}
Requires:       python%{python3_pkgversion}-rados = %{_epoch_prefix}%{version}-%{release}
Requires:       python%{python3_pkgversion}-ceph-argparse = %{_epoch_prefix}%{version}-%{release}
%{?python_provide:%python_provide python%{python3_pkgversion}-cephfs}
Provides:       python-cephfs = %{_epoch_prefix}%{version}-%{release}
Obsoletes:      python-cephfs < %{_epoch_prefix}%{version}-%{release}
%description -n python%{python3_pkgversion}-cephfs
This package contains Python 3 libraries for interacting with Ceph distributed
file system.

%package -n python%{python3_pkgversion}-ceph-argparse
Summary:        Python 3 utility libraries for Ceph CLI
%if 0%{?suse_version}
Group:          Development/Libraries/Python
%endif
%{?python_provide:%python_provide python%{python3_pkgversion}-ceph-argparse}
%description -n python%{python3_pkgversion}-ceph-argparse
This package contains types and routines for Python 3 used by the Ceph CLI as
well as the RESTful interface. These have to do with querying the daemons for
command-description information, validating user command input against those
descriptions, and submitting the command to the appropriate daemon.

%package -n python%{python3_pkgversion}-ceph-common
Summary:        Python 3 utility libraries for Ceph
%if 0%{?fedora} || 0%{?rhel} >= 8
Requires:       python%{python3_pkgversion}-pyyaml
%endif
%if 0%{?suse_version}
Requires:       python%{python3_pkgversion}-PyYAML
%endif
%if 0%{?suse_version}
Group:          Development/Libraries/Python
%endif
%{?python_provide:%python_provide python%{python3_pkgversion}-ceph-common}
%description -n python%{python3_pkgversion}-ceph-common
This package contains data structures, classes and functions used by Ceph.
It also contains utilities used for the cephadm orchestrator.

%if 0%{with cephfs_shell}
%package -n cephfs-shell
Summary:    Interactive shell for Ceph file system
Requires:   python%{python3_pkgversion}-cmd2
Requires:   python%{python3_pkgversion}-colorama
Requires:   python%{python3_pkgversion}-cephfs
%description -n cephfs-shell
This package contains an interactive tool that allows accessing a Ceph
file system without mounting it, by providing a pseudo-shell which
works like an FTP client.
%endif

%if 0%{with ceph_test_package}
%package -n ceph-test
Summary:        Ceph benchmarks and test tools
%if 0%{?suse_version}
Group:          System/Benchmark
%endif
Requires:       ceph-common = %{_epoch_prefix}%{version}-%{release}
Requires:       xmlstarlet
Requires:       jq
Requires:       socat
%description -n ceph-test
This package contains Ceph benchmarks and test tools.
%endif

%if 0%{with cephfs_java}

%package -n libcephfs_jni1
Summary:        Java Native Interface library for CephFS Java bindings
%if 0%{?suse_version}
Group:          System/Libraries
%endif
Requires:       java
Requires:       libcephfs2 = %{_epoch_prefix}%{version}-%{release}
%description -n libcephfs_jni1
This package contains the Java Native Interface library for CephFS Java
bindings.

%package -n libcephfs_jni-devel
Summary:        Development files for CephFS Java Native Interface library
%if 0%{?suse_version}
Group:          Development/Libraries/Java
%endif
Requires:       java
Requires:       libcephfs_jni1 = %{_epoch_prefix}%{version}-%{release}
Obsoletes:      ceph-devel < %{_epoch_prefix}%{version}-%{release}
Provides:       libcephfs_jni1-devel = %{_epoch_prefix}%{version}-%{release}
Obsoletes:      libcephfs_jni1-devel < %{_epoch_prefix}%{version}-%{release}
%description -n libcephfs_jni-devel
This package contains the development files for CephFS Java Native Interface
library.

%package -n cephfs-java
Summary:        Java libraries for the Ceph File System
%if 0%{?suse_version}
Group:          System/Libraries
%endif
Requires:       java
Requires:       libcephfs_jni1 = %{_epoch_prefix}%{version}-%{release}
Requires:       junit
BuildRequires:  junit
%description -n cephfs-java
This package contains the Java libraries for the Ceph File System.

%endif

%package -n rados-objclass-devel
Summary:        RADOS object class development kit
%if 0%{?suse_version}
Group:          Development/Libraries/C and C++
%endif
Requires:       libradospp-devel = %{_epoch_prefix}%{version}-%{release}
%description -n rados-objclass-devel
This package contains libraries and headers needed to develop RADOS object
class plugins.

%if 0%{with selinux}

%package selinux
Summary:        SELinux support for Ceph MON, OSD and MDS
%if 0%{?suse_version}
Group:          System/Filesystems
%endif
Requires:       ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires:       policycoreutils, libselinux-utils
Requires(post): ceph-base = %{_epoch_prefix}%{version}-%{release}
Requires(post): selinux-policy-base >= %{_selinux_policy_version}, policycoreutils, gawk
Requires(postun): policycoreutils
%description selinux
This package contains SELinux support for Ceph MON, OSD and MDS. The package
also performs file-system relabelling which can take a long time on heavily
populated file-systems.

%endif

%package grafana-dashboards
Summary:        The set of Grafana dashboards for monitoring purposes
BuildArch:      noarch
%if 0%{?suse_version}
Group:          System/Filesystems
%endif
%description grafana-dashboards
This package provides a set of Grafana dashboards for monitoring of
Ceph clusters. The dashboards require a Prometheus server set up to
collect data from the Ceph Manager "prometheus" module and the Prometheus
project's "node_exporter" module. The dashboards are designed to be
integrated with the Ceph Manager Dashboard web UI.

%package prometheus-alerts
Summary:        Prometheus alerts for a Ceph deployment
BuildArch:      noarch
Group:          System/Monitoring
%description prometheus-alerts
This package provides Ceph default alerts for Prometheus.

#################################################################################
# common
#################################################################################
%prep
%autosetup -p1 -n @TARBALL_BASENAME@

%build
# LTO can be enabled as soon as the following GCC bug is fixed:
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=48200
%define _lto_cflags %{nil}

%if 0%{with seastar} && 0%{?rhel}
. /opt/rh/gcc-toolset-9/enable
%endif

%if 0%{with cephfs_java}
# Find jni.h
for i in /usr/{lib64,lib}/jvm/java/include{,/linux}; do
    [ -d $i ] && java_inc="$java_inc -I$i"
done
%endif

%if 0%{?suse_version}
%limit_build -m 2600
%endif

export CPPFLAGS="$java_inc"
export CFLAGS="$RPM_OPT_FLAGS"
export CXXFLAGS="$RPM_OPT_FLAGS"
export LDFLAGS="$RPM_LD_FLAGS"

%if 0%{with seastar}
# seastar uses longjmp() to implement coroutines, and this annoys longjmp_chk()
export CXXFLAGS=$(echo $RPM_OPT_FLAGS | sed -e 's/-Wp,-D_FORTIFY_SOURCE=2//g')
%endif

env | sort

%{?!_vpath_builddir:%global _vpath_builddir %{_target_platform}}
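# When rpm does not already define it, _vpath_builddir falls back to the
# target platform triple (typically something like "x86_64-redhat-linux-gnu"),
# so the out-of-tree build happens in a directory of that name.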

# TODO: drop this step once we can use `cmake -B`
mkdir -p %{_vpath_builddir}
pushd %{_vpath_builddir}
cmake .. \
    -DCMAKE_INSTALL_PREFIX=%{_prefix} \
    -DCMAKE_INSTALL_LIBDIR:PATH=%{_libdir} \
    -DCMAKE_INSTALL_LIBEXECDIR:PATH=%{_libexecdir} \
    -DCMAKE_INSTALL_LOCALSTATEDIR:PATH=%{_localstatedir} \
    -DCMAKE_INSTALL_SYSCONFDIR:PATH=%{_sysconfdir} \
    -DCMAKE_INSTALL_MANDIR:PATH=%{_mandir} \
    -DCMAKE_INSTALL_DOCDIR:PATH=%{_docdir}/ceph \
    -DCMAKE_INSTALL_INCLUDEDIR:PATH=%{_includedir} \
    -DCMAKE_INSTALL_SYSTEMD_SERVICEDIR:PATH=%{_unitdir} \
    -DWITH_MANPAGE:BOOL=ON \
    -DWITH_PYTHON3:STRING=%{python3_version} \
    -DWITH_MGR_DASHBOARD_FRONTEND:BOOL=OFF \
%if 0%{without ceph_test_package}
    -DWITH_TESTS:BOOL=OFF \
%endif
%if 0%{with cephfs_java}
    -DWITH_CEPHFS_JAVA:BOOL=ON \
%endif
%if 0%{with selinux}
    -DWITH_SELINUX:BOOL=ON \
%endif
%if %{with lttng}
    -DWITH_LTTNG:BOOL=ON \
    -DWITH_BABELTRACE:BOOL=ON \
%else
    -DWITH_LTTNG:BOOL=OFF \
    -DWITH_BABELTRACE:BOOL=OFF \
%endif
    $CEPH_EXTRA_CMAKE_ARGS \
%if 0%{with ocf}
    -DWITH_OCF:BOOL=ON \
%endif
%if 0%{with cephfs_shell}
    -DWITH_CEPHFS_SHELL:BOOL=ON \
%endif
%if 0%{with libradosstriper}
    -DWITH_LIBRADOSSTRIPER:BOOL=ON \
%else
    -DWITH_LIBRADOSSTRIPER:BOOL=OFF \
%endif
%if 0%{with amqp_endpoint}
    -DWITH_RADOSGW_AMQP_ENDPOINT:BOOL=ON \
%else
    -DWITH_RADOSGW_AMQP_ENDPOINT:BOOL=OFF \
%endif
%if 0%{with kafka_endpoint}
    -DWITH_RADOSGW_KAFKA_ENDPOINT:BOOL=ON \
%else
    -DWITH_RADOSGW_KAFKA_ENDPOINT:BOOL=OFF \
%endif
%if 0%{without lua_packages}
    -DWITH_RADOSGW_LUA_PACKAGES:BOOL=OFF \
%endif
%if 0%{with zbd}
    -DWITH_ZBD:BOOL=ON \
%endif
%if 0%{with cmake_verbose_logging}
    -DCMAKE_VERBOSE_MAKEFILE:BOOL=ON \
%endif
%if 0%{with rbd_rwl_cache}
    -DWITH_RBD_RWL:BOOL=ON \
%endif
%if 0%{with rbd_ssd_cache}
    -DWITH_RBD_SSD_CACHE:BOOL=ON \
%endif
%if 0%{with system_pmdk}
    -DWITH_SYSTEM_PMDK:BOOL=ON \
%endif
%if 0%{?suse_version}
    -DBOOST_J:STRING=%{jobs} \
%else
    -DBOOST_J:STRING=%{_smp_build_ncpus} \
%endif
    -DWITH_GRAFANA:BOOL=ON

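# Additional CMake flags can be injected through CEPH_EXTRA_CMAKE_ARGS at
# build time, e.g. (illustrative invocation, not a supported default):
#   CEPH_EXTRA_CMAKE_ARGS="-DCMAKE_BUILD_TYPE=RelWithDebInfo" rpmbuild -ba ceph.spec
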
1352 %if %{with cmake_verbose_logging}
1353 cat ./CMakeFiles/CMakeOutput.log
1354 cat ./CMakeFiles/CMakeError.log
1355 %endif
1356
1357 %if 0%{?suse_version}
1358 make %{_smp_mflags}
1359 %else
1360 %make_build
1361 %endif
1362
1363 popd
1364
1365 %if 0%{with make_check}
1366 %check
1367 # run in-tree unittests
1368 pushd %{_vpath_builddir}
1369 ctest %{_smp_mflags}
1370 popd
1371 %endif
1372
1373
1374 %install
1375 pushd %{_vpath_builddir}
1376 %make_install
1377 # we have dropped sysvinit bits
1378 rm -f %{buildroot}/%{_sysconfdir}/init.d/ceph
1379 popd
1380
1381 %if 0%{with seastar}
1382 # package crimson-osd with the name of ceph-osd
1383 install -m 0755 %{buildroot}%{_bindir}/crimson-osd %{buildroot}%{_bindir}/ceph-osd
1384 %endif
1385
1386 install -m 0644 -D src/etc-rbdmap %{buildroot}%{_sysconfdir}/ceph/rbdmap
1387 %if 0%{?fedora} || 0%{?rhel}
1388 install -m 0644 -D etc/sysconfig/ceph %{buildroot}%{_sysconfdir}/sysconfig/ceph
1389 %endif
1390 %if 0%{?suse_version}
1391 install -m 0644 -D etc/sysconfig/ceph %{buildroot}%{_fillupdir}/sysconfig.%{name}
1392 %endif
1393 install -m 0644 -D systemd/ceph.tmpfiles.d %{buildroot}%{_tmpfilesdir}/ceph-common.conf
1394 install -m 0644 -D systemd/50-ceph.preset %{buildroot}%{_presetdir}/50-ceph.preset
1395 mkdir -p %{buildroot}%{_sbindir}
1396 install -m 0644 -D src/logrotate.conf %{buildroot}%{_sysconfdir}/logrotate.d/ceph
1397 chmod 0644 %{buildroot}%{_docdir}/ceph/sample.ceph.conf
1398 install -m 0644 -D COPYING %{buildroot}%{_docdir}/ceph/COPYING
1399 install -m 0644 -D etc/sysctl/90-ceph-osd.conf %{buildroot}%{_sysctldir}/90-ceph-osd.conf
1400 install -m 0755 -D src/tools/rbd_nbd/rbd-nbd_quiesce %{buildroot}%{_libexecdir}/rbd-nbd/rbd-nbd_quiesce
1401
1402 install -m 0755 src/cephadm/cephadm %{buildroot}%{_sbindir}/cephadm
1403 mkdir -p %{buildroot}%{_sharedstatedir}/cephadm
1404 chmod 0700 %{buildroot}%{_sharedstatedir}/cephadm
1405 mkdir -p %{buildroot}%{_sharedstatedir}/cephadm/.ssh
1406 chmod 0700 %{buildroot}%{_sharedstatedir}/cephadm/.ssh
1407 touch %{buildroot}%{_sharedstatedir}/cephadm/.ssh/authorized_keys
1408 chmod 0600 %{buildroot}%{_sharedstatedir}/cephadm/.ssh/authorized_keys
1409
1410 # firewall templates and /sbin/mount.ceph symlink
1411 %if 0%{?suse_version} && !0%{?usrmerged}
1412 mkdir -p %{buildroot}/sbin
1413 ln -sf %{_sbindir}/mount.ceph %{buildroot}/sbin/mount.ceph
1414 %endif
1415
1416 # udev rules
1417 install -m 0644 -D udev/50-rbd.rules %{buildroot}%{_udevrulesdir}/50-rbd.rules
1418
1419 # sudoers.d
1420 install -m 0440 -D sudoers.d/ceph-osd-smartctl %{buildroot}%{_sysconfdir}/sudoers.d/ceph-osd-smartctl
1421
1422 %if 0%{?rhel} >= 8
1423 pathfix.py -pni "%{__python3} %{py3_shbang_opts}" %{buildroot}%{_bindir}/*
1424 pathfix.py -pni "%{__python3} %{py3_shbang_opts}" %{buildroot}%{_sbindir}/*
1425 %endif
1426
1427 #set up placeholder directories
1428 mkdir -p %{buildroot}%{_sysconfdir}/ceph
1429 mkdir -p %{buildroot}%{_localstatedir}/run/ceph
1430 mkdir -p %{buildroot}%{_localstatedir}/log/ceph
1431 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/tmp
1432 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/mon
1433 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/osd
1434 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/mds
1435 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/mgr
1436 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/crash
1437 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/crash/posted
1438 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/radosgw
1439 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-osd
1440 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-mds
1441 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-rgw
1442 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-mgr
1443 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-rbd
1444 mkdir -p %{buildroot}%{_localstatedir}/lib/ceph/bootstrap-rbd-mirror
1445
1446 # prometheus alerts
1447 install -m 644 -D monitoring/prometheus/alerts/ceph_default_alerts.yml %{buildroot}/etc/prometheus/ceph/ceph_default_alerts.yml
1448
1449 %if 0%{?suse_version}
1450 # create __pycache__ directories and their contents
1451 %py3_compile %{buildroot}%{python3_sitelib}
1452 # hardlink duplicate files under /usr to save space
1453 %fdupes %{buildroot}%{_prefix}
1454 %endif
1455
1456 %if 0%{?rhel} == 8
1457 %py_byte_compile %{__python3} %{buildroot}%{python3_sitelib}
1458 %endif
1459
1460 %clean
1461 rm -rf %{buildroot}
1462
1463 #################################################################################
1464 # files and systemd scriptlets
1465 #################################################################################
1466 %files
1467
1468 %files base
1469 %{_bindir}/ceph-crash
1470 %{_bindir}/crushtool
1471 %{_bindir}/monmaptool
1472 %{_bindir}/osdmaptool
1473 %{_bindir}/ceph-kvstore-tool
1474 %{_bindir}/ceph-run
1475 %{_presetdir}/50-ceph.preset
1476 %{_sbindir}/ceph-create-keys
1477 %dir %{_libexecdir}/ceph
1478 %{_libexecdir}/ceph/ceph_common.sh
1479 %dir %{_libdir}/rados-classes
1480 %{_libdir}/rados-classes/*
1481 %dir %{_libdir}/ceph
1482 %dir %{_libdir}/ceph/erasure-code
1483 %{_libdir}/ceph/erasure-code/libec_*.so*
1484 %dir %{_libdir}/ceph/compressor
1485 %{_libdir}/ceph/compressor/libceph_*.so*
1486 %{_unitdir}/ceph-crash.service
1487 %dir %{_libdir}/ceph/crypto
1488 %{_libdir}/ceph/crypto/libceph_*.so*
1489 %if %{with lttng}
1490 %{_libdir}/libos_tp.so*
1491 %{_libdir}/libosd_tp.so*
1492 %endif
1493 %config(noreplace) %{_sysconfdir}/logrotate.d/ceph
1494 %if 0%{?fedora} || 0%{?rhel}
1495 %config(noreplace) %{_sysconfdir}/sysconfig/ceph
1496 %endif
1497 %if 0%{?suse_version}
1498 %{_fillupdir}/sysconfig.*
1499 %endif
1500 %{_unitdir}/ceph.target
1501 %dir %{python3_sitelib}/ceph_volume
1502 %{python3_sitelib}/ceph_volume/*
1503 %{python3_sitelib}/ceph_volume-*
1504 %{_mandir}/man8/ceph-deploy.8*
1505 %{_mandir}/man8/ceph-create-keys.8*
1506 %{_mandir}/man8/ceph-run.8*
1507 %{_mandir}/man8/crushtool.8*
1508 %{_mandir}/man8/osdmaptool.8*
1509 %{_mandir}/man8/monmaptool.8*
1510 %{_mandir}/man8/ceph-kvstore-tool.8*
1511 # ownership of the placeholder directories created during install
1512 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/crash
1513 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/crash/posted
1514 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/tmp
1515 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-osd
1516 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-mds
1517 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rgw
1518 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-mgr
1519 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rbd
1520 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/bootstrap-rbd-mirror
1521
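# Scriptlet argument convention used throughout the sections below: in a post
# scriptlet $1 is the number of package instances installed after the transaction
# (1 = fresh install, 2 or more = upgrade); in preun/postun it is the number that
# will remain (0 = full removal, 1 or more = upgrade). SUSE builds rely on the
# preset/fillup/service_del_preun helpers, while Fedora/RHEL builds use the
# systemd_post and systemd_preun macro family.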
1522 %post base
1523 /sbin/ldconfig
1524 %if 0%{?suse_version}
1525 %fillup_only
1526 if [ $1 -eq 1 ] ; then
1527   /usr/bin/systemctl preset ceph.target ceph-crash.service >/dev/null 2>&1 || :
1528 fi
1529 %endif
1530 %if 0%{?fedora} || 0%{?rhel}
1531 %systemd_post ceph.target ceph-crash.service
1532 %endif
1533 if [ $1 -eq 1 ] ; then
1534 /usr/bin/systemctl start ceph.target ceph-crash.service >/dev/null 2>&1 || :
1535 fi
1536
1537 %preun base
1538 %if 0%{?suse_version}
1539 %service_del_preun ceph.target ceph-crash.service
1540 %endif
1541 %if 0%{?fedora} || 0%{?rhel}
1542 %systemd_preun ceph.target ceph-crash.service
1543 %endif
1544
1545 %postun base
1546 /sbin/ldconfig
1547 %systemd_postun ceph.target
1548
1549 %pre -n cephadm
1550 getent group cephadm >/dev/null || groupadd -r cephadm
1551 getent passwd cephadm >/dev/null || useradd -r -g cephadm -s /bin/bash -c "cephadm user for mgr/cephadm" -d %{_sharedstatedir}/cephadm cephadm
1552 exit 0
1553
1554 %if ! 0%{?suse_version}
1555 %postun -n cephadm
1556 userdel -r cephadm || true
1557 exit 0
1558 %endif
1559
1560 %files -n cephadm
1561 %{_sbindir}/cephadm
1562 %{_mandir}/man8/cephadm.8*
1563 %attr(0700,cephadm,cephadm) %dir %{_sharedstatedir}/cephadm
1564 %attr(0700,cephadm,cephadm) %dir %{_sharedstatedir}/cephadm/.ssh
1565 %attr(0600,cephadm,cephadm) %{_sharedstatedir}/cephadm/.ssh/authorized_keys
1566
1567 %files common
1568 %dir %{_docdir}/ceph
1569 %doc %{_docdir}/ceph/sample.ceph.conf
1570 %license %{_docdir}/ceph/COPYING
1571 %{_bindir}/ceph
1572 %{_bindir}/ceph-authtool
1573 %{_bindir}/ceph-conf
1574 %{_bindir}/ceph-dencoder
1575 %{_bindir}/ceph-rbdnamer
1576 %{_bindir}/ceph-syn
1577 %{_bindir}/cephfs-data-scan
1578 %{_bindir}/cephfs-journal-tool
1579 %{_bindir}/cephfs-table-tool
1580 %{_bindir}/rados
1581 %{_bindir}/radosgw-admin
1582 %{_bindir}/rbd
1583 %{_bindir}/rbd-replay
1584 %{_bindir}/rbd-replay-many
1585 %{_bindir}/rbdmap
1586 %{_sbindir}/mount.ceph
1587 %if 0%{?suse_version} && !0%{?usrmerged}
1588 /sbin/mount.ceph
1589 %endif
1590 %if %{with lttng}
1591 %{_bindir}/rbd-replay-prep
1592 %endif
1593 %{_bindir}/ceph-post-file
1594 %dir %{_libdir}/ceph/denc
1595 %{_libdir}/ceph/denc/denc-mod-*.so
1596 %{_tmpfilesdir}/ceph-common.conf
1597 %{_mandir}/man8/ceph-authtool.8*
1598 %{_mandir}/man8/ceph-conf.8*
1599 %{_mandir}/man8/ceph-dencoder.8*
1600 %{_mandir}/man8/ceph-diff-sorted.8*
1601 %{_mandir}/man8/ceph-rbdnamer.8*
1602 %{_mandir}/man8/ceph-syn.8*
1603 %{_mandir}/man8/ceph-post-file.8*
1604 %{_mandir}/man8/ceph.8*
1605 %{_mandir}/man8/mount.ceph.8*
1606 %{_mandir}/man8/rados.8*
1607 %{_mandir}/man8/radosgw-admin.8*
1608 %{_mandir}/man8/rbd.8*
1609 %{_mandir}/man8/rbdmap.8*
1610 %{_mandir}/man8/rbd-replay.8*
1611 %{_mandir}/man8/rbd-replay-many.8*
1612 %{_mandir}/man8/rbd-replay-prep.8*
1613 %{_mandir}/man8/rgw-orphan-list.8*
1614 %dir %{_datadir}/ceph/
1615 %{_datadir}/ceph/known_hosts_drop.ceph.com
1616 %{_datadir}/ceph/id_rsa_drop.ceph.com
1617 %{_datadir}/ceph/id_rsa_drop.ceph.com.pub
1618 %dir %{_sysconfdir}/ceph/
1619 %config %{_sysconfdir}/bash_completion.d/ceph
1620 %config %{_sysconfdir}/bash_completion.d/rados
1621 %config %{_sysconfdir}/bash_completion.d/rbd
1622 %config %{_sysconfdir}/bash_completion.d/radosgw-admin
1623 %config(noreplace) %{_sysconfdir}/ceph/rbdmap
1624 %{_unitdir}/rbdmap.service
1625 %dir %{_udevrulesdir}
1626 %{_udevrulesdir}/50-rbd.rules
1627 %attr(3770,ceph,ceph) %dir %{_localstatedir}/log/ceph/
1628 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/
1629
1630 %pre common
1631 CEPH_GROUP_ID=167
1632 CEPH_USER_ID=167
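# 167 is the uid/gid conventionally reserved for ceph on Fedora/RHEL; -o permits
# creating the account even if that numeric id is already in use.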
1633 %if 0%{?rhel} || 0%{?fedora}
1634 /usr/sbin/groupadd ceph -g $CEPH_GROUP_ID -o -r 2>/dev/null || :
1635 /usr/sbin/useradd ceph -u $CEPH_USER_ID -o -r -g ceph -s /sbin/nologin -c "Ceph daemons" -d %{_localstatedir}/lib/ceph 2>/dev/null || :
1636 %endif
1637 %if 0%{?suse_version}
1638 if ! getent group ceph >/dev/null ; then
1639     CEPH_GROUP_ID_OPTION=""
1640     getent group $CEPH_GROUP_ID >/dev/null || CEPH_GROUP_ID_OPTION="-g $CEPH_GROUP_ID"
1641     groupadd ceph $CEPH_GROUP_ID_OPTION -r 2>/dev/null || :
1642 fi
1643 if ! getent passwd ceph >/dev/null ; then
1644     CEPH_USER_ID_OPTION=""
1645     getent passwd $CEPH_USER_ID >/dev/null || CEPH_USER_ID_OPTION="-u $CEPH_USER_ID"
1646     useradd ceph $CEPH_USER_ID_OPTION -r -g ceph -s /sbin/nologin 2>/dev/null || :
1647 fi
1648 usermod -c "Ceph storage service" \
1649         -d %{_localstatedir}/lib/ceph \
1650         -g ceph \
1651         -s /sbin/nologin \
1652         ceph
1653 %endif
1654 exit 0
1655
1656 %post common
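# Apply the packaged tmpfiles configuration immediately (it creates the runtime
# directory, typically /run/ceph, with ceph ownership) instead of waiting for the
# next boot.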
1657 %tmpfiles_create %{_tmpfilesdir}/ceph-common.conf
1658
1659 %postun common
1660 # Package removal cleanup
1661 if [ "$1" -eq "0" ] ; then
1662     rm -rf %{_localstatedir}/log/ceph
1663     rm -rf %{_sysconfdir}/ceph
1664 fi
1665
1666 %files mds
1667 %{_bindir}/ceph-mds
1668 %{_mandir}/man8/ceph-mds.8*
1669 %{_unitdir}/ceph-mds@.service
1670 %{_unitdir}/ceph-mds.target
1671 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/mds
1672
1673 %post mds
1674 %if 0%{?suse_version}
1675 if [ $1 -eq 1 ] ; then
1676   /usr/bin/systemctl preset ceph-mds@\*.service ceph-mds.target >/dev/null 2>&1 || :
1677 fi
1678 %endif
1679 %if 0%{?fedora} || 0%{?rhel}
1680 %systemd_post ceph-mds@\*.service ceph-mds.target
1681 %endif
1682 if [ $1 -eq 1 ] ; then
1683 /usr/bin/systemctl start ceph-mds.target >/dev/null 2>&1 || :
1684 fi
1685
1686 %preun mds
1687 %if 0%{?suse_version}
1688 %service_del_preun ceph-mds@\*.service ceph-mds.target
1689 %endif
1690 %if 0%{?fedora} || 0%{?rhel}
1691 %systemd_preun ceph-mds@\*.service ceph-mds.target
1692 %endif
1693
1694 %postun mds
1695 %systemd_postun ceph-mds@\*.service ceph-mds.target
1696 if [ $1 -ge 1 ] ; then
1697   # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
1698   # "yes". In any case: if units are not running, do not touch them.
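  # An administrator opts in by setting, for example,
  #   CEPH_AUTO_RESTART_ON_UPGRADE=yes
  # in the sysconfig file that is sourced just below.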
1699   SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
1700   if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
1701     source $SYSCONF_CEPH
1702   fi
1703   if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
1704     /usr/bin/systemctl try-restart ceph-mds@\*.service > /dev/null 2>&1 || :
1705   fi
1706 fi
1707
1708 %files mgr
1709 %{_bindir}/ceph-mgr
1710 %dir %{_datadir}/ceph/mgr
1711 %{_datadir}/ceph/mgr/mgr_module.*
1712 %{_datadir}/ceph/mgr/mgr_util.*
1713 %{_unitdir}/ceph-mgr@.service
1714 %{_unitdir}/ceph-mgr.target
1715 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/mgr
1716
1717 %post mgr
1718 %if 0%{?suse_version}
1719 if [ $1 -eq 1 ] ; then
1720   /usr/bin/systemctl preset ceph-mgr@\*.service ceph-mgr.target >/dev/null 2>&1 || :
1721 fi
1722 %endif
1723 %if 0%{?fedora} || 0%{?rhel}
1724 %systemd_post ceph-mgr@\*.service ceph-mgr.target
1725 %endif
1726 if [ $1 -eq 1 ] ; then
1727 /usr/bin/systemctl start ceph-mgr.target >/dev/null 2>&1 || :
1728 fi
1729
1730 %preun mgr
1731 %if 0%{?suse_version}
1732 %service_del_preun ceph-mgr@\*.service ceph-mgr.target
1733 %endif
1734 %if 0%{?fedora} || 0%{?rhel}
1735 %systemd_preun ceph-mgr@\*.service ceph-mgr.target
1736 %endif
1737
1738 %postun mgr
1739 %systemd_postun ceph-mgr@\*.service ceph-mgr.target
1740 if [ $1 -ge 1 ] ; then
1741   # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
1742   # "yes". In any case: if units are not running, do not touch them.
1743   SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
1744   if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
1745     source $SYSCONF_CEPH
1746   fi
1747   if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
1748     /usr/bin/systemctl try-restart ceph-mgr@\*.service > /dev/null 2>&1 || :
1749   fi
1750 fi
1751
1752 %files mgr-dashboard
1753 %{_datadir}/ceph/mgr/dashboard
1754
1755 %post mgr-dashboard
1756 if [ $1 -eq 1 ] ; then
1757     /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
1758 fi
1759
1760 %postun mgr-dashboard
1761 if [ $1 -eq 1 ] ; then
1762     /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
1763 fi
1764
1765 %files mgr-diskprediction-local
1766 %{_datadir}/ceph/mgr/diskprediction_local
1767
1768 %post mgr-diskprediction-local
1769 if [ $1 -eq 1 ] ; then
1770     /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
1771 fi
1772
1773 %postun mgr-diskprediction-local
1774 if [ $1 -eq 1 ] ; then
1775     /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
1776 fi
1777
1778 %files mgr-modules-core
1779 %dir %{_datadir}/ceph/mgr
1780 %{_datadir}/ceph/mgr/alerts
1781 %{_datadir}/ceph/mgr/balancer
1782 %{_datadir}/ceph/mgr/crash
1783 %{_datadir}/ceph/mgr/devicehealth
1784 %{_datadir}/ceph/mgr/influx
1785 %{_datadir}/ceph/mgr/insights
1786 %{_datadir}/ceph/mgr/iostat
1787 %{_datadir}/ceph/mgr/localpool
1788 %{_datadir}/ceph/mgr/mds_autoscaler
1789 %{_datadir}/ceph/mgr/mirroring
1790 %{_datadir}/ceph/mgr/nfs
1791 %{_datadir}/ceph/mgr/orchestrator
1792 %{_datadir}/ceph/mgr/osd_perf_query
1793 %{_datadir}/ceph/mgr/osd_support
1794 %{_datadir}/ceph/mgr/pg_autoscaler
1795 %{_datadir}/ceph/mgr/progress
1796 %{_datadir}/ceph/mgr/prometheus
1797 %{_datadir}/ceph/mgr/rbd_support
1798 %{_datadir}/ceph/mgr/restful
1799 %{_datadir}/ceph/mgr/selftest
1800 %{_datadir}/ceph/mgr/snap_schedule
1801 %{_datadir}/ceph/mgr/stats
1802 %{_datadir}/ceph/mgr/status
1803 %{_datadir}/ceph/mgr/telegraf
1804 %{_datadir}/ceph/mgr/telemetry
1805 %{_datadir}/ceph/mgr/test_orchestrator
1806 %{_datadir}/ceph/mgr/volumes
1807 %{_datadir}/ceph/mgr/zabbix
1808
1809 %files mgr-rook
1810 %{_datadir}/ceph/mgr/rook
1811
1812 %post mgr-rook
1813 if [ $1 -eq 1 ] ; then
1814     /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
1815 fi
1816
1817 %postun mgr-rook
1818 if [ $1 -eq 1 ] ; then
1819     /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
1820 fi
1821
1822 %files mgr-k8sevents
1823 %{_datadir}/ceph/mgr/k8sevents
1824
1825 %post mgr-k8sevents
1826 if [ $1 -eq 1 ] ; then
1827     /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
1828 fi
1829
1830 %postun mgr-k8sevents
1831 if [ $1 -eq 1 ] ; then
1832     /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
1833 fi
1834
1835 %files mgr-cephadm
1836 %{_datadir}/ceph/mgr/cephadm
1837
1838 %post mgr-cephadm
1839 if [ $1 -eq 1 ] ; then
1840     /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
1841 fi
1842
1843 %postun mgr-cephadm
1844 if [ $1 -eq 1 ] ; then
1845     /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
1846 fi
1847
1848 %files mon
1849 %{_bindir}/ceph-mon
1850 %{_bindir}/ceph-monstore-tool
1851 %{_mandir}/man8/ceph-mon.8*
1852 %{_unitdir}/ceph-mon@.service
1853 %{_unitdir}/ceph-mon.target
1854 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/mon
1855
1856 %post mon
1857 %if 0%{?suse_version}
1858 if [ $1 -eq 1 ] ; then
1859   /usr/bin/systemctl preset ceph-mon@\*.service ceph-mon.target >/dev/null 2>&1 || :
1860 fi
1861 %endif
1862 %if 0%{?fedora} || 0%{?rhel}
1863 %systemd_post ceph-mon@\*.service ceph-mon.target
1864 %endif
1865 if [ $1 -eq 1 ] ; then
1866 /usr/bin/systemctl start ceph-mon.target >/dev/null 2>&1 || :
1867 fi
1868
1869 %preun mon
1870 %if 0%{?suse_version}
1871 %service_del_preun ceph-mon@\*.service ceph-mon.target
1872 %endif
1873 %if 0%{?fedora} || 0%{?rhel}
1874 %systemd_preun ceph-mon@\*.service ceph-mon.target
1875 %endif
1876
1877 %postun mon
1878 %systemd_postun ceph-mon@\*.service ceph-mon.target
1879 if [ $1 -ge 1 ] ; then
1880   # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
1881   # "yes". In any case: if units are not running, do not touch them.
1882   SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
1883   if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
1884     source $SYSCONF_CEPH
1885   fi
1886   if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
1887     /usr/bin/systemctl try-restart ceph-mon@\*.service > /dev/null 2>&1 || :
1888   fi
1889 fi
1890
1891 %files fuse
1892 %{_bindir}/ceph-fuse
1893 %{_mandir}/man8/ceph-fuse.8*
1894 %{_sbindir}/mount.fuse.ceph
1895 %{_mandir}/man8/mount.fuse.ceph.8*
1896 %{_unitdir}/ceph-fuse@.service
1897 %{_unitdir}/ceph-fuse.target
1898
1899 %files -n cephfs-mirror
1900 %{_bindir}/cephfs-mirror
1901 %{_mandir}/man8/cephfs-mirror.8*
1902 %{_unitdir}/cephfs-mirror@.service
1903 %{_unitdir}/cephfs-mirror.target
1904
1905 %post -n cephfs-mirror
1906 %if 0%{?suse_version}
1907 if [ $1 -eq 1 ] ; then
1908   /usr/bin/systemctl preset cephfs-mirror@\*.service cephfs-mirror.target >/dev/null 2>&1 || :
1909 fi
1910 %endif
1911 %if 0%{?fedora} || 0%{?rhel}
1912 %systemd_post cephfs-mirror@\*.service cephfs-mirror.target
1913 %endif
1914 if [ $1 -eq 1 ] ; then
1915 /usr/bin/systemctl start cephfs-mirror.target >/dev/null 2>&1 || :
1916 fi
1917
1918 %preun -n cephfs-mirror
1919 %if 0%{?suse_version}
1920 %service_del_preun cephfs-mirror@\*.service cephfs-mirror.target
1921 %endif
1922 %if 0%{?fedora} || 0%{?rhel}
1923 %systemd_preun cephfs-mirror@\*.service cephfs-mirror.target
1924 %endif
1925
1926 %postun -n cephfs-mirror
1927 %systemd_postun cephfs-mirror@\*.service cephfs-mirror.target
1928 if [ $1 -ge 1 ] ; then
1929   # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
1930   # "yes". In any case: if units are not running, do not touch them.
1931   SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
1932   if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
1933     source $SYSCONF_CEPH
1934   fi
1935   if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
1936     /usr/bin/systemctl try-restart cephfs-mirror@\*.service > /dev/null 2>&1 || :
1937   fi
1938 fi
1939
1940 %files -n rbd-fuse
1941 %{_bindir}/rbd-fuse
1942 %{_mandir}/man8/rbd-fuse.8*
1943
1944 %files -n rbd-mirror
1945 %{_bindir}/rbd-mirror
1946 %{_mandir}/man8/rbd-mirror.8*
1947 %{_unitdir}/ceph-rbd-mirror@.service
1948 %{_unitdir}/ceph-rbd-mirror.target
1949
1950 %post -n rbd-mirror
1951 %if 0%{?suse_version}
1952 if [ $1 -eq 1 ] ; then
1953   /usr/bin/systemctl preset ceph-rbd-mirror@\*.service ceph-rbd-mirror.target >/dev/null 2>&1 || :
1954 fi
1955 %endif
1956 %if 0%{?fedora} || 0%{?rhel}
1957 %systemd_post ceph-rbd-mirror@\*.service ceph-rbd-mirror.target
1958 %endif
1959 if [ $1 -eq 1 ] ; then
1960 /usr/bin/systemctl start ceph-rbd-mirror.target >/dev/null 2>&1 || :
1961 fi
1962
1963 %preun -n rbd-mirror
1964 %if 0%{?suse_version}
1965 %service_del_preun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target
1966 %endif
1967 %if 0%{?fedora} || 0%{?rhel}
1968 %systemd_preun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target
1969 %endif
1970
1971 %postun -n rbd-mirror
1972 %systemd_postun ceph-rbd-mirror@\*.service ceph-rbd-mirror.target
1973 if [ $1 -ge 1 ] ; then
1974   # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
1975   # "yes". In any case: if units are not running, do not touch them.
1976   SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
1977   if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
1978     source $SYSCONF_CEPH
1979   fi
1980   if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
1981     /usr/bin/systemctl try-restart ceph-rbd-mirror@\*.service > /dev/null 2>&1 || :
1982   fi
1983 fi
1984
1985 %files immutable-object-cache
1986 %{_bindir}/ceph-immutable-object-cache
1987 %{_mandir}/man8/ceph-immutable-object-cache.8*
1988 %{_unitdir}/ceph-immutable-object-cache@.service
1989 %{_unitdir}/ceph-immutable-object-cache.target
1990
1991 %post immutable-object-cache
1992 %if 0%{?suse_version}
1993 if [ $1 -eq 1 ] ; then
1994   /usr/bin/systemctl preset ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target >/dev/null 2>&1 || :
1995 fi
1996 %endif
1997 %if 0%{?fedora} || 0%{?rhel}
1998 %systemd_post ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target
1999 %endif
2000 if [ $1 -eq 1 ] ; then
2001 /usr/bin/systemctl start ceph-immutable-object-cache.target >/dev/null 2>&1 || :
2002 fi
2003
2004 %preun immutable-object-cache
2005 %if 0%{?suse_version}
2006 %service_del_preun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target
2007 %endif
2008 %if 0%{?fedora} || 0%{?rhel}
2009 %systemd_preun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target
2010 %endif
2011
2012 %postun immutable-object-cache
2013 test -n "$FIRST_ARG" || FIRST_ARG=$1
2014 %systemd_postun ceph-immutable-object-cache@\*.service ceph-immutable-object-cache.target
2015 if [ $FIRST_ARG -ge 1 ] ; then
2016   # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
2017   # "yes". In any case: if units are not running, do not touch them.
2018   SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
2019   if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
2020     source $SYSCONF_CEPH
2021   fi
2022   if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
2023     /usr/bin/systemctl try-restart ceph-immutable-object-cache@\*.service > /dev/null 2>&1 || :
2024   fi
2025 fi
2026
2027 %files -n rbd-nbd
2028 %{_bindir}/rbd-nbd
2029 %{_mandir}/man8/rbd-nbd.8*
2030 %dir %{_libexecdir}/rbd-nbd
2031 %{_libexecdir}/rbd-nbd/rbd-nbd_quiesce
2032
2033 %files radosgw
2034 %{_bindir}/ceph-diff-sorted
2035 %{_bindir}/radosgw
2036 %{_bindir}/radosgw-token
2037 %{_bindir}/radosgw-es
2038 %{_bindir}/radosgw-object-expirer
2039 %{_bindir}/rgw-gap-list
2040 %{_bindir}/rgw-gap-list-comparator
2041 %{_bindir}/rgw-orphan-list
2042 %{_libdir}/libradosgw.so*
2043 %{_mandir}/man8/radosgw.8*
2044 %dir %{_localstatedir}/lib/ceph/radosgw
2045 %{_unitdir}/ceph-radosgw@.service
2046 %{_unitdir}/ceph-radosgw.target
2047
2048 %post radosgw
2049 /sbin/ldconfig
2050 %if 0%{?suse_version}
2051 if [ $1 -eq 1 ] ; then
2052   /usr/bin/systemctl preset ceph-radosgw@\*.service ceph-radosgw.target >/dev/null 2>&1 || :
2053 fi
2054 %endif
2055 %if 0%{?fedora} || 0%{?rhel}
2056 %systemd_post ceph-radosgw@\*.service ceph-radosgw.target
2057 %endif
2058 if [ $1 -eq 1 ] ; then
2059 /usr/bin/systemctl start ceph-radosgw.target >/dev/null 2>&1 || :
2060 fi
2061
2062 %preun radosgw
2063 %if 0%{?suse_version}
2064 %service_del_preun ceph-radosgw@\*.service ceph-radosgw.target
2065 %endif
2066 %if 0%{?fedora} || 0%{?rhel}
2067 %systemd_preun ceph-radosgw@\*.service ceph-radosgw.target
2068 %endif
2069
2070 %postun radosgw
2071 /sbin/ldconfig
2072 %systemd_postun ceph-radosgw@\*.service ceph-radosgw.target
2073 if [ $1 -ge 1 ] ; then
2074   # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
2075   # "yes". In any case: if units are not running, do not touch them.
2076   SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
2077   if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
2078     source $SYSCONF_CEPH
2079   fi
2080   if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
2081     /usr/bin/systemctl try-restart ceph-radosgw@\*.service > /dev/null 2>&1 || :
2082   fi
2083 fi
2084
2085 %files osd
2086 %{_bindir}/ceph-clsinfo
2087 %{_bindir}/ceph-bluestore-tool
2088 %{_bindir}/ceph-erasure-code-tool
2089 %{_bindir}/ceph-objectstore-tool
2090 %{_bindir}/ceph-osdomap-tool
2091 %{_bindir}/ceph-osd
2092 %{_libexecdir}/ceph/ceph-osd-prestart.sh
2093 %{_sbindir}/ceph-volume
2094 %{_sbindir}/ceph-volume-systemd
2095 %{_mandir}/man8/ceph-clsinfo.8*
2096 %{_mandir}/man8/ceph-osd.8*
2097 %{_mandir}/man8/ceph-bluestore-tool.8*
2098 %{_mandir}/man8/ceph-volume.8*
2099 %{_mandir}/man8/ceph-volume-systemd.8*
2100 %{_unitdir}/ceph-osd@.service
2101 %{_unitdir}/ceph-osd.target
2102 %{_unitdir}/ceph-volume@.service
2103 %attr(750,ceph,ceph) %dir %{_localstatedir}/lib/ceph/osd
2104 %config(noreplace) %{_sysctldir}/90-ceph-osd.conf
2105 %{_sysconfdir}/sudoers.d/ceph-osd-smartctl
2106
2107 %post osd
2108 %if 0%{?suse_version}
2109 if [ $1 -eq 1 ] ; then
2110   /usr/bin/systemctl preset ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target >/dev/null 2>&1 || :
2111 fi
2112 %endif
2113 %if 0%{?fedora} || 0%{?rhel}
2114 %systemd_post ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target
2115 %endif
2116 if [ $1 -eq 1 ] ; then
2117 /usr/bin/systemctl start ceph-osd.target >/dev/null 2>&1 || :
2118 fi
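# Make the shipped sysctl tuning take effect right away: prefer the distribution's
# sysctl_apply macro when available, otherwise invoke systemd-sysctl on the
# installed 90-ceph-osd.conf directly.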
2119 %if 0%{?sysctl_apply}
2120     %sysctl_apply 90-ceph-osd.conf
2121 %else
2122     /usr/lib/systemd/systemd-sysctl %{_sysctldir}/90-ceph-osd.conf > /dev/null 2>&1 || :
2123 %endif
2124
2125 %preun osd
2126 %if 0%{?suse_version}
2127 %service_del_preun ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target
2128 %endif
2129 %if 0%{?fedora} || 0%{?rhel}
2130 %systemd_preun ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target
2131 %endif
2132
2133 %postun osd
2134 %systemd_postun ceph-osd@\*.service ceph-volume@\*.service ceph-osd.target
2135 if [ $1 -ge 1 ] ; then
2136   # Restart on upgrade, but only if "CEPH_AUTO_RESTART_ON_UPGRADE" is set to
2137   # "yes". In any case: if units are not running, do not touch them.
2138   SYSCONF_CEPH=%{_sysconfdir}/sysconfig/ceph
2139   if [ -f $SYSCONF_CEPH -a -r $SYSCONF_CEPH ] ; then
2140     source $SYSCONF_CEPH
2141   fi
2142   if [ "X$CEPH_AUTO_RESTART_ON_UPGRADE" = "Xyes" ] ; then
2143     /usr/bin/systemctl try-restart ceph-osd@\*.service ceph-volume@\*.service > /dev/null 2>&1 || :
2144   fi
2145 fi
2146
2147 %if 0%{with seastar}
2148 %files crimson-osd
2149 %{_bindir}/crimson-osd
2150 %endif
2151
2152 %if %{with ocf}
2153
2154 %files resource-agents
2155 %dir %{_prefix}/lib/ocf
2156 %dir %{_prefix}/lib/ocf/resource.d
2157 %dir %{_prefix}/lib/ocf/resource.d/ceph
2158 %attr(0755,-,-) %{_prefix}/lib/ocf/resource.d/ceph/rbd
2159
2160 %endif
2161
2162 %files -n librados2
2163 %{_libdir}/librados.so.*
2164 %dir %{_libdir}/ceph
2165 %{_libdir}/ceph/libceph-common.so.*
2166 %if %{with lttng}
2167 %{_libdir}/librados_tp.so.*
2168 %endif
2169 %dir %{_sysconfdir}/ceph
2170
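# For the pure library subpackages the scriptlet is just ldconfig; the -p form runs
# the named program directly as the scriptlet body instead of spawning a shell.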
2171 %post -n librados2 -p /sbin/ldconfig
2172
2173 %postun -n librados2 -p /sbin/ldconfig
2174
2175 %files -n librados-devel
2176 %dir %{_includedir}/rados
2177 %{_includedir}/rados/librados.h
2178 %{_includedir}/rados/rados_types.h
2179 %{_libdir}/librados.so
2180 %if %{with lttng}
2181 %{_libdir}/librados_tp.so
2182 %endif
2183 %{_bindir}/librados-config
2184 %{_mandir}/man8/librados-config.8*
2185
2186 %files -n libradospp-devel
2187 %dir %{_includedir}/rados
2188 %{_includedir}/rados/buffer.h
2189 %{_includedir}/rados/buffer_fwd.h
2190 %{_includedir}/rados/crc32c.h
2191 %{_includedir}/rados/inline_memory.h
2192 %{_includedir}/rados/librados.hpp
2193 %{_includedir}/rados/librados_fwd.hpp
2194 %{_includedir}/rados/page.h
2195 %{_includedir}/rados/rados_types.hpp
2196
2197 %files -n python%{python3_pkgversion}-rados
2198 %{python3_sitearch}/rados.cpython*.so
2199 %{python3_sitearch}/rados-*.egg-info
2200
2201 %files -n libcephsqlite
2202 %{_libdir}/libcephsqlite.so
2203
2204 %post -n libcephsqlite -p /sbin/ldconfig
2205
2206 %postun -n libcephsqlite -p /sbin/ldconfig
2207
2208 %files -n libcephsqlite-devel
2209 %{_includedir}/libcephsqlite.h
2210
2211 %if 0%{with libradosstriper}
2212 %files -n libradosstriper1
2213 %{_libdir}/libradosstriper.so.*
2214
2215 %post -n libradosstriper1 -p /sbin/ldconfig
2216
2217 %postun -n libradosstriper1 -p /sbin/ldconfig
2218
2219 %files -n libradosstriper-devel
2220 %dir %{_includedir}/radosstriper
2221 %{_includedir}/radosstriper/libradosstriper.h
2222 %{_includedir}/radosstriper/libradosstriper.hpp
2223 %{_libdir}/libradosstriper.so
2224 %endif
2225
2226 %files -n librbd1
2227 %{_libdir}/librbd.so.*
2228 %if %{with lttng}
2229 %{_libdir}/librbd_tp.so.*
2230 %endif
2231 %dir %{_libdir}/ceph/librbd
2232 %{_libdir}/ceph/librbd/libceph_*.so*
2233
2234 %post -n librbd1 -p /sbin/ldconfig
2235
2236 %postun -n librbd1 -p /sbin/ldconfig
2237
2238 %files -n librbd-devel
2239 %dir %{_includedir}/rbd
2240 %{_includedir}/rbd/librbd.h
2241 %{_includedir}/rbd/librbd.hpp
2242 %{_includedir}/rbd/features.h
2243 %{_libdir}/librbd.so
2244 %if %{with lttng}
2245 %{_libdir}/librbd_tp.so
2246 %endif
2247
2248 %files -n librgw2
2249 %{_libdir}/librgw.so.*
2250 %if %{with lttng}
2251 %{_libdir}/librgw_op_tp.so.*
2252 %{_libdir}/librgw_rados_tp.so.*
2253 %endif
2254
2255 %post -n librgw2 -p /sbin/ldconfig
2256
2257 %postun -n librgw2 -p /sbin/ldconfig
2258
2259 %files -n librgw-devel
2260 %dir %{_includedir}/rados
2261 %{_includedir}/rados/librgw.h
2262 %{_includedir}/rados/rgw_file.h
2263 %{_libdir}/librgw.so
2264 %if %{with lttng}
2265 %{_libdir}/librgw_op_tp.so
2266 %{_libdir}/librgw_rados_tp.so
2267 %endif
2268
2269 %files -n python%{python3_pkgversion}-rgw
2270 %{python3_sitearch}/rgw.cpython*.so
2271 %{python3_sitearch}/rgw-*.egg-info
2272
2273 %files -n python%{python3_pkgversion}-rbd
2274 %{python3_sitearch}/rbd.cpython*.so
2275 %{python3_sitearch}/rbd-*.egg-info
2276
2277 %files -n libcephfs2
2278 %{_libdir}/libcephfs.so.*
2279 %dir %{_sysconfdir}/ceph
2280
2281 %post -n libcephfs2 -p /sbin/ldconfig
2282
2283 %postun -n libcephfs2 -p /sbin/ldconfig
2284
2285 %files -n libcephfs-devel
2286 %dir %{_includedir}/cephfs
2287 %{_includedir}/cephfs/libcephfs.h
2288 %{_includedir}/cephfs/ceph_ll_client.h
2289 %dir %{_includedir}/cephfs/metrics
2290 %{_includedir}/cephfs/metrics/Types.h
2291 %{_libdir}/libcephfs.so
2292
2293 %if %{with jaeger}
2294 %files -n libjaeger
2295 %{_libdir}/libopentracing.so.*
2296 %{_libdir}/libthrift.so.*
2297 %{_libdir}/libjaegertracing.so.*
2298 %post -n libjaeger -p /sbin/ldconfig
2299 %postun -n libjaeger -p /sbin/ldconfig
2300 %endif
2301
2302 %files -n python%{python3_pkgversion}-cephfs
2303 %{python3_sitearch}/cephfs.cpython*.so
2304 %{python3_sitearch}/cephfs-*.egg-info
2305
2306 %files -n python%{python3_pkgversion}-ceph-argparse
2307 %{python3_sitelib}/ceph_argparse.py
2308 %{python3_sitelib}/__pycache__/ceph_argparse.cpython*.py*
2309 %{python3_sitelib}/ceph_daemon.py
2310 %{python3_sitelib}/__pycache__/ceph_daemon.cpython*.py*
2311
2312 %files -n python%{python3_pkgversion}-ceph-common
2313 %{python3_sitelib}/ceph
2314 %{python3_sitelib}/ceph-*.egg-info
2315
2316 %if 0%{with cephfs_shell}
2317 %files -n cephfs-shell
2318 %{python3_sitelib}/cephfs_shell-*.egg-info
2319 %{_bindir}/cephfs-shell
2320 %{_mandir}/man8/cephfs-shell.8*
2321 %endif
2322
2323 %files -n cephfs-top
2324 %{python3_sitelib}/cephfs_top-*.egg-info
2325 %{_bindir}/cephfs-top
2326 %{_mandir}/man8/cephfs-top.8*
2327
2328 %if 0%{with ceph_test_package}
2329 %files -n ceph-test
2330 %{_bindir}/ceph-client-debug
2331 %{_bindir}/ceph_bench_log
2332 %{_bindir}/ceph_kvstorebench
2333 %{_bindir}/ceph_multi_stress_watch
2334 %{_bindir}/ceph_erasure_code_benchmark
2335 %{_bindir}/ceph_omapbench
2336 %{_bindir}/ceph_objectstore_bench
2337 %{_bindir}/ceph_perf_objectstore
2338 %{_bindir}/ceph_perf_local
2339 %{_bindir}/ceph_perf_msgr_client
2340 %{_bindir}/ceph_perf_msgr_server
2341 %{_bindir}/ceph_psim
2342 %{_bindir}/ceph_radosacl
2343 %{_bindir}/ceph_rgw_jsonparser
2344 %{_bindir}/ceph_rgw_multiparser
2345 %{_bindir}/ceph_scratchtool
2346 %{_bindir}/ceph_scratchtoolpp
2347 %{_bindir}/ceph_test_*
2348 %{_bindir}/ceph-coverage
2349 %{_bindir}/ceph-debugpack
2350 %{_bindir}/ceph-dedup-tool
2351 %if 0%{with seastar}
2352 %{_bindir}/crimson-store-nbd
2353 %endif
2354 %{_mandir}/man8/ceph-debugpack.8*
2355 %dir %{_libdir}/ceph
2356 %{_libdir}/ceph/ceph-monstore-update-crush.sh
2357 %endif
2358
2359 %if 0%{with cephfs_java}
2360 %files -n libcephfs_jni1
2361 %{_libdir}/libcephfs_jni.so.*
2362
2363 %post -n libcephfs_jni1 -p /sbin/ldconfig
2364
2365 %postun -n libcephfs_jni1 -p /sbin/ldconfig
2366
2367 %files -n libcephfs_jni-devel
2368 %{_libdir}/libcephfs_jni.so
2369
2370 %files -n cephfs-java
2371 %{_javadir}/libcephfs.jar
2372 %{_javadir}/libcephfs-test.jar
2373 %endif
2374
2375 %files -n rados-objclass-devel
2376 %dir %{_includedir}/rados
2377 %{_includedir}/rados/objclass.h
2378
2379 %if 0%{with selinux}
2380 %files selinux
2381 %attr(0600,root,root) %{_datadir}/selinux/packages/ceph.pp
2382 %{_datadir}/selinux/devel/include/contrib/ceph.if
2383 %{_mandir}/man8/ceph_selinux.8*
2384
2385 %post selinux
2386 # back up file_contexts before the update
2387 . /etc/selinux/config
2388 FILE_CONTEXT=/etc/selinux/${SELINUXTYPE}/contexts/files/file_contexts
2389 cp ${FILE_CONTEXT} ${FILE_CONTEXT}.pre
2390
2391 # Install the policy
2392 /usr/sbin/semodule -i %{_datadir}/selinux/packages/ceph.pp
2393
2394 # Relabel only if SELinux is enabled
2395 if ! /usr/sbin/selinuxenabled; then
2396     # Do not relabel if SELinux is not enabled
2397     exit 0
2398 fi
2399
2400 if diff ${FILE_CONTEXT} ${FILE_CONTEXT}.pre > /dev/null 2>&1; then
2401    # Do not relabel if file contexts did not change
2402    exit 0
2403 fi
2404
2405 # Check whether the daemons are running
2406 /usr/bin/systemctl status ceph.target > /dev/null 2>&1
2407 STATUS=$?
2408
2409 # Stop the daemons if they were running
2410 if test $STATUS -eq 0; then
2411     /usr/bin/systemctl stop ceph.target > /dev/null 2>&1
2412 fi
2413
2414 # Relabel only the files whose contexts changed (covers the first install of the policy as well)
2415 /usr/sbin/fixfiles -C ${FILE_CONTEXT}.pre restore 2> /dev/null
2416
2417 rm -f ${FILE_CONTEXT}.pre
2418 # The fixfiles command won't fix the label for /var/run/ceph
2419 /usr/sbin/restorecon -R /var/run/ceph > /dev/null 2>&1
2420
2421 # Start the daemons if they were running before
2422 if test $STATUS -eq 0; then
2423     /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || :
2424 fi
2425 exit 0
2426
2427 %postun selinux
2428 if [ $1 -eq 0 ]; then
2429     # back up file_contexts before the update
2430     . /etc/selinux/config
2431     FILE_CONTEXT=/etc/selinux/${SELINUXTYPE}/contexts/files/file_contexts
2432     cp ${FILE_CONTEXT} ${FILE_CONTEXT}.pre
2433
2434     # Remove the module
2435     /usr/sbin/semodule -n -r ceph > /dev/null 2>&1
2436
2437     # Relabel only if SELinux is enabled
2438     if ! /usr/sbin/selinuxenabled ; then
2439         # Do not relabel if SELinux is not enabled
2440         exit 0
2441     fi
2442
2443     # Check whether the daemons are running
2444     /usr/bin/systemctl status ceph.target > /dev/null 2>&1
2445     STATUS=$?
2446
2447     # Stop the daemons if they were running
2448     if test $STATUS -eq 0; then
2449         /usr/bin/systemctl stop ceph.target > /dev/null 2>&1
2450     fi
2451
2452     /usr/sbin/fixfiles -C ${FILE_CONTEXT}.pre restore 2> /dev/null
2453     rm -f ${FILE_CONTEXT}.pre
2454     # The fixfiles command won't fix the label for /var/run/ceph
2455     /usr/sbin/restorecon -R /var/run/ceph > /dev/null 2>&1
2456
2457     # Start the daemons if they were running before
2458     if test $STATUS -eq 0; then
2459         /usr/bin/systemctl start ceph.target > /dev/null 2>&1 || :
2460     fi
2461 fi
2462 exit 0
2463 %endif
2464
2465 %files grafana-dashboards
2466 %if 0%{?suse_version}
2467 %attr(0755,root,root) %dir %{_sysconfdir}/grafana
2468 %attr(0755,root,root) %dir %{_sysconfdir}/grafana/dashboards
2469 %endif
2470 %attr(0755,root,root) %dir %{_sysconfdir}/grafana/dashboards/ceph-dashboard
2471 %config %{_sysconfdir}/grafana/dashboards/ceph-dashboard/*
2472 %doc monitoring/grafana/dashboards/README
2473 %doc monitoring/grafana/README.md
2474
2475 %files prometheus-alerts
2476 %if 0%{?suse_version}
2477 %attr(0755,root,root) %dir %{_sysconfdir}/prometheus
2478 %endif
2479 %attr(0755,root,root) %dir %{_sysconfdir}/prometheus/ceph
2480 %config %{_sysconfdir}/prometheus/ceph/ceph_default_alerts.yml
2481
2482 %changelog