]> git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
doc: Fix ceph command manpage to match ceph -h (hammer) 3982/head
authorDavid Zafman <dzafman@redhat.com>
Thu, 12 Mar 2015 21:55:54 +0000 (14:55 -0700)
committerDavid Zafman <dzafman@redhat.com>
Sat, 14 Mar 2015 00:00:10 +0000 (17:00 -0700)
Include rebuilt man/ceph.8

Fixes: #10678
Signed-off-by: David Zafman <dzafman@redhat.com>
doc/man/8/ceph.rst
man/ceph.8

index f0fb3a0ef1482956bdee61d102d5ea91aa6dfd6d..038d99a5456b64e61681287039f806fb7c8d4161 100644 (file)
@@ -15,6 +15,8 @@ Synopsis
 
 | **ceph** **df** *{detail}*
 
+| **ceph** **fs** [ *ls* \| *new* \| *reset* \| *rm* ] ...
+
 | **ceph** **fsid**
 
 | **ceph** **health** *{detail}*
@@ -31,15 +33,15 @@ Synopsis
 
 | **ceph** **mon_status**
 
-| **ceph** **osd** [ *blacklist* \| *create* \| *deep-scrub* \| *down* \| *dump* \| *erasure-code-profile* \| *find* \| *getcrushmap* \| *getmap* \| *getmaxosd* \| *in* \| *lost* \| *ls* \| *lspools* \| *map* \| *metadata* \| *out* \| *pause* \| *perf* \| *primary-affinity* \| *primary-temp* \| *repair* \| *reweight* \| *reweight-by-utilization* \| *rm* \| *scrub* \| *set* \| *setcrushmap* \| *setmaxosd*  \| *stat* \| *thrash* \| *tree* \| *unpause* \| *unset* ] ...
+| **ceph** **osd** [ *blacklist* \| *blocked-by* \| *create* \| *deep-scrub* \| *df* \| *down* \| *dump* \| *erasure-code-profile* \| *find* \| *getcrushmap* \| *getmap* \| *getmaxosd* \| *in* \| *lspools* \| *map* \| *metadata* \| *out* \| *pause* \| *perf* \| *pg-temp* \| *primary-affinity* \| *primary-temp* \| *repair* \| *reweight* \| *reweight-by-pg* \| *rm* \| *scrub* \| *set* \| *setcrushmap* \| *setmaxosd*  \| *stat* \| *thrash* \| *tree* \| *unpause* \| *unset* ] ...
 
-| **ceph** **osd** **crush** [ *add* \| *add-bucket* \| *create-or-move* \| *dump* \| *get-tunable* \| *link* \| *move* \| *remove* \| *reweight* \| *reweight-all* \| *rm* \| *rule* \| *set* \| *set-tunable* \| *show-tunables* \| *tunables* \| *unlink* ] ...
+| **ceph** **osd** **crush** [ *add* \| *add-bucket* \| *create-or-move* \| *dump* \| *get-tunable* \| *link* \| *move* \| *remove* \| *rename-bucket* \| *reweight* \| *reweight-all* \| *reweight-subtree* \| *rm* \| *rule* \| *set* \| *set-tunable* \| *show-tunables* \| *tunables* \| *unlink* ] ...
 
-| **ceph** **osd** **pool** [ *create* \| *delete* \| *get* \| *get-quota* \| *mksnap* \| *rename* \| *rmsnap* \| *set* \| *set-quota* \| *stats* ] ...
+| **ceph** **osd** **pool** [ *create* \| *delete* \| *get* \| *get-quota* \| *ls* \| *mksnap* \| *rename* \| *rmsnap* \| *set* \| *set-quota* \| *stats* ] ...
 
 | **ceph** **osd** **tier** [ *add* \| *add-cache* \| *cache-mode* \| *remove* \| *remove-overlay* \| *set-overlay* ] ...
 
-| **ceph** **pg** [ *debug* \| *deep-scrub* \| *dump* \| *dump_json* \| *dump_pools_json* \| *dump_stuck* \| *force_create_pg* \| *getmap* \| *map* \| *repair* \| *scrub* \| *send_pg_creates* \| *set_full_ratio* \| *set_nearfull_ratio* \| *stat* ] ...
+| **ceph** **pg** [ *debug* \| *deep-scrub* \| *dump* \| *dump_json* \| *dump_pools_json* \| *dump_stuck* \| *force_create_pg* \| *getmap* \| *ls* \| *ls-by-osd* \| *ls-by-pool* \| *ls-by-primary* \| *map* \| *repair* \| *scrub* \| *send_pg_creates* \| *set_full_ratio* \| *set_nearfull_ratio* \| *stat* ] ...
 
 | **ceph** **quorum** [ *enter* \| *exit* ]
 
@@ -55,6 +57,8 @@ Synopsis
 
 | **ceph** **tell** *<name (type.id)> <args> [<args>...]*
 
+| **ceph** **version**
+
 Description
 ===========
 
@@ -74,7 +78,7 @@ or updating of authentication keys for a particular  entity such as a monitor or
 OSD. It uses some additional subcommands.
 
 Subcommand ``add`` adds authentication info for a particular entity from input
-file, or random key if no input given and/or any caps specified in the command.
+file, or random key if no input is given and/or any caps specified in the command.
 
 Usage::
 
@@ -208,6 +212,36 @@ Usage::
        ceph df {detail}
 
 
+fs
+--
+
+Manage cephfs filesystems. It uses some additional subcommands.
+
+Subcommand ``ls`` to list filesystems
+
+Usage::
+
+       ceph fs ls
+
+Subcommand ``new`` to make a new filesystem using named pools <metadata> and <data>
+
+Usage::
+
+       ceph fs new <fs_name> <metadata> <data>
+
+Subcommand ``reset`` is used for disaster recovery only: reset to a single-MDS map
+
+Usage::
+
+       ceph fs reset <fs_name> {--yes-i-really-mean-it}
+
+Subcommand ``rm`` to disable the named filesystem
+
+Usage::
+
+       ceph fs rm <fs_name> {--yes-i-really-mean-it}
+
+
 fsid
 ----
 
@@ -466,6 +500,12 @@ Usage::
 
        ceph osd blacklist rm <EntityAddr>
 
+Subcommand ``blocked-by`` prints a histogram of which OSDs are blocking their peers
+
+Usage::
+
+       ceph osd blocked-by
+
 Subcommand ``create`` creates new osd (with optional UUID).
 
 Usage::
@@ -495,7 +535,7 @@ Subcommand ``create-or-move`` creates entry or moves existing entry for <name>
 Usage::
 
        ceph osd crush create-or-move <osdname (id|osd.id)> <float[0.0-]> <args>
-[<args>...]
+       [<args>...]
 
 Subcommand ``dump`` dumps crush map.
 
@@ -528,6 +568,12 @@ Usage::
 
        ceph osd crush remove <name> {<ancestor>}
 
+Subcommand ``rename-bucket`` renames bucket <srcname> to <dstname>
+
+Usage::
+
+       ceph osd crush rename-bucket <srcname> <dstname>
+
 Subcommand ``reweight`` change <name>'s weight to <weight> in crush map.
 
 Usage::
@@ -541,6 +587,13 @@ Usage::
 
        ceph osd crush reweight-all
 
+Subcommand ``reweight-subtree`` changes all leaf items beneath <name>
+to <weight> in crush map
+
+Usage::
+
+       ceph osd crush reweight-subtree <name> <weight>
+
 Subcommand ``rm`` removes <name> from crush map (everywhere, or just at
 <ancestor>).
 
@@ -620,7 +673,7 @@ Subcommand ``tunables`` sets crush tunables values to <profile>.
 
 Usage::
 
-       ceph osd crush tunables legacy|argonaut|bobtail|firefly|optimal|default
+       ceph osd crush tunables legacy|argonaut|bobtail|firefly|hammer|optimal|default
 
 Subcommand ``unlink`` unlinks <name> from crush map (everywhere, or just at
 <ancestor>).
@@ -629,6 +682,12 @@ Usage::
 
        ceph osd crush unlink <name> {<ancestor>}
 
+Subcommand ``df`` shows OSD utilization
+
+Usage::
+
+       ceph osd df {plain|tree}
+
 Subcommand ``deep-scrub`` initiates deep scrub on specified osd.
 
 Usage::
@@ -769,7 +828,7 @@ Subcommand ``create`` creates pool.
 Usage::
 
        ceph osd pool create <poolname> <int[0-]> {<int[0-]>} {replicated|erasure}
-       {<erasure_code_profile>} {<ruleset>}
+       {<erasure_code_profile>} {<ruleset>} {<int>}
 
 Subcommand ``delete`` deletes pool.
 
@@ -789,7 +848,7 @@ Usage::
        ceph osd pool get <poolname> cache_target_dirty_ratio|cache_target_full_ratio
 
        ceph osd pool get <poolname> cache_min_flush_age|cache_min_evict_age|
-       erasure_code_profile
+       erasure_code_profile|min_read_recency_for_promote|write_fadvise_dontneed
 
 Subcommand ``get-quota`` obtains object or byte limits for pool.
 
@@ -797,6 +856,12 @@ Usage::
 
        ceph osd pool get-quota <poolname>
 
+Subcommand ``ls`` list pools
+
+Usage::
+
+       ceph osd pool ls {detail}
+
 Subcommand ``mksnap`` makes snapshot <snap> in <pool>.
 
 Usage::
@@ -820,17 +885,12 @@ Subcommand ``set`` sets pool parameter <var> to <val>.
 Usage::
 
        ceph osd pool set <poolname> size|min_size|crash_replay_interval|pg_num|
-       pgp_num|crush_ruleset|hashpspool|hit_set_type|hit_set_period
-
-       ceph osd pool set <poolname> hit_set_count|hit_set_fpp|debug_fake_ec_pool
-
-       ceph osd pool set <poolname> target_max_bytes|target_max_objects
-
-       ceph osd pool set <poolname> cache_target_dirty_ratio|cache_target_full_ratio
-
-       ceph osd pool set <poolname> cache_min_flush_age|cache_min_evict_age
-
-       ceph osd pool set <poolname> auid <val> {--yes-i-really-mean-it}
+       pgp_num|crush_ruleset|hashpspool|nodelete|nopgchange|nosizechange|
+       hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|debug_fake_ec_pool|
+       target_max_bytes|target_max_objects|cache_target_dirty_ratio|
+       cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|auid|
+       min_read_recency_for_promote|write_fadvise_dontneed
+       <val> {--yes-i-really-mean-it}
 
 Subcommand ``set-quota`` sets object or byte limit on pool.
 
@@ -870,6 +930,13 @@ Usage::
 
        osd reweight <int[0-]> <float[0.0-1.0]>
 
+Subcommand ``reweight-by-pg`` reweight OSDs by PG distribution
+[overload-percentage-for-consideration, default 120].
+
+Usage::
+
+       ceph osd reweight-by-pg {<int[100-]>} {<poolname> [<poolname>...]}
+
 Subcommand ``reweight-by-utilization`` reweight OSDs by utilization
 [overload-percentage-for-consideration, default 120].
 
@@ -893,8 +960,8 @@ Subcommand ``set`` sets <key>.
 
 Usage::
 
-       ceph osd set pause|noup|nodown|noout|noin|nobackfill|norebalance|norecover|
-       noscrub|nodeep-scrub|notieragent
+       ceph osd set full|pause|noup|nodown|noout|noin|nobackfill|
+       norebalance|norecover|noscrub|nodeep-scrub|notieragent
 
 Subcommand ``setcrushmap`` sets crush map from input file.
 
@@ -941,7 +1008,8 @@ Subcommand ``cache-mode`` specifies the caching mode for cache tier <pool>.
 
 Usage::
 
-       ceph osd tier cache-mode <poolname> none|writeback|forward|readonly
+       ceph osd tier cache-mode <poolname> none|writeback|forward|readonly|
+       readforward|readproxy
 
 Subcommand ``remove`` removes the tier <tierpool> (the second one) from base pool
 <pool> (the first one).
@@ -979,8 +1047,8 @@ Subcommand ``unset`` unsets <key>.
 
 Usage::
 
-       osd unset pause|noup|nodown|noout|noin|nobackfill|norebalance|norecover|
-       noscrub|nodeep-scrub|notieragent
+       ceph osd unset full|pause|noup|nodown|noout|noin|nobackfill|
+       norebalance|norecover|noscrub|nodeep-scrub|notieragent
 
 
 pg
@@ -1039,6 +1107,65 @@ Usage::
 
        ceph pg getmap
 
+Subcommand ``ls`` lists pg with specific pool, osd, state
+
+Usage::
+
+       ceph pg ls {<int>} {active|clean|down|replay|splitting|
+       scrubbing|scrubq|degraded|inconsistent|peering|repair|
+       recovery|backfill_wait|incomplete|stale|remapped|
+       deep_scrub|backfill|backfill_toofull|recovery_wait|
+       undersized [active|clean|down|replay|splitting|
+       scrubbing|scrubq|degraded|inconsistent|peering|repair|
+       recovery|backfill_wait|incomplete|stale|remapped|
+       deep_scrub|backfill|backfill_toofull|recovery_wait|
+       undersized...]}
+
+Subcommand ``ls-by-osd`` lists pg on osd [osd]
+
+Usage::
+
+       ceph pg ls-by-osd <osdname (id|osd.id)> {<int>}
+       {active|clean|down|replay|splitting|
+       scrubbing|scrubq|degraded|inconsistent|peering|repair|
+       recovery|backfill_wait|incomplete|stale|remapped|
+       deep_scrub|backfill|backfill_toofull|recovery_wait|
+       undersized [active|clean|down|replay|splitting|
+       scrubbing|scrubq|degraded|inconsistent|peering|repair|
+       recovery|backfill_wait|incomplete|stale|remapped|
+       deep_scrub|backfill|backfill_toofull|recovery_wait|
+       undersized...]}
+
+Subcommand ``ls-by-pool`` lists pg with pool = [poolname | poolid]
+
+Usage::
+
+       ceph pg ls-by-pool <poolstr> {<int>} {active|
+       clean|down|replay|splitting|
+       scrubbing|scrubq|degraded|inconsistent|peering|repair|
+       recovery|backfill_wait|incomplete|stale|remapped|
+       deep_scrub|backfill|backfill_toofull|recovery_wait|
+       undersized [active|clean|down|replay|splitting|
+       scrubbing|scrubq|degraded|inconsistent|peering|repair|
+       recovery|backfill_wait|incomplete|stale|remapped|
+       deep_scrub|backfill|backfill_toofull|recovery_wait|
+       undersized...]}
+
+Subcommand ``ls-by-primary`` lists pg with primary = [osd]
+
+Usage::
+
+       ceph pg ls-by-primary <osdname (id|osd.id)> {<int>}
+       {active|clean|down|replay|splitting|
+       scrubbing|scrubq|degraded|inconsistent|peering|repair|
+       recovery|backfill_wait|incomplete|stale|remapped|
+       deep_scrub|backfill|backfill_toofull|recovery_wait|
+       undersized [active|clean|down|replay|splitting|
+       scrubbing|scrubq|degraded|inconsistent|peering|repair|
+       recovery|backfill_wait|incomplete|stale|remapped|
+       deep_scrub|backfill|backfill_toofull|recovery_wait|
+       undersized...]}
+
 Subcommand ``map`` shows mapping of pg to osds.
 
 Usage::
@@ -1152,6 +1279,14 @@ Usage::
 
        ceph tell <name (type.id)> <args> [<args>...]
 
+version
+-------
+
+Show mon daemon version
+
+Usage::
+
+       ceph version
 
 Options
 =======
index 57be1ba52f86fbcccb6dc35765a33daa603afe17..c168670178eb693f95c3585d787317f370e47c15 100644 (file)
@@ -75,6 +75,10 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
 .fi
 .sp
 .nf
+\fBceph\fP \fBfs\fP [ \fIls\fP | \fInew\fP | \fIreset\fP | \fIrm\fP ] ...
+.fi
+.sp
+.nf
 \fBceph\fP \fBfsid\fP
 .fi
 .sp
@@ -107,15 +111,15 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
 .fi
 .sp
 .nf
-\fBceph\fP \fBosd\fP [ \fIblacklist\fP | \fIcreate\fP | \fIdeep\-scrub\fP | \fIdown\fP | \fIdump\fP | \fIerasure\-code\-profile\fP | \fIfind\fP | \fIgetcrushmap\fP | \fIgetmap\fP | \fIgetmaxosd\fP | \fIin\fP | \fIlost\fP | \fIls\fP | \fIlspools\fP | \fImap\fP | \fImetadata\fP | \fIout\fP | \fIpause\fP | \fIperf\fP | \fIprimary\-affinity\fP | \fIprimary\-temp\fP | \fIrepair\fP | \fIreweight\fP | \fIreweight\-by\-utilization\fP | \fIrm\fP | \fIscrub\fP | \fIset\fP | \fIsetcrushmap\fP | \fIsetmaxosd\fP  | \fIstat\fP | \fIthrash\fP | \fItree\fP | \fIunpause\fP | \fIunset\fP ] ...
+\fBceph\fP \fBosd\fP [ \fIblacklist\fP | \fIblocked\-by\fP | \fIcreate\fP | \fIdeep\-scrub\fP | \fIdf\fP | \fIdown\fP | \fIdump\fP | \fIerasure\-code\-profile\fP | \fIfind\fP | \fIgetcrushmap\fP | \fIgetmap\fP | \fIgetmaxosd\fP | \fIin\fP | \fIlspools\fP | \fImap\fP | \fImetadata\fP | \fIout\fP | \fIpause\fP | \fIperf\fP | \fIpg\-temp\fP | \fIprimary\-affinity\fP | \fIprimary\-temp\fP | \fIrepair\fP | \fIreweight\fP | \fIreweight\-by\-pg\fP | \fIrm\fP | \fIscrub\fP | \fIset\fP | \fIsetcrushmap\fP | \fIsetmaxosd\fP  | \fIstat\fP | \fIthrash\fP | \fItree\fP | \fIunpause\fP | \fIunset\fP ] ...
 .fi
 .sp
 .nf
-\fBceph\fP \fBosd\fP \fBcrush\fP [ \fIadd\fP | \fIadd\-bucket\fP | \fIcreate\-or\-move\fP | \fIdump\fP | \fIget\-tunable\fP | \fIlink\fP | \fImove\fP | \fIremove\fP | \fIreweight\fP | \fIreweight\-all\fP | \fIrm\fP | \fIrule\fP | \fIset\fP | \fIset\-tunable\fP | \fIshow\-tunables\fP | \fItunables\fP | \fIunlink\fP ] ...
+\fBceph\fP \fBosd\fP \fBcrush\fP [ \fIadd\fP | \fIadd\-bucket\fP | \fIcreate\-or\-move\fP | \fIdump\fP | \fIget\-tunable\fP | \fIlink\fP | \fImove\fP | \fIremove\fP | \fIrename\-bucket\fP | \fIreweight\fP | \fIreweight\-all\fP | \fIreweight\-subtree\fP | \fIrm\fP | \fIrule\fP | \fIset\fP | \fIset\-tunable\fP | \fIshow\-tunables\fP | \fItunables\fP | \fIunlink\fP ] ...
 .fi
 .sp
 .nf
-\fBceph\fP \fBosd\fP \fBpool\fP [ \fIcreate\fP | \fIdelete\fP | \fIget\fP | \fIget\-quota\fP | \fImksnap\fP | \fIrename\fP | \fIrmsnap\fP | \fIset\fP | \fIset\-quota\fP | \fIstats\fP ] ...
+\fBceph\fP \fBosd\fP \fBpool\fP [ \fIcreate\fP | \fIdelete\fP | \fIget\fP | \fIget\-quota\fP | \fIls\fP | \fImksnap\fP | \fIrename\fP | \fIrmsnap\fP | \fIset\fP | \fIset\-quota\fP | \fIstats\fP ] ...
 .fi
 .sp
 .nf
@@ -123,7 +127,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
 .fi
 .sp
 .nf
-\fBceph\fP \fBpg\fP [ \fIdebug\fP | \fIdeep\-scrub\fP | \fIdump\fP | \fIdump_json\fP | \fIdump_pools_json\fP | \fIdump_stuck\fP | \fIforce_create_pg\fP | \fIgetmap\fP | \fImap\fP | \fIrepair\fP | \fIscrub\fP | \fIsend_pg_creates\fP | \fIset_full_ratio\fP | \fIset_nearfull_ratio\fP | \fIstat\fP ] ...
+\fBceph\fP \fBpg\fP [ \fIdebug\fP | \fIdeep\-scrub\fP | \fIdump\fP | \fIdump_json\fP | \fIdump_pools_json\fP | \fIdump_stuck\fP | \fIforce_create_pg\fP | \fIgetmap\fP | \fIls\fP | \fIls\-by\-osd\fP | \fIls\-by\-pool\fP | \fIls\-by\-primary\fP | \fImap\fP | \fIrepair\fP | \fIscrub\fP | \fIsend_pg_creates\fP | \fIset_full_ratio\fP | \fIset_nearfull_ratio\fP | \fIstat\fP ] ...
 .fi
 .sp
 .nf
@@ -154,6 +158,10 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
 \fBceph\fP \fBtell\fP \fI<name (type.id)> <args> [<args>...]\fP
 .fi
 .sp
+.nf
+\fBceph\fP \fBversion\fP
+.fi
+.sp
 .SH DESCRIPTION
 .sp
 \fBceph\fP is a control utility which is used for manual deployment and maintenance
@@ -168,7 +176,7 @@ or updating of authentication keys for a particular  entity such as a monitor or
 OSD. It uses some additional subcommands.
 .sp
 Subcommand \fBadd\fP adds authentication info for a particular entity from input
-file, or random key if no input given and/or any caps specified in the command.
+file, or random key if no input is given and/or any caps specified in the command.
 .sp
 Usage:
 .INDENT 0.0
@@ -443,6 +451,65 @@ ceph df {detail}
 .fi
 .UNINDENT
 .UNINDENT
+.SS fs
+.sp
+Manage cephfs filesystems. It uses some additional subcommands.
+.sp
+Subcommand \fBls\fP to list filesystems
+.sp
+Usage:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+ceph fs ls
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
+.sp
+Subcommand \fBnew\fP to make a new filesystem using named pools <metadata> and <data>
+.sp
+Usage:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+ceph fs new <fs_name> <metadata> <data>
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
+.sp
+Subcommand \fBreset\fP is used for disaster recovery only: reset to a single\-MDS map
+.sp
+Usage:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+ceph fs reset <fs_name> {\-\-yes\-i\-really\-mean\-it}
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
+.sp
+Subcommand \fBrm\fP to disable the named filesystem
+.sp
+Usage:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+ceph fs rm <fs_name> {\-\-yes\-i\-really\-mean\-it}
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
 .SS fsid
 .sp
 Show cluster\(aqs FSID/UUID.
@@ -959,6 +1026,20 @@ ceph osd blacklist rm <EntityAddr>
 .UNINDENT
 .UNINDENT
 .sp
+Subcommand \fBblocked\-by\fP prints a histogram of which OSDs are blocking their peers
+.sp
+Usage:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+ceph osd blocked\-by
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
+.sp
 Subcommand \fBcreate\fP creates new osd (with optional UUID).
 .sp
 Usage:
@@ -1016,13 +1097,12 @@ Usage:
 .nf
 .ft C
 ceph osd crush create\-or\-move <osdname (id|osd.id)> <float[0.0\-]> <args>
+[<args>...]
 .ft P
 .fi
 .UNINDENT
 .UNINDENT
 .sp
-[<args>...]
-.sp
 Subcommand \fBdump\fP dumps crush map.
 .sp
 Usage:
@@ -1094,6 +1174,20 @@ ceph osd crush remove <name> {<ancestor>}
 .UNINDENT
 .UNINDENT
 .sp
+Subcommand \fBrename\-bucket\fP renames bucket <srcname> to <dstname>
+.sp
+Usage:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+ceph osd crush rename\-bucket <srcname> <dstname>
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
+.sp
 Subcommand \fBreweight\fP change <name>\(aqs weight to <weight> in crush map.
 .sp
 Usage:
@@ -1123,6 +1217,21 @@ ceph osd crush reweight\-all
 .UNINDENT
 .UNINDENT
 .sp
+Subcommand \fBreweight\-subtree\fP changes all leaf items beneath <name>
+to <weight> in crush map
+.sp
+Usage:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+ceph osd crush reweight\-subtree <name> <weight>
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
+.sp
 Subcommand \fBrm\fP removes <name> from crush map (everywhere, or just at
 <ancestor>).
 .sp
@@ -1294,7 +1403,7 @@ Usage:
 .sp
 .nf
 .ft C
-ceph osd crush tunables legacy|argonaut|bobtail|firefly|optimal|default
+ceph osd crush tunables legacy|argonaut|bobtail|firefly|hammer|optimal|default
 .ft P
 .fi
 .UNINDENT
@@ -1315,6 +1424,20 @@ ceph osd crush unlink <name> {<ancestor>}
 .UNINDENT
 .UNINDENT
 .sp
+Subcommand \fBdf\fP shows OSD utilization
+.sp
+Usage:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+ceph osd df {plain|tree}
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
+.sp
 Subcommand \fBdeep\-scrub\fP initiates deep scrub on specified osd.
 .sp
 Usage:
@@ -1627,7 +1750,7 @@ Usage:
 .nf
 .ft C
 ceph osd pool create <poolname> <int[0\-]> {<int[0\-]>} {replicated|erasure}
-{<erasure_code_profile>} {<ruleset>}
+{<erasure_code_profile>} {<ruleset>} {<int>}
 .ft P
 .fi
 .UNINDENT
@@ -1663,7 +1786,7 @@ ceph osd pool get <poolname> auid|target_max_objects|target_max_bytes
 ceph osd pool get <poolname> cache_target_dirty_ratio|cache_target_full_ratio
 
 ceph osd pool get <poolname> cache_min_flush_age|cache_min_evict_age|
-erasure_code_profile
+erasure_code_profile|min_read_recency_for_promote|write_fadvise_dontneed
 .ft P
 .fi
 .UNINDENT
@@ -1683,6 +1806,20 @@ ceph osd pool get\-quota <poolname>
 .UNINDENT
 .UNINDENT
 .sp
+Subcommand \fBls\fP list pools
+.sp
+Usage:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+ceph osd pool ls {detail}
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
+.sp
 Subcommand \fBmksnap\fP makes snapshot <snap> in <pool>.
 .sp
 Usage:
@@ -1734,17 +1871,12 @@ Usage:
 .nf
 .ft C
 ceph osd pool set <poolname> size|min_size|crash_replay_interval|pg_num|
-pgp_num|crush_ruleset|hashpspool|hit_set_type|hit_set_period
-
-ceph osd pool set <poolname> hit_set_count|hit_set_fpp|debug_fake_ec_pool
-
-ceph osd pool set <poolname> target_max_bytes|target_max_objects
-
-ceph osd pool set <poolname> cache_target_dirty_ratio|cache_target_full_ratio
-
-ceph osd pool set <poolname> cache_min_flush_age|cache_min_evict_age
-
-ceph osd pool set <poolname> auid <val> {\-\-yes\-i\-really\-mean\-it}
+pgp_num|crush_ruleset|hashpspool|nodelete|nopgchange|nosizechange|
+hit_set_type|hit_set_period|hit_set_count|hit_set_fpp|debug_fake_ec_pool|
+target_max_bytes|target_max_objects|cache_target_dirty_ratio|
+cache_target_full_ratio|cache_min_flush_age|cache_min_evict_age|auid|
+min_read_recency_for_promote|write_fadvise_dontneed
+<val> {\-\-yes\-i\-really\-mean\-it}
 .ft P
 .fi
 .UNINDENT
@@ -1836,6 +1968,21 @@ osd reweight <int[0\-]> <float[0.0\-1.0]>
 .UNINDENT
 .UNINDENT
 .sp
+Subcommand \fBreweight\-by\-pg\fP reweight OSDs by PG distribution
+[overload\-percentage\-for\-consideration, default 120].
+.sp
+Usage:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+ceph osd reweight\-by\-pg {<int[100\-]>} {<poolname> [<poolname>...]}
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
+.sp
 Subcommand \fBreweight\-by\-utilization\fP reweight OSDs by utilization
 [overload\-percentage\-for\-consideration, default 120].
 .sp
@@ -1887,8 +2034,8 @@ Usage:
 .sp
 .nf
 .ft C
-ceph osd set pause|noup|nodown|noout|noin|nobackfill|norecover|noscrub|
-nodeep\-scrub|notieragent
+ceph osd set full|pause|noup|nodown|noout|noin|nobackfill|
+norebalance|norecover|noscrub|nodeep\-scrub|notieragent
 .ft P
 .fi
 .UNINDENT
@@ -1991,7 +2138,8 @@ Usage:
 .sp
 .nf
 .ft C
-ceph osd tier cache\-mode <poolname> none|writeback|forward|readonly
+ceph osd tier cache\-mode <poolname> none|writeback|forward|readonly|
+readforward|readproxy
 .ft P
 .fi
 .UNINDENT
@@ -2077,8 +2225,8 @@ Usage:
 .sp
 .nf
 .ft C
-osd unset pause|noup|nodown|noout|noin|nobackfill|norecover|noscrub|
-nodeep\-scrub|notieragent
+ceph osd unset full|pause|noup|nodown|noout|noin|nobackfill|
+norebalance|norecover|noscrub|nodeep\-scrub|notieragent
 .ft P
 .fi
 .UNINDENT
@@ -2167,7 +2315,7 @@ Usage:
 .sp
 .nf
 .ft C
-ceph pg dump_stuck {inactive|unclean|stale|undersized|degraded[inactive|unclean|stale|undersized|degraded...]}
+ceph pg dump_stuck {inactive|unclean|stale|undersized|degraded [inactive|unclean|stale|undersized|degraded...]}
 {<int>}
 .ft P
 .fi
@@ -2202,6 +2350,97 @@ ceph pg getmap
 .UNINDENT
 .UNINDENT
 .sp
+Subcommand \fBls\fP lists pg with specific pool, osd, state
+.sp
+Usage:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+ceph pg ls {<int>} {active|clean|down|replay|splitting|
+scrubbing|scrubq|degraded|inconsistent|peering|repair|
+recovery|backfill_wait|incomplete|stale|remapped|
+deep_scrub|backfill|backfill_toofull|recovery_wait|
+undersized [active|clean|down|replay|splitting|
+scrubbing|scrubq|degraded|inconsistent|peering|repair|
+recovery|backfill_wait|incomplete|stale|remapped|
+deep_scrub|backfill|backfill_toofull|recovery_wait|
+undersized...]}
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
+.sp
+Subcommand \fBls\-by\-osd\fP lists pg on osd [osd]
+.sp
+Usage:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+ceph pg ls\-by\-osd <osdname (id|osd.id)> {<int>}
+{active|clean|down|replay|splitting|
+scrubbing|scrubq|degraded|inconsistent|peering|repair|
+recovery|backfill_wait|incomplete|stale|remapped|
+deep_scrub|backfill|backfill_toofull|recovery_wait|
+undersized [active|clean|down|replay|splitting|
+scrubbing|scrubq|degraded|inconsistent|peering|repair|
+recovery|backfill_wait|incomplete|stale|remapped|
+deep_scrub|backfill|backfill_toofull|recovery_wait|
+undersized...]}
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
+.sp
+Subcommand \fBls\-by\-pool\fP lists pg with pool = [poolname | poolid]
+.sp
+Usage:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+ceph pg ls\-by\-pool <poolstr> {<int>} {active|
+clean|down|replay|splitting|
+scrubbing|scrubq|degraded|inconsistent|peering|repair|
+recovery|backfill_wait|incomplete|stale|remapped|
+deep_scrub|backfill|backfill_toofull|recovery_wait|
+undersized [active|clean|down|replay|splitting|
+scrubbing|scrubq|degraded|inconsistent|peering|repair|
+recovery|backfill_wait|incomplete|stale|remapped|
+deep_scrub|backfill|backfill_toofull|recovery_wait|
+undersized...]}
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
+.sp
+Subcommand \fBls\-by\-primary\fP lists pg with primary = [osd]
+.sp
+Usage:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+ceph pg ls\-by\-primary <osdname (id|osd.id)> {<int>}
+{active|clean|down|replay|splitting|
+scrubbing|scrubq|degraded|inconsistent|peering|repair|
+recovery|backfill_wait|incomplete|stale|remapped|
+deep_scrub|backfill|backfill_toofull|recovery_wait|
+undersized [active|clean|down|replay|splitting|
+scrubbing|scrubq|degraded|inconsistent|peering|repair|
+recovery|backfill_wait|incomplete|stale|remapped|
+deep_scrub|backfill|backfill_toofull|recovery_wait|
+undersized...]}
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
+.sp
 Subcommand \fBmap\fP shows mapping of pg to osds.
 .sp
 Usage:
@@ -2405,6 +2644,21 @@ ceph tell <name (type.id)> <args> [<args>...]
 .fi
 .UNINDENT
 .UNINDENT
+.SS version
+.sp
+Show mon daemon version
+.sp
+Usage:
+.INDENT 0.0
+.INDENT 3.5
+.sp
+.nf
+.ft C
+ceph version
+.ft P
+.fi
+.UNINDENT
+.UNINDENT
 .SH OPTIONS
 .INDENT 0.0
 .TP
@@ -2513,7 +2767,7 @@ Set a timeout for connecting to the cluster.
 .UNINDENT
 .SH AVAILABILITY
 .sp
-\fBceph\fP is part of Ceph, a massively scalable, open-source, distributed storage system. Please refer to
+\fBceph\fP is part of Ceph, a massively scalable, open\-source, distributed storage system. Please refer to
 the Ceph documentation at \fI\%http://ceph.com/docs\fP for more information.
 .SH SEE ALSO
 .sp