# comprising the Ceph Storage Cluster.
# Type: Boolean (optional)
# (Default: false)
- cephx require signatures = true ; everywhere possible
+ ;cephx require signatures = true
+
+ # The kernel RBD client does not support authentication yet:
+ cephx cluster require signatures = true
+ cephx service require signatures = false
# The path to the keyring file.
# Type: String (optional)
## Replication level, number of data copies.
# Type: 32-bit Integer
# (Default: 2)
- ;osd pool default size = 2
+ ;osd pool default size = 3
## Replication level in degraded state, less than 'osd pool default size' value.
- # Sets the minimum number of written replicas for objects in the
- # operating in degraded mode.
+ # Sets the minimum number of written replicas for objects in the pool
+ # when operating in degraded mode.
# Type: 32-bit Integer
# (Default: 0), which means no particular minimum. If 0, minimum is size - (size / 2).
- ;osd pool default min size = 1
+ ;osd pool default min size = 2
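+ # Illustration (not from the stock sample, assuming integer division):
+ # with the suggested size of 3 above and min size left at its default
+ # of 0, the implied minimum is 3 - (3 / 2) = 3 - 1 = 2, the same as
+ # the explicit value suggested here.
+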
## Ensure you have a realistic number of placement groups. We recommend
## approximately 100 per OSD. E.g., total number of OSDs multiplied by 100
# default value is the same as pg_num with mkpool.
# Type: 32-bit Integer
# (Default: 8)
- ;osd pool default pg num = 100
+ ;osd pool default pg num = 128
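+ # Illustrative calculation for an assumed small cluster, following the
+ # upstream placement-group guidance of dividing by the replica count and
+ # rounding up to a power of two: (3 OSDs * 100) / 3 replicas = 100,
+ # rounded up -> 128.
+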
# Description: The default number of placement groups for placement for a
# pool. The default value is the same as pgp_num with mkpool.
# PG and PGP should be equal (for now).
# Type: 32-bit Integer
# (Default: 8)
- ;osd pool default pgp num = 100
+ ;osd pool default pgp num = 128
# The default CRUSH ruleset to use when creating a pool
# Type: 32-bit Integer
;osd crush chooseleaf type = 1
-### http://ceph.com/docs/bobtail/rados/configuration/log-and-debug-ref/
+### http://ceph.com/docs/master/rados/troubleshooting/log-and-debug/
+ # The location of the logging file for your cluster.
+ # Type: String
+ # Required: No
# Default: /var/log/ceph/$cluster-$name.log
;log file = /var/log/ceph/$cluster-$name.log
+ # Determines if logging messages should appear in syslog.
+ # Type: Boolean
+ # Required: No
+ # (Default: false)
;log to syslog = true
# (Default: false)
;ms bind ipv6 = true
-
-### http://ceph.com/docs/master/rados/configuration/filestore-config-ref/
-
- # The maximum interval in seconds for synchronizing the filestore.
- # Type: Double (optional)
- # (Default: 5)
- ;filestore max sync interval = 5
-
- # Use object map for XATTRS. Set to true for ext4 file systems only.
- # Type: Boolean (optional)
- # (Default: false)
- ;filestore xattr use omap = true
-
-### http://ceph.com/docs/master/rados/configuration/journal-ref/
-
##################
## Monitors
## You need at least one. You need at least three if you want to
## tolerate any node failures. Always create an odd number.
[mon]
-### http://ceph.com/docs/argonaut/config-ref/mon-config/
### http://ceph.com/docs/master/rados/configuration/mon-config-ref/
-### http://ceph.com/docs/dumpling/rados/configuration/mon-osd-interaction/
+### http://ceph.com/docs/master/rados/configuration/mon-osd-interaction/
# The IDs of initial monitors in a cluster during startup.
# If specified, Ceph requires an odd number of monitors to form an
+ # initial quorum (e.g., 3).
;mon host = cephhost01,cephhost02
;mon addr = 192.168.0.101,192.168.0.102
- # The monitor’s data location
+ # The monitor's data location
# Default: /var/lib/ceph/mon/$cluster-$id
;mon data = /var/lib/ceph/mon/$name
# Exponential backoff for clock drift warnings
# Type: Float
# (Default: 5)
- ;mon clock drift warn backoff = 30 ; Tell the monitor to backoff from this warning for 30 seconds
+ ;mon clock drift warn backoff = 30 # Tell the monitor to back off from this warning for 30 seconds
- # The percentage of disk space used before an OSD is considered full.
+ # The percentage of disk space used before an OSD is considered nearfull.
# Type: Float
# (Default: .85)
;mon osd nearfull ratio = .85
+ # The number of seconds Ceph waits before marking a Ceph OSD
+ # Daemon "down" and "out" if it doesn't respond.
+ # Type: 32-bit Integer
+ # (Default: 300)
+ ;mon osd down out interval = 300
-### http://ceph.com/docs/next/rados/troubleshooting/log-and-debug/
+### http://ceph.com/docs/master/rados/troubleshooting/log-and-debug/
# logging, for debugging monitor crashes, in order of
# their likelihood of being helpful :)
# experimental support for running multiple metadata servers. Do not run
# multiple metadata servers in production.
[mds]
-### http://ceph.com/docs/argonaut/config-ref/mds-config/
### http://ceph.com/docs/master/cephfs/mds-config-ref/
- # where the mds keeps it's secret encryption keys
+ # where the mds keeps its secret encryption keys
;keyring = /var/lib/ceph/mds/$name/keyring
+ # Determines whether a 'ceph-mds' daemon should poll and
+ # replay the log of an active MDS (hot standby).
+ # Type: Boolean
+ # (Default: false)
+ ;mds standby replay = true
+
; mds logging to debug issues.
;debug ms = 1
;debug mds = 20
+ ;debug journaler = 20
+ # The number of inodes to cache.
+ # Type: 32-bit Integer
+ # (Default: 100000)
+ ;mds cache size = 250000
+
+ ;mds mem max = 1048576 # KB
;[mds.alpha]
; host = alpha
# You need at least one. Two or more if you want data to be replicated.
# Define as many as you like.
[osd]
-### http://ceph.com/docs/argonaut/config-ref/osd-config/
-### http://ceph.com/docs/bobtail/rados/configuration/osd-config-ref/
+### http://ceph.com/docs/master/rados/configuration/osd-config-ref/
# The path to the OSDs data.
# You must create the directory when deploying Ceph.
## disks. You can use a file under the osd data dir if need be
## (e.g. /data/$name/journal), but it will be slower than a
## separate disk or partition.
- # The path to the OSD’s journal. This may be a path to a file or a block
+ # The path to the OSD's journal. This may be a path to a file or a block
# device (such as a partition of an SSD). If it is a file, you must
# create the directory to contain it.
# We recommend using a drive separate from the osd data drive.
# Default: /var/lib/ceph/osd/$cluster-$id/journal
;osd journal = /var/lib/ceph/osd/$name/journal
+ # Check log files for corruption. Can be computationally expensive.
+ # Type: Boolean
+ # (Default: false)
+ ;osd check for log corruption = true
+
### http://ceph.com/docs/master/rados/configuration/journal-ref/
# The size of the journal in megabytes. If this is 0,
+ # and the journal is a block device, the entire block device is used.
# (Default: 5120)
# Recommended: Begin with 1GB. Should be at least twice the product
# of the expected speed multiplied by "filestore max sync interval".
- ;osd journal size = 1000 ; journal size, in megabytes
+ ;osd journal size = 2048 ; journal size, in megabytes
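+ # Illustrative sizing with assumed numbers: a journal device sustaining
+ # ~100 MB/s with 'filestore max sync interval = 5' needs at least
+ # 2 * 100 MB/s * 5 s = 1000 MB, so 2048 MB leaves headroom.
+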
## If you want to run the journal on a tmpfs, disable DirectIO
# Enables direct i/o to the journal.
- # Requires journal block align set to true.
+ # Requires "journal block align" set to "true".
# Type: Boolean
# Required: Yes when using aio.
# (Default: true)
;journal dio = false
- # Check log files for corruption. Can be computationally expensive.
- # Type: Boolean
- # (Default: false)
- ;osd check for log corruption = true
-
# osd logging to debug osd issues, in order of likelihood of being helpful
;debug ms = 1
;debug osd = 20
;[osd.3]
; host = eta
+
+### http://ceph.com/docs/master/rados/configuration/filestore-config-ref/
+
+ # The maximum interval in seconds for synchronizing the filestore.
+ # Type: Double (optional)
+ # (Default: 5)
+ ;filestore max sync interval = 5
+
+ # Enable snapshots for a btrfs filestore.
+ # Type: Boolean
+ # Required: No. Only used for btrfs.
+ # (Default: true)
+ ;filestore btrfs snap = false
+
+ # Enables the filestore flusher.
+ # Type: Boolean
+ # Required: No
+ # (Default: false)
+ ;filestore flusher = true
+
+##################
+## client settings
+[client]
+
+### http://ceph.com/docs/master/rbd/rbd-config-ref/
+
+ # Enable caching for RADOS Block Device (RBD).
+ # Type: Boolean
+ # Required: No
+ # (Default: false)
+ rbd cache = true
+
+ # The RBD cache size in bytes.
+ # Type: 64-bit Integer
+ # Required: No
+ # (Default: 32 MiB)
+ ;rbd cache size = 33554432
+
+ # The dirty limit in bytes at which the cache triggers write-back.
+ # If 0, uses write-through caching.
+ # Type: 64-bit Integer
+ # Required: No
+ # Constraint: Must be less than rbd cache size.
+ # (Default: 24 MiB)
+ ;rbd cache max dirty = 25165824
+
+ # The dirty target before the cache begins writing data to the data storage.
+ # Does not block writes to the cache.
+ # Type: 64-bit Integer
+ # Required: No
+ # Constraint: Must be less than rbd cache max dirty.
+ # (Default: 16 MiB)
+ ;rbd cache target dirty = 16777216
+
+ # The number of seconds dirty data is in the cache before writeback starts.
+ # Type: Float
+ # Required: No
+ # (Default: 1.0)
+ ;rbd cache max dirty age = 1.0
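+
+ # Illustrative alternative sizing (assumed values, not the defaults) that
+ # keeps the required ordering cache size > max dirty > target dirty,
+ # here 64 MiB / 48 MiB / 32 MiB:
+ ;rbd cache size = 67108864
+ ;rbd cache max dirty = 50331648
+ ;rbd cache target dirty = 33554432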
+
+ # Start out in write-through mode, and switch to write-back after the
+ # first flush request is received. Enabling this is a conservative but
+ # safe setting in case VMs running on rbd are too old to send flushes,
+ # like the virtio driver in Linux before 2.6.32.
+ # Type: Boolean
+ # Required: No
+ # (Default: false)
+ ;rbd cache writethrough until flush = false