--- /dev/null
+FIO
+===
+
+Ceph uses the fio workload generator and benchmarking utility.
+
+To fetch the fio sources:
+
+ git clone git://git.kernel.dk/fio.git
+
+To build fio:
+
+ ./configure
+ make
+
+RBD
+---
+
+The fio engine for rbd lives in the fio tree itself, so you'll need to build
+fio from source as described above.
+
+If you install the ceph libraries to a location that isn't in your
+LD_LIBRARY_PATH, be sure to add it:
+
+ export LD_LIBRARY_PATH=/path/to/install/lib
+
+To build fio with rbd:
+
+ ./configure --extra-cflags="-I/path/to/install/include -L/path/to/install/lib"
+ make
+
+If configure reports "Rados Block Device engine no", see config.log for
+details and adjust the cflags as necessary.
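+
+For example, to check whether librbd and librados are visible in the
+standard dynamic linker paths:
+
+ ldconfig -p | grep -E 'librbd|librados'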
+
+To view the fio options specific to the rbd engine:
+
+ ./fio --enghelp=rbd
+
+See examples/rbd.fio for an example job file. To run:
+
+ ./fio examples/rbd.fio
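+
+A minimal job file along the same lines (the pool, image, and client names
+below are placeholders; the image must exist before the run, e.g. created
+beforehand with rbd create):
+
+ [global]
+ ioengine=rbd
+ clientname=admin
+ pool=rbd
+ rbdname=fio_test
+ rw=randwrite
+ bs=4k
+
+ [rbd_iodepth32]
+ iodepth=32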
+
+ObjectStore
+-----------
+
+This fio engine allows you to exercise a ceph ObjectStore directly, without
+having to build a ceph cluster or start any daemons.
+
+Because ObjectStore is not a public-facing interface, the engine is built
+inside the ceph tree and libfio_ceph_objectstore.so is loaded into fio as an
+external engine.
+
+To build ceph with fio_ceph_objectstore:
+
+ mkdir build && cd build
+ cmake -DWITH_FIO=ON -DFIO_INCLUDE_DIR=/path/to/fio -DCMAKE_BUILD_TYPE=Release /path/to/ceph
+ make install
+
+If you install the ceph libraries to a location that isn't in your
+LD_LIBRARY_PATH, be sure to add it:
+
+ export LD_LIBRARY_PATH=/path/to/install/lib
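+
+Alternatively, a job file can point at the engine by absolute path using
+fio's external: prefix, which avoids the LD_LIBRARY_PATH lookup (the path
+below is a placeholder for your install location):
+
+ ioengine=external:/path/to/install/lib/libfio_ceph_objectstore.so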
+
+To view the fio options specific to the objectstore engine:
+
+ ./fio --enghelp=libfio_ceph_objectstore.so
+
+The conf= option requires a ceph configuration file (ceph.conf). Example job
+and conf files for each object store are provided in the same directory as
+this README.
+
+To run:
+
+ ./fio /path/to/job.fio
--- /dev/null
+# example configuration file for ceph-bluestore.fio
+
+[global]
+ debug bluestore = 0/0
+ debug bluefs = 0/0
+ debug bdev = 0/0
+ debug rocksdb = 0/0
+ # spread objects over 8 collections
+ osd pool default pg num = 8
+ # increasing shards can help when scaling number of collections
+ osd op num shards = 5
+
+[osd]
+ osd objectstore = bluestore
+
+ enable experimental unrecoverable data corrupting features = bluestore rocksdb
+
+ # use directory= option from fio job file
+ osd data = ${fio_dir}
+
+ # log inside fio_dir
+ log file = ${fio_dir}/log
--- /dev/null
+# Runs a 64k random write test against the ceph BlueStore.
+[global]
+ioengine=libfio_ceph_objectstore.so # must be found in your LD_LIBRARY_PATH
+
+conf=ceph-bluestore.conf # must point to a valid ceph configuration file
+directory=/mnt/fio-bluestore # directory for osd_data
+
+rw=randwrite
+iodepth=16
+
+time_based=1
+runtime=20s
+
+[bluestore]
+nr_files=64
+size=256m
+bs=64k
--- /dev/null
+# example configuration file for ceph-filestore.fio
+
+[global]
+ debug filestore = 0/0
+ debug journal = 0/0
+
+ # spread objects over 8 collections
+ osd pool default pg num = 8
+ # increasing shards can help when scaling number of collections
+ osd op num shards = 5
+
+ filestore fd cache size = 32
+
+[osd]
+ osd objectstore = filestore
+
+ # use directory= option from fio job file
+ osd data = ${fio_dir}
+
+ # journal inside fio_dir
+ osd journal = ${fio_dir}/journal
+ osd journal size = 500
+ journal force aio = 1
+
+ # log outside fio_dir
+ log file = ${fio_dir}.log
--- /dev/null
+# Runs a 64k random write test against the ceph FileStore.
+[global]
+ioengine=libfio_ceph_objectstore.so # must be found in your LD_LIBRARY_PATH
+
+conf=ceph-filestore.conf # must point to a valid ceph configuration file
+directory=/mnt/fio-filestore # directory for osd_data
+
+rw=randwrite
+iodepth=16
+
+time_based=1
+runtime=20s
+
+[filestore]
+nr_files=64
+size=256m
+bs=64k
--- /dev/null
+# example configuration file for ceph-memstore.fio
+
+[global]
+ # MemStore logs through the filestore debug subsystem
+ debug filestore = 0/0
+
+ # spread objects over 8 collections
+ osd pool default pg num = 8
+ # increasing shards can help when scaling number of collections
+ osd op num shards = 5
+
+[osd]
+ osd objectstore = memstore
+
+ # use directory= option from fio job file
+ osd data = ${fio_dir}
+
+ # log inside fio_dir
+ log file = ${fio_dir}/log
--- /dev/null
+# Runs a 64k random write test against the ceph MemStore.
+[global]
+ioengine=libfio_ceph_objectstore.so # must be found in your LD_LIBRARY_PATH
+
+conf=ceph-memstore.conf # must point to a valid ceph configuration file
+directory=/mnt/fio-memstore # directory for osd_data
+
+rw=randwrite
+iodepth=16
+
+time_based=1
+runtime=20s
+
+[memstore]
+nr_files=64
+size=256m
+bs=64k
+++ /dev/null
-######################################################################
-# Example test for the external fio ioengine for ObjectStore.
-#
-# Runs a 4k random write test against a ObjectStore configuration.
-#
-######################################################################
-[global]
-ioengine=./.libs/libfio_ceph_objectstore.so
-invalidate=0 # mandatory
-rw=randwrite
-size=1g
-bs=4k
-
-[ceph_objectstore]
-iodepth=1
-objectstore=filestore
-#filestore_debug=20
-directory=/mnt/fio_ceph_filestore.XXXXXXX
-filestore_journal=/var/lib/ceph/osd/journal-ram/fio_ceph_filestore.XXXXXXX