]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
Restructuring documentation.
authorJohn Wilkins <john.wilkins@dreamhost.com>
Tue, 10 Apr 2012 17:32:22 +0000 (10:32 -0700)
committerTommi Virtanen <tommi.virtanen@dreamhost.com>
Wed, 2 May 2012 19:09:55 +0000 (12:09 -0700)
Submitted by: John Wilkins <john.wilkins@dreamhost.com>

Signed-off-by: Tommi Virtanen <tommi.virtanen@dreamhost.com>
12 files changed:
doc/images/lightstack.png [new file with mode: 0644]
doc/images/lightstack.svg [new file with mode: 0644]
doc/index.rst
doc/install/build_prerequisites.rst [new file with mode: 0644]
doc/install/building_ceph.rst [new file with mode: 0644]
doc/install/cloning_the_ceph_source_code_repository.rst [new file with mode: 0644]
doc/install/download_packages.rst [new file with mode: 0644]
doc/install/downloading_a_ceph_release.rst [new file with mode: 0644]
doc/install/file_system_requirements.rst
doc/install/index.rst
doc/start/index.rst
doc/start/quick_start.rst [new file with mode: 0644]

diff --git a/doc/images/lightstack.png b/doc/images/lightstack.png
new file mode 100644 (file)
index 0000000..41dd0fa
Binary files /dev/null and b/doc/images/lightstack.png differ
diff --git a/doc/images/lightstack.svg b/doc/images/lightstack.svg
new file mode 100644 (file)
index 0000000..14939fd
--- /dev/null
@@ -0,0 +1,304 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<!-- Created with Inkscape (http://www.inkscape.org/) -->
+
+<svg
+   xmlns:dc="http://purl.org/dc/elements/1.1/"
+   xmlns:cc="http://creativecommons.org/ns#"
+   xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
+   xmlns:svg="http://www.w3.org/2000/svg"
+   xmlns="http://www.w3.org/2000/svg"
+   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
+   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
+   width="744.09448819"
+   height="1052.3622047"
+   id="svg2"
+   version="1.1"
+   inkscape:version="0.48.2 r9819"
+   sodipodi:docname="lightstack.svg"
+   inkscape:export-filename="/home/johnw/ceph/doc/images/lightstack.png"
+   inkscape:export-xdpi="90"
+   inkscape:export-ydpi="90">
+  <defs
+     id="defs4" />
+  <sodipodi:namedview
+     id="base"
+     pagecolor="#ffffff"
+     bordercolor="#666666"
+     borderopacity="1.0"
+     inkscape:pageopacity="0.0"
+     inkscape:pageshadow="2"
+     inkscape:zoom="1.0074671"
+     inkscape:cx="331.52702"
+     inkscape:cy="671.20828"
+     inkscape:document-units="px"
+     inkscape:current-layer="layer1"
+     showgrid="false"
+     inkscape:window-width="1600"
+     inkscape:window-height="875"
+     inkscape:window-x="0"
+     inkscape:window-y="24"
+     inkscape:window-maximized="1" />
+  <metadata
+     id="metadata7">
+    <rdf:RDF>
+      <cc:Work
+         rdf:about="">
+        <dc:format>image/svg+xml</dc:format>
+        <dc:type
+           rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
+        <dc:title></dc:title>
+      </cc:Work>
+    </rdf:RDF>
+  </metadata>
+  <g
+     inkscape:label="Layer 1"
+     inkscape:groupmode="layer"
+     id="layer1">
+    <rect
+       style="fill:#9c4850;fill-opacity:1;stroke:#000000;stroke-width:1.11656284;stroke-opacity:1"
+       id="rect2987"
+       width="451.495"
+       height="93.6362"
+       x="173.28064"
+       y="580.00098" />
+    <g
+       id="g3844"
+       transform="translate(-30.234232,-111.53376)">
+      <g
+         transform="translate(9.8765869,-0.47521)"
+         id="g3785">
+        <rect
+           style="fill:#f05c56;fill-opacity:1;stroke:#000000;stroke-opacity:1"
+           id="rect3775"
+           width="127.0513"
+           height="45.659061"
+           x="215.39166"
+           y="723.44659" />
+        <text
+           xml:space="preserve"
+           style="font-size:16px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+           x="243.87434"
+           y="752.24097"
+           id="text3777"
+           sodipodi:linespacing="125%"><tspan
+             sodipodi:role="line"
+             id="tspan3779"
+             x="243.87434"
+             y="752.24097">Device 1</tspan></text>
+      </g>
+      <g
+         id="g3785-0"
+         transform="translate(150.34506,-0.47521)">
+        <rect
+           style="fill:#f05c56;fill-opacity:1;stroke:#000000;stroke-opacity:1"
+           id="rect3775-5"
+           width="127.0513"
+           height="45.659061"
+           x="215.39166"
+           y="723.44659" />
+        <text
+           xml:space="preserve"
+           style="font-size:16px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+           x="243.87434"
+           y="752.24097"
+           id="text3777-5"
+           sodipodi:linespacing="125%"><tspan
+             sodipodi:role="line"
+             id="tspan3779-0"
+             x="243.87434"
+             y="752.24097">Device 2</tspan></text>
+      </g>
+      <g
+         id="g3785-0-4"
+         transform="translate(290.81354,-0.47521)">
+        <rect
+           style="fill:#f05c56;fill-opacity:1;stroke:#000000;stroke-opacity:1"
+           id="rect3775-5-3"
+           width="127.0513"
+           height="45.659061"
+           x="215.39166"
+           y="723.44659" />
+        <text
+           xml:space="preserve"
+           style="font-size:16px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+           x="243.87434"
+           y="752.24097"
+           id="text3777-5-8"
+           sodipodi:linespacing="125%"><tspan
+             sodipodi:role="line"
+             id="tspan3779-0-1"
+             x="243.87434"
+             y="752.24097">Device <tspan
+   style="font-style:italic;-inkscape-font-specification:Sans Italic"
+   id="tspan3862">n</tspan></tspan></text>
+      </g>
+    </g>
+    <text
+       xml:space="preserve"
+       style="font-size:16px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Sans"
+       x="275.93954"
+       y="602.77899"
+       id="text3858"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan3860"
+         x="275.93954"
+         y="602.77899">Object Storage Device Cluster</tspan></text>
+    <rect
+       style="fill:#5e6a71;fill-opacity:1;stroke:#000000;stroke-width:1.11656284000000006;stroke-opacity:1"
+       id="rect2987-0"
+       width="451.495"
+       height="93.6362"
+       x="173.27182"
+       y="474.86395" />
+    <rect
+       style="fill:#80d2dc;fill-opacity:1;stroke:#000000;stroke-opacity:1"
+       id="rect3775-8"
+       width="127.0513"
+       height="45.659061"
+       x="195.02521"
+       y="506.3006" />
+    <text
+       xml:space="preserve"
+       style="font-size:16px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="223.50789"
+       y="535.09497"
+       id="text3777-9"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan3779-2"
+         x="223.50789"
+         y="535.09497">Monitor 1</tspan></text>
+    <rect
+       style="fill:#80d2dc;fill-opacity:1;stroke:#000000;stroke-opacity:1"
+       id="rect3775-5-4"
+       width="127.0513"
+       height="45.659061"
+       x="335.49368"
+       y="506.3006" />
+    <text
+       xml:space="preserve"
+       style="font-size:16px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="363.97635"
+       y="535.09497"
+       id="text3777-5-1"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan3779-0-0"
+         x="363.97635"
+         y="535.09497">Monitor 2</tspan></text>
+    <rect
+       style="fill:#80d2dc;fill-opacity:1;stroke:#000000;stroke-opacity:1"
+       id="rect3775-5-3-5"
+       width="127.0513"
+       height="45.659061"
+       x="475.96216"
+       y="506.3006" />
+    <text
+       xml:space="preserve"
+       style="font-size:16px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+       x="504.44485"
+       y="535.09497"
+       id="text3777-5-8-7"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan3779-0-1-9"
+         x="504.44485"
+         y="535.09497"><tspan
+           style="font-style:italic;-inkscape-font-specification:Sans Italic"
+           id="tspan3862-1"><tspan
+   style="font-style:normal;-inkscape-font-specification:Sans"
+   id="tspan3953">Monitor</tspan> n</tspan></tspan></text>
+    <text
+       xml:space="preserve"
+       style="font-size:16px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Sans"
+       x="335.93073"
+       y="497.64197"
+       id="text3858-4"
+       sodipodi:linespacing="125%"><tspan
+         sodipodi:role="line"
+         id="tspan3860-7"
+         x="335.93073"
+         y="497.64197">Monitor Cluster</tspan></text>
+    <g
+       id="g4075"
+       transform="translate(0,6)">
+      <rect
+         y="363.11652"
+         x="172.50159"
+         height="93.6362"
+         width="451.495"
+         id="rect2987-0-0"
+         style="fill:#55aeba;fill-opacity:1;stroke:#000000;stroke-width:1.11656284;stroke-opacity:1" />
+      <rect
+         y="394.55316"
+         x="194.25497"
+         height="45.659061"
+         width="127.0513"
+         id="rect3775-8-3"
+         style="fill:#e6e8e8;fill-opacity:1;stroke:#000000;stroke-opacity:1" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text3777-9-1"
+         y="423.34753"
+         x="232.73767"
+         style="font-size:16px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="423.34753"
+           x="232.73767"
+           id="tspan3779-2-3"
+           sodipodi:role="line">Direct</tspan></text>
+      <rect
+         y="394.55316"
+         x="334.72345"
+         height="45.659061"
+         width="127.0513"
+         id="rect3775-5-4-1"
+         style="fill:#e6e8e8;fill-opacity:1;stroke:#000000;stroke-opacity:1" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text3777-5-1-3"
+         y="423.34753"
+         x="377.20612"
+         style="font-size:16px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="423.34753"
+           x="377.20612"
+           id="tspan3779-0-0-8"
+           sodipodi:role="line">Block</tspan></text>
+      <rect
+         y="394.55316"
+         x="475.19196"
+         height="45.659061"
+         width="127.0513"
+         id="rect3775-5-3-5-0"
+         style="fill:#e6e8e8;fill-opacity:1;stroke:#000000;stroke-opacity:1" />
+      <text
+         sodipodi:linespacing="125%"
+         id="text3777-5-8-7-0"
+         y="423.34753"
+         x="523.67462"
+         style="font-size:16px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="423.34753"
+           x="523.67462"
+           id="tspan3779-0-1-9-0"
+           sodipodi:role="line">VFS<tspan
+   id="tspan3862-1-9"
+   style="font-style:italic;-inkscape-font-specification:Sans Italic"><tspan
+     id="tspan3953-0"
+     style="font-style:normal;-inkscape-font-specification:Sans" /></tspan></tspan></text>
+      <text
+         sodipodi:linespacing="125%"
+         id="text3858-4-1"
+         y="385.89453"
+         x="297.52643"
+         style="font-size:16px;font-style:normal;font-weight:normal;line-height:125%;letter-spacing:0px;word-spacing:0px;fill:#ffffff;fill-opacity:1;stroke:none;font-family:Sans"
+         xml:space="preserve"><tspan
+           y="385.89453"
+           x="297.52643"
+           id="tspan3860-7-4"
+           sodipodi:role="line">Unified Storage Interface</tspan></text>
+    </g>
+  </g>
+</svg>
index 2ed750a3849ea43aebbdb741a07631a5bdc7b7d7..2fc3b87395b67f88520099b62b47ce49ea7af152 100644 (file)
@@ -1,38 +1,18 @@
 ===============
 Welcome to Ceph
 ===============
-Ceph is an open source storage system that delivers extraordinary scalability--thousands of clients
-accessing petabytes to exabytes of data--with high performance and solid reliability. 
+Ceph uniquely delivers *object, block, and file storage in one unified system*. Ceph is highly reliable, easy to manage, and free. The power of Ceph can transform your company’s IT infrastructure and your ability to manage vast amounts of data. Ceph delivers extraordinary scalability--thousands of clients accessing petabytes to exabytes of data. Ceph leverages commodity hardware and intelligent daemons to accommodate large numbers of storage hosts, which communicate with each other to replicate data, and redistribute data dynamically. Ceph monitors the storage hosts to ensure they are operating effectively.
 
-Ceph leverages commodity hardware to accommodate large numbers of Object Storage Devices (OSDs)
-operating in clusters over a TCP/IP network. Ceph's Reliable Autonomic Distributed Object Store (RADOS) 
-utilizes the CPU, memory and network interface of the OSDs to communicate with each other, 
-replicate data, and redistribute data dynamically. Ceph's monitors maintain a master copy of the 
-OSD cluster map. Monitors also use the Paxos algorithm to to resolve disparities among different versions 
-of the OSD cluster map as maintained by a plurality of monitors.
-
-Client applications access RADOS OSD clusters in several ways. A C/C++ binding (``librados``) provides an
-application with direct access to RADOS OSDs. Applications can access RADOS as a block device (``rbd``) using a 
-device driver (dev/rdb) or the Qemu Kernel-based Virtual Machine (KVM). The RADOS RESTful gateway (``radosgw``)
-supports popular protocols like Amazon S3 and Swift so that applications that support those
-data storage interfaces can utilize RADOS OSDs. Finally, client applications can access RADOS OSDs
-using the Ceph file system. 
-
-The Ceph File System (Ceph FS) is a virtual file system (VFS) with POSIX semantics that provides
-client applications with a unified interface to petabytes or even exabytes of data. Ceph metadata servers 
-provide the Ceph FS file system mapping. Client applications access Ceph FS via a Filesystem in User Space (FUSE), 
-a Kernel Object (KO), or the Ceph VFS.
-
-.. image:: images/techstack.png
+.. image:: images/lightstack.png
 
 Ceph Development Status
 =======================
-The Ceph project is currently focused on stability. The Ceph file system is functionally complete, 
-but has not been tested well enough at scale and under load to recommend it for a production environment yet.
-We recommend deploying Ceph for testing and evaluation. We do not recommend deploying Ceph into a
-production environment or storing valuable data until stress testing is complete.
-Ceph is developed on Linux. You may attempt to deploy Ceph on other platforms, but Linux is the 
-target platform for the Ceph project. You can access the Ceph file system from other operating systems 
+Ceph has been under development as an open source project since 2004, and its current focus
+is on stability. The Ceph file system is functionally complete, but has not been tested well enough at scale 
+and under load to recommend it for a production environment yet. We recommend deploying Ceph for testing 
+and evaluation. We do not recommend deploying Ceph into a production environment or storing valuable data 
+until stress testing is complete. Ceph is developed on Linux. You may attempt to deploy Ceph on other platforms, 
+but Linux is the target platform for the Ceph project. You can access the Ceph file system from other operating systems 
 using NFS or Samba re-exports.
 
 
@@ -52,4 +32,4 @@ using NFS or Samba re-exports.
    Internals <dev/index>
    man/index
    papers
-   appendix/index
+   appendix/index
\ No newline at end of file
diff --git a/doc/install/build_prerequisites.rst b/doc/install/build_prerequisites.rst
new file mode 100644 (file)
index 0000000..481cd3e
--- /dev/null
@@ -0,0 +1,105 @@
+===================
+Build Prerequisites
+===================
+
+Before you can build Ceph documentation or Ceph source code, you need to install several libraries and tools.
+
+.. tip:: Check this section to see if there are specific prerequisites for your Linux/Unix distribution.
+
+
+Prerequisites for Building Ceph Documentation
+=============================================
+Ceph utilizes Python's Sphinx documentation tool. For details on
+the Sphinx documentation tool, refer to: `Sphinx <http://sphinx.pocoo.org>`_
+Follow the directions at `Sphinx 1.1.3 <http://pypi.python.org/pypi/Sphinx>`_
+to install Sphinx. To run Sphinx via ``admin/build-doc``, at least the following packages are required:
+
+- ``python-dev``
+- ``python-pip``
+- ``python-virtualenv``
+- ``libxml2-dev``
+- ``libxslt-dev``
+- ``doxygen``
+- ``ditaa``
+- ``graphviz``
+
+Execute ``sudo apt-get install`` for each dependency that isn't installed on your host. ::
+
+       $ sudo apt-get install python-dev python-pip python-virtualenv libxml2-dev libxslt-dev doxygen ditaa graphviz
+
+Prerequisites for Building Ceph Source Code
+===========================================
+Ceph provides ``autoconf`` and ``automake`` scripts to get you started quickly. Ceph build scripts
+depend on the following:
+
+- ``autotools-dev``
+- ``autoconf``
+- ``automake``
+- ``cdbs``
+- ``gcc``
+- ``g++``
+- ``git``
+- ``libboost-dev``
+- ``libedit-dev``
+- ``libssl-dev``
+- ``libtool``
+- ``libfcgi``
+- ``libfcgi-dev``
+- ``libfuse-dev``
+- ``linux-kernel-headers``
+- ``libcrypto++-dev``
+- ``libcrypto++``
+- ``libexpat1-dev``
+- ``libgtkmm-2.4-dev``
+- ``pkg-config``
+
+On Ubuntu, execute ``sudo apt-get install`` for each dependency that isn't installed on your host. ::
+
+       $ sudo apt-get install autotools-dev autoconf automake cdbs
+         gcc g++ git libboost-dev libedit-dev libssl-dev libtool
+         libfcgi libfcgi-dev libfuse-dev linux-kernel-headers
+         libcrypto++-dev libcrypto++ libexpat1-dev libgtkmm-2.4-dev
+
+On Debian/Squeeze, execute ``aptitude install`` for each dependency that isn't installed on your host. ::
+
+       $ aptitude install autotools-dev autoconf automake cdbs
+         gcc g++ git libboost-dev libedit-dev libssl-dev libtool
+         libfcgi libfcgi-dev libfuse-dev linux-kernel-headers
+         libcrypto++-dev libcrypto++ libexpat1-dev libgtkmm-2.4-dev
+
+
+Ubuntu Requirements
+-------------------
+
+- ``uuid-dev``
+- ``libkeyutils-dev``
+- ``libgoogle-perftools-dev``
+- ``libatomic-ops-dev``
+- ``libaio-dev``
+- ``libgdata-common``
+- ``libgdata13``
+
+Execute ``sudo apt-get install`` for each dependency that isn't installed on your host. ::
+
+       $ sudo apt-get install uuid-dev libkeyutils-dev libgoogle-perftools-dev
+         libatomic-ops-dev libaio-dev libgdata-common libgdata13
+
+Debian
+------
+Alternatively, you may also install::
+
+       $ aptitude install fakeroot dpkg-dev
+       $ aptitude install debhelper cdbs libexpat1-dev libatomic-ops-dev
+
+openSUSE 11.2 (and later)
+-------------------------
+
+- ``boost-devel``
+- ``gcc-c++``
+- ``libedit-devel``
+- ``libopenssl-devel``
+- ``fuse-devel`` (optional)
+
+Execute ``zypper install`` for each dependency that isn't installed on your host. ::
+
+       $ zypper install boost-devel gcc-c++ libedit-devel libopenssl-devel fuse-devel
\ No newline at end of file
diff --git a/doc/install/building_ceph.rst b/doc/install/building_ceph.rst
new file mode 100644 (file)
index 0000000..81a2039
--- /dev/null
@@ -0,0 +1,31 @@
+=============
+Building Ceph
+=============
+
+Ceph provides build scripts for source code and for documentation.
+
+Building Ceph
+=============
+Ceph provides ``automake`` and ``configure`` scripts to streamline the build process. To build Ceph, navigate to your cloned Ceph repository and execute the following::
+
+       $ cd ceph
+       $ ./autogen.sh
+       $ ./configure
+       $ make
+
+You can use ``make -j`` to execute multiple jobs depending upon your system. For example:: 
+
+       $ make -j4
+       
+Building Ceph Documentation
+===========================
+Ceph utilizes Python’s Sphinx documentation tool. For details on the Sphinx documentation tool, refer to: `Sphinx <http://sphinx.pocoo.org>`_. To build the Ceph documentation, navigate to the Ceph repository and execute the build script::
+
+       $ cd ceph
+       $ admin/build-doc
+       
+Once you build the documentation set, you may navigate to the source directory to view it::
+
+       $ cd build-doc/output
+       
+There should be an ``/html`` directory and a ``/man`` directory containing documentation in HTML and manpage formats respectively.
diff --git a/doc/install/cloning_the_ceph_source_code_repository.rst b/doc/install/cloning_the_ceph_source_code_repository.rst
new file mode 100644 (file)
index 0000000..8486e2d
--- /dev/null
@@ -0,0 +1,54 @@
+=======================================
+Cloning the Ceph Source Code Repository
+=======================================
+To check out the Ceph source code, you must have ``git`` installed
+on your local host. To install ``git``, execute::
+
+       $ sudo apt-get install git
+
+You must also have a ``github`` account. If you do not have a
+``github`` account, go to `github.com <http://github.com>`_ and register. 
+Follow the directions for setting up git at `Set Up Git <http://help.github.com/linux-set-up-git/>`_.
+
+Generate SSH Keys
+-----------------
+You must generate SSH keys for github to clone the Ceph
+repository. If you do not have SSH keys for ``github``, execute::
+
+       $ ssh-keygen -d
+       
+Get the key to add to your ``github`` account::
+
+       $ cat .ssh/id_dsa.pub
+       
+Copy the public key. 
+
+Add the Key
+-----------
+Go to your ``github`` account,
+click on "Account Settings" (i.e., the 'tools' icon); then,
+click "SSH Keys" on the left side navbar. 
+
+Click "Add SSH key" in the "SSH Keys" list, enter a name for
+the key, paste the key you generated, and press the "Add key"
+button.
+
+Clone the Source
+----------------
+To clone the Ceph source code repository, execute::
+
+       $ git clone git@github.com:ceph/ceph.git
+        
+Once ``git clone`` executes, you should have a full copy of the Ceph repository.
+
+Clone the Submodules
+--------------------
+Before you can build Ceph, you must initialize and update the Git submodules::
+
+       $ git submodule init 
+       $ git submodule update 
+
+.. tip:: Make sure you maintain the latest copies of these submodules. Running ``git status`` will tell you if the submodules are out of date:: 
+
+       $ git status
+       
diff --git a/doc/install/download_packages.rst b/doc/install/download_packages.rst
new file mode 100644 (file)
index 0000000..9bf6d09
--- /dev/null
@@ -0,0 +1,41 @@
+====================
+Downloading Packages
+====================
+
+We automatically build Debian and Ubuntu packages for any branches or tags that appear in 
+the ``ceph.git`` `repository <http://github.com/ceph/ceph>`_. We build packages for the following 
+architectures:
+
+- ``amd64`` 
+- ``i386`` 
+
+For each architecture, we build packages for the following distributions:
+
+- Debian 7.0 (``wheezy``)
+- Debian 6.0 (``squeeze``)
+- Debian unstable (``sid``)
+- Ubuntu 12.04 (``precise``)
+- Ubuntu 11.10 (``oneiric``)
+- Ubuntu 11.04 (``natty``)
+- Ubuntu 10.10 (``maverick``)
+
+When you execute the following commands to install the Ceph packages, replace ``{ARCH}`` with the architecture of your CPU,
+``{DISTRO}`` with the code name of your operating system (e.g., ``wheezy``, rather than the version number) and 
+``{BRANCH}`` with the version of Ceph you want to run (e.g., ``master``, ``stable``, ``unstable``, ``v0.44``, etc.). ::
+
+       wget -q -O- https://raw.github.com/ceph/ceph/master/keys/autobuild.asc \
+       | sudo apt-key add -
+
+       sudo tee /etc/apt/sources.list.d/ceph.list <<EOF
+       deb http://ceph.newdream.net/debian-snapshot-{ARCH}/{BRANCH}/ {DISTRO} main
+       deb-src http://ceph.newdream.net/debian-snapshot-{ARCH}/{BRANCH}/ {DISTRO} main
+       EOF
+
+       sudo apt-get update
+       sudo apt-get install ceph
+
+
+When you download packages, you will receive the latest package build, which may be several weeks behind the current release
+or the most recent code. It may contain bugs that have already been fixed in the most recent versions of the code. Until packages
+contain only stable code, you should carefully consider the tradeoffs of installing from a package or retrieving the latest release
+or the most current source code and building Ceph.
\ No newline at end of file
diff --git a/doc/install/downloading_a_ceph_release.rst b/doc/install/downloading_a_ceph_release.rst
new file mode 100644 (file)
index 0000000..5a3ce1a
--- /dev/null
@@ -0,0 +1,6 @@
+==========================
+Downloading a Ceph Release
+==========================
+As Ceph development progresses, the Ceph team releases new versions. You may download Ceph releases here:
+
+`Ceph Releases <http://ceph.newdream.net/download/>`_
\ No newline at end of file
index 62b711d60347ffe4c43154b4f7e4c1fc2194b32f..5bc20ac8e11624648344baefe0381600c5654b6b 100644 (file)
@@ -14,18 +14,18 @@ Ceph include B tree and B+ tree file systems such as:
 - ``btrfs``
 - ``XFS``
 
-.. warning:: 
+.. warning:: XATTR limits.
 
-The RADOS Gateway's ACL and Ceph snapshots easily surpass the 4-kilobyte limit for XATTRs in ``ext4``, 
-causing the ``ceph-osd`` process to crash. So ``ext4`` is a poor file system choice if 
-you intend to deploy the RADOS Gateway or use snapshots.
+   The RADOS Gateway's ACL and Ceph snapshots easily surpass the 4-kilobyte limit for XATTRs in ``ext4``, 
+   causing the ``ceph-osd`` process to crash. So ``ext4`` is a poor file system choice if 
+   you intend to deploy the RADOS Gateway or use snapshots.
   
-.. tip:: 
+.. tip:: Use ``btrfs``
 
-The Ceph team believes that the best performance and stability will come from ``btrfs.`` 
-The ``btrfs`` file system has internal transactions that keep the local data set in a consistent state. 
-This makes OSDs based on ``btrfs`` simple to deploy, while providing scalability not 
-currently available from block-based file systems. The 64-kb XATTR limit for ``xfs``
-XATTRS is enough to accommodate RDB snapshot metadata and RADOS Gateway ACLs. So ``xfs`` is the second-choice 
-file system of the Ceph team. If you only plan to use RADOS and ``rbd`` without snapshots and without 
-``radosgw``, the ``ext4`` file system should work just fine.
+   The Ceph team believes that the best performance and stability will come from ``btrfs``.
+   The ``btrfs`` file system has internal transactions that keep the local data set in a consistent state. 
+   This makes OSDs based on ``btrfs`` simple to deploy, while providing scalability not 
+   currently available from block-based file systems. The 64-kb XATTR limit for ``xfs``
+   XATTRS is enough to accommodate RBD snapshot metadata and RADOS Gateway ACLs. So ``xfs`` is the second-choice 
+   file system of the Ceph team. If you only plan to use RADOS and ``rbd`` without snapshots and without 
+   ``radosgw``, the ``ext4`` file system should work just fine.
index e0cc10c37b675eae569fab4a845b30a11a970e88..aabd1ee2d5762c10b568bf7046c5dbe7b2d829f6 100644 (file)
@@ -1,14 +1,19 @@
-======================
-RADOS OSD Provisioning
-======================
-RADOS OSD clusters are the foundation of the Ceph file system, and they can also be provide
+=============================
+Designing a Storage Cluster
+=============================
+Storage clusters are the foundation of the Ceph file system, and they can also provide
 object storage to clients via ``librados``, ``rbd`` and ``radosgw``. The following sections 
-provide guidance for RADOS OSD provisioning:
+provide guidance for configuring a storage cluster:
 
 1. :doc:`Introduction to RADOS OSDs <introduction_to_rados_osds>`
 2. :doc:`Hardware Requirements <hardware_requirements>`
 3. :doc:`File System Requirements <file_system_requirements>`
-4. :doc:`Installing RADOS Processes and Daemons <installing_rados_processes_and_daemons>`
+4. :doc:`Build Prerequisites <build_prerequisites>`
+5. :doc:`Download Packages <download_packages>`
+6. :doc:`Downloading a Ceph Release <downloading_a_ceph_release>`
+7. :doc:`Cloning the Ceph Source Code Repository <cloning_the_ceph_source_code_repository>`
+8. :doc:`Building Ceph<building_ceph>`
+9. :doc:`Installing RADOS Processes and Daemons <installing_rados_processes_and_daemons>`
 
 .. toctree::
    :hidden:
@@ -16,4 +21,9 @@ provide guidance for RADOS OSD provisioning:
    Introduction <introduction_to_rados_osds>
    Hardware <hardware_requirements>
    File System Reqs <file_system_requirements>
-   Installation <installing_rados_processes_and_daemons>
+   build_prerequisites
+   Download Packages <download_packages>
+   Download a Release <downloading_a_ceph_release>
+   Clone the Source Code <cloning_the_ceph_source_code_repository>
+   building_ceph
+   Installation <installing_rados_processes_and_daemons>
\ No newline at end of file
index 2922d62552eeee667f37273f321a558869033150..64ccb48f2a24c5ae1be739d55354fa652ecc10ba 100644 (file)
@@ -6,23 +6,12 @@ that will help you get started before you install Ceph:
 
 - :doc:`Why use Ceph? <why_use_ceph>`
 - :doc:`Get Involved in the Ceph Community! <get_involved_in_the_ceph_community>`
-- :doc:`Build Prerequisites <build_prerequisites>`
-- :doc:`Download Packages <download_packages>`
-- :doc:`Downloading a Ceph Release <downloading_a_ceph_release>`
-- :doc:`Cloning the Ceph Source Code Repository <cloning_the_ceph_source_code_repository>`
-- :doc:`Building Ceph<building_ceph>`
-- :doc:`Summary <summary>`
-
-Once you successfully build the Ceph code, you may proceed to `RADOS OSD Provisioning <../install/RADOS_OSD_Provisioning>`_.
+- :doc:`Quick Start <quick_start>`
 
 .. toctree::
    :hidden:
 
    why_use_ceph
    Get Involved <get_involved_in_the_ceph_community>
-   build_prerequisites
-   Download Packages <download_packages>
-   Download a Release <downloading_a_ceph_release>
-   Clone the Source Code <cloning_the_ceph_source_code_repository>
-   building_ceph
-   summary
+   quick_start
+
diff --git a/doc/start/quick_start.rst b/doc/start/quick_start.rst
new file mode 100644 (file)
index 0000000..78103d9
--- /dev/null
@@ -0,0 +1,4 @@
+===========
+Quick Start
+===========
+