]> git-server-git.apps.pok.os.sepia.ceph.com Git - ceph.git/commitdiff
doc: update openstack and rgw keystone 4848/head
authorSébastien Han <sebastien.han@enovance.com>
Wed, 3 Jun 2015 17:37:06 +0000 (19:37 +0200)
committerSébastien Han <seb@redhat.com>
Tue, 16 Jun 2015 09:22:14 +0000 (11:22 +0200)
Update the OpenStack doc with more options, recommendations and best
practices.
Update the Keystone configuration for the Kilo release with Rados
Gateway.

Signed-off-by: Sébastien Han <seb@redhat.com>
doc/radosgw/keystone.rst
doc/rbd/rbd-openstack.rst

index 7fae21293f472d324585ca2a9719ba3ab66c91d8..16ca2a663c2b182a541bba3f583797be9812c8e5 100644 (file)
@@ -25,12 +25,69 @@ the Ceph Object Gateway gets the ticket, it looks at the tenant, and the user
 roles that are assigned to that ticket, and accepts/rejects the request
 according to the ``rgw keystone accepted roles`` configurable.
 
+
+Prior to Kilo
+-------------
+
+Keystone itself needs to be configured to point to the Ceph Object Gateway as an
+object-storage endpoint::
+
+    keystone service-create --name swift --type object-store
+    keystone endpoint-create --service-id <id> --publicurl http://radosgw.example.com/swift/v1 \
+            --internalurl http://radosgw.example.com/swift/v1 --adminurl http://radosgw.example.com/swift/v1
+
+
+As of Kilo
+----------
+
 Keystone itself needs to be configured to point to the Ceph Object Gateway as an
 object-storage endpoint::
 
-       keystone service-create --name swift --type object-store
-       keystone endpoint-create --service-id <id> --publicurl http://radosgw.example.com/swift/v1 \
-               --internalurl http://radosgw.example.com/swift/v1 --adminurl http://radosgw.example.com/swift/v1
+  openstack service create --name=swift \
+                           --description="Swift Service" \
+                           object-store
+  +-------------+----------------------------------+
+  | Field       | Value                            |
+  +-------------+----------------------------------+
+  | description | Swift Service                    |
+  | enabled     | True                             |
+  | id          | 37c4c0e79571404cb4644201a4a6e5ee |
+  | name        | swift                            |
+  | type        | object-store                     |
+  +-------------+----------------------------------+
+
+  openstack endpoint create --region RegionOne \
+       --publicurl   "http://radosgw.example.com:8080/swift/v1" \
+       --adminurl    "http://radosgw.example.com:8080/swift/v1" \
+       --internalurl "http://radosgw.example.com:8080/swift/v1" \
+       swift
+  +--------------+------------------------------------------+
+  | Field        | Value                                    |
+  +--------------+------------------------------------------+
+  | adminurl     | http://radosgw.example.com:8080/swift/v1 |
+  | id           | e4249d2b60e44743a67b5e5b38c18dd3         |
+  | internalurl  | http://radosgw.example.com:8080/swift/v1 |
+  | publicurl    | http://radosgw.example.com:8080/swift/v1 |
+  | region       | RegionOne                                |
+  | service_id   | 37c4c0e79571404cb4644201a4a6e5ee         |
+  | service_name | swift                                    |
+  | service_type | object-store                             |
+  +--------------+------------------------------------------+
+
+  openstack endpoint show object-store
+  +--------------+------------------------------------------+
+  | Field        | Value                                    |
+  +--------------+------------------------------------------+
+  | adminurl     | http://radosgw.example.com:8080/swift/v1 |
+  | enabled      | True                                     |
+  | id           | e4249d2b60e44743a67b5e5b38c18dd3         |
+  | internalurl  | http://radosgw.example.com:8080/swift/v1 |
+  | publicurl    | http://radosgw.example.com:8080/swift/v1 |
+  | region       | RegionOne                                |
+  | service_id   | 37c4c0e79571404cb4644201a4a6e5ee         |
+  | service_name | swift                                    |
+  | service_type | object-store                             |
+  +--------------+------------------------------------------+
 
 
 The keystone URL is the Keystone admin RESTful API URL. The admin token is the
index ca53856435e018a5dd5c769826b4a5213c086fa5..db232b100e2afc0331f14312af23416db3a5d570 100644 (file)
@@ -237,6 +237,16 @@ assuming your configuration file has ``flavor = keystone+cachemanagement``::
     [paste_deploy]
     flavor = keystone
 
+Image properties
+~~~~~~~~~~~~~~~~
+
+We recommend using the following properties for your images:
+
+- ``hw_scsi_model=virtio-scsi``: add the virtio-scsi controller and get better performance and support for the discard operation
+- ``hw_disk_bus=scsi``: connect every Cinder block device to that controller
+- ``hw_qemu_guest_agent=yes``: enable the QEMU guest agent
+- ``os_require_quiesce=yes``: send fs-freeze/thaw calls through the QEMU guest agent
+
 
 Configuring Cinder
 ------------------
@@ -302,7 +312,7 @@ configure the ephemeral backend for Nova.
 
 It is recommended to enable the RBD cache in your Ceph configuration file
 (enabled by default since Giant). Moreover, enabling the admin socket
-brings a lot of benefits while troubleshoothing. Having one socket
+brings a lot of benefits while troubleshooting. Having one socket
 per virtual machine using a Ceph block device will help when investigating performance issues and/or wrong behaviors.
 
 This socket can be accessed like this::
@@ -314,7 +324,17 @@ Now on every compute nodes edit your Ceph configuration file::
     [client]
         rbd cache = true
         rbd cache writethrough until flush = true
-        admin socket = /var/run/ceph/$cluster-$type.$id.$pid.$cctid.asok
+        admin socket = /var/run/ceph/guests/$cluster-$type.$id.$pid.$cctid.asok
+        log file = /var/log/qemu/qemu-guest-$pid.log
+        rbd concurrent management ops = 20
+
+Configure the permissions of these paths::
+
+    mkdir -p /var/run/ceph/guests/ /var/log/qemu/
+    chown qemu:libvirtd /var/run/ceph/guests /var/log/qemu/
+
+Note that user ``qemu`` and group ``libvirtd`` can vary depending on your system.
+The provided example works for Red Hat-based systems.
 
 .. tip:: If your virtual machine is already running you can simply restart it to get the socket
 
@@ -351,8 +371,7 @@ On every Compute node, edit ``/etc/nova/nova.conf`` and add::
 
 To ensure a proper live-migration, use the following flags::
 
-    libvirt_live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST"
-
+    libvirt_live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
 
 Juno
 ~~~~
@@ -383,9 +402,19 @@ under the ``[libvirt]`` section::
     inject_key = false
     inject_partition = -2
 
-To ensure a proper live-migration, use the following flags::
+To ensure a proper live-migration, use the following flags (under the ``[libvirt]`` section)::
 
-    live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST"
+    live_migration_flag="VIR_MIGRATE_UNDEFINE_SOURCE,VIR_MIGRATE_PEER2PEER,VIR_MIGRATE_LIVE,VIR_MIGRATE_PERSIST_DEST,VIR_MIGRATE_TUNNELLED"
+
+Kilo
+~~~~
+
+Enable discard support for the virtual machine's ephemeral root disk::
+
+    [libvirt]
+    ...
+    ...
+    hw_disk_discard = unmap # enable discard support (be careful of performance)
 
 
 Restart OpenStack