.. code::
- $ ceph nfs export create rgw <bucket_name> <cluster_id> <pseudo_path> [--readonly] [--client_addr <value>...] [--squash <value>]
+ $ ceph nfs export create rgw --cluster-id <cluster_id> --pseudo-path <pseudo_path> --bucket <bucket_name> [--readonly] [--client_addr <value>...] [--squash <value>]
For example, to export *mybucket* via the NFS cluster *mynfs* at the pseudo-path */bucketdata* to any host in the ``192.168.10.0/24`` network:
.. code::
- $ ceph nfs export create rgw mybucket mynfs /bucketdata --client_addr 192.168.10.0/24
+ $ ceph nfs export create rgw --cluster-id mynfs --pseudo-path /bucketdata --bucket mybucket --client_addr 192.168.10.0/24
.. note:: Export creation is supported only for NFS Ganesha clusters deployed using the ``nfs`` interface.
-``<bucket_name>`` is the name of the bucket that will be exported.
-
-.. note:: Currently, if multi-site RGW is enabled, Ceph can only export RGW buckets in the default realm.
-
``<cluster_id>`` is the NFS Ganesha cluster ID.
``<pseudo_path>`` is the export position within the NFS v4 Pseudo Filesystem where the export will be available on the server. It must be an absolute path and unique.
+``<bucket_name>`` is the name of the bucket that will be exported.
+
+.. note:: Currently, if multi-site RGW is enabled, Ceph can only export RGW buckets in the default realm.
+
``<client_addr>`` is the list of client addresses for which these export
permissions will be applicable. By default, all clients can access the export
according to the specified export permissions. See the `NFS-Ganesha Export Sample`_
for permissible values.
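
For instance, a hypothetical export restricted to a single client network with
root squashing might look like this (``root_squash`` is assumed here to be an
accepted squash value; the export sample linked above lists the permitted values):

.. code::

   $ ceph nfs export create rgw --cluster-id mynfs --pseudo-path /bucketdata --bucket mybucket --client_addr 192.168.10.0/24 --squash root_squash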
--- /dev/null
+tasks:
+- vip:
+
+# make sure cephadm notices the new IP
+- cephadm.shell:
+ host.a:
+ - ceph orch device ls --refresh
+
+# stop kernel nfs server, if running
+- vip.exec:
+ all-hosts:
+ - systemctl stop nfs-server
+
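+# deploy rgw and an nfs cluster fronted by the ingress virtual IP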
+- cephadm.shell:
+ host.a:
+ - ceph orch apply rgw foorgw --port 8800
+ - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}}
+
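+# install boto3 and create an rgw user for the S3 steps below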
+- vip.exec:
+ host.a:
+ - dnf install -y python3-boto3 || apt install -y python3-boto3
+ - /home/ubuntu/cephtest/cephadm shell radosgw-admin user create --uid foouser --display-name foo > /tmp/user.json
+
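+# create a bucket and seed it with an object via S3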
+- python:
+ host.a: |
+ import boto3
+ import json
+
+ with open('/tmp/user.json', 'rt') as f:
+ info = json.loads(f.read())
+ s3 = boto3.resource(
+ 's3',
+ aws_access_key_id=info['keys'][0]['access_key'],
+ aws_secret_access_key=info['keys'][0]['secret_key'],
+ endpoint_url='http://localhost:8800',
+ )
+ bucket = s3.Bucket('foobucket')
+ bucket.create()
+ bucket.put_object(Key='myobject', Body='thebody')
+
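+# export the bucket over nfs using the new flag-based interface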
+- cephadm.shell:
+ host.a:
+ - ceph nfs export create rgw --bucket foobucket --cluster-id foo --pseudo-path /foobucket
+
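+# wait for the nfs and ingress services to come up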
+- cephadm.wait_for_service:
+ service: nfs.foo
+- cephadm.wait_for_service:
+ service: ingress.nfs.foo
+
+## export and mount
+
+- vip.exec:
+ host.a:
+ - mkdir /mnt/foo
+ - sleep 5
+ - mount -t nfs {{VIP0}}:/foobucket /mnt/foo
+ - find /mnt/foo -ls
+ - grep thebody /mnt/foo/myobject
+ - echo test > /mnt/foo/newobject
+ - sync
+
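+# confirm the file written over nfs is visible via S3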
+- python:
+ host.a: |
+ import boto3
+ import json
+ from io import BytesIO
+
+ with open('/tmp/user.json', 'rt') as f:
+ info = json.loads(f.read())
+ s3 = boto3.resource(
+ 's3',
+ aws_access_key_id=info['keys'][0]['access_key'],
+ aws_secret_access_key=info['keys'][0]['secret_key'],
+ endpoint_url='http://localhost:8800',
+ )
+ bucket = s3.Bucket('foobucket')
+ data = BytesIO()
+ bucket.download_fileobj(Fileobj=data, Key='newobject')
+ print(data.getvalue())
+ assert data.getvalue().decode() == 'test\n'
+
+- vip.exec:
+ host.a:
+ - umount /mnt/foo
+
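+# clean up: remove the export and tear down the cluster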
+- cephadm.shell:
+ host.a:
+ - ceph nfs export rm foo /foobucket
+ - ceph nfs cluster rm foo
+++ /dev/null
-tasks:
-- vip:
-
-# make sure cephadm notices the new IP
-- cephadm.shell:
- host.a:
- - ceph orch device ls --refresh
-
-# stop kernel nfs server, if running
-- vip.exec:
- all-hosts:
- - systemctl stop nfs-server
-
-- cephadm.shell:
- host.a:
- - ceph orch apply rgw foorgw --port 8800
- - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}}
-
-- vip.exec:
- host.a:
- - dnf install -y python3-boto3 || apt install -y python3-boto3
- - /home/ubuntu/cephtest/cephadm shell radosgw-admin user create --uid foouser --display-name foo > /tmp/user.json
-
-- python:
- host.a: |
- import boto3
- import json
-
- with open('/tmp/user.json', 'rt') as f:
- info = json.loads(f.read())
- s3 = boto3.resource(
- 's3',
- aws_access_key_id=info['keys'][0]['access_key'],
- aws_secret_access_key=info['keys'][0]['secret_key'],
- endpoint_url='http://localhost:8800',
- )
- bucket = s3.Bucket('foobucket')
- bucket.create()
- bucket.put_object(Key='myobject', Body='thebody')
-
-- cephadm.shell:
- host.a:
- - ceph nfs export create rgw foobucket foo --pseudo-path /foobucket
-
-- cephadm.wait_for_service:
- service: nfs.foo
-- cephadm.wait_for_service:
- service: ingress.nfs.foo
-
-## export and mount
-
-- vip.exec:
- host.a:
- - mkdir /mnt/foo
- - sleep 5
- - mount -t nfs {{VIP0}}:/foobucket /mnt/foo
- - find /mnt/foo -ls
- - grep thebody /mnt/foo/myobject
- - echo test > /mnt/foo/newobject
- - sync
-
-- python:
- host.a: |
- import boto3
- import json
- from io import BytesIO
-
- with open('/tmp/user.json', 'rt') as f:
- info = json.loads(f.read())
- s3 = boto3.resource(
- 's3',
- aws_access_key_id=info['keys'][0]['access_key'],
- aws_secret_access_key=info['keys'][0]['secret_key'],
- endpoint_url='http://localhost:8800',
- )
- bucket = s3.Bucket('foobucket')
- data = BytesIO()
- bucket.download_fileobj(Fileobj=data, Key='newobject')
- print(data.getvalue())
- assert data.getvalue().decode() == 'test\n'
-
-- vip.exec:
- host.a:
- - umount /mnt/foo
-
-- cephadm.shell:
- host.a:
- - ceph nfs export rm foo /foobucket
- - ceph nfs cluster rm foo
if [ "$CEPH_NUM_RGW" -gt 0 ]; then
pseudo_path="/rgw"
do_rgw_create_bucket
- prun ceph_adm nfs export create rgw "nfs-bucket" $cluster_id $pseudo_path
+ prun ceph_adm nfs export create rgw --cluster-id $cluster_id --pseudo-path $pseudo_path --bucket "nfs-bucket"
echo "Mount using: mount -t nfs -o port=$port $IP:$pseudo_path mountpoint"
fi
else