]> git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
mgr/nfs: use keyword args for 'nfs export create rgw'
author Sage Weil <sage@newdream.net>
Wed, 20 Oct 2021 19:38:27 +0000 (15:38 -0400)
committer Sage Weil <sage@newdream.net>
Tue, 2 Nov 2021 21:06:58 +0000 (17:06 -0400)
Signed-off-by: Sage Weil <sage@newdream.net>
doc/mgr/nfs.rst
qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-bucket.yaml [new file with mode: 0644]
qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw.yaml [deleted file]
src/vstart.sh

index 52ddf104fd8fc054e97033f0a9c01461292aaf1d..548f7b62da780a03c802e37df2a5db8896b7b53c 100644 (file)
@@ -275,24 +275,24 @@ To export a bucket
 
 .. code::
 
-   $ ceph nfs export create rgw <bucket_name> <cluster_id> <pseudo_path> [--readonly] [--client_addr <value>...] [--squash <value>]
+   $ ceph nfs export create rgw --cluster-id <cluster_id> --pseudo-path <pseudo_path> --bucket <bucket_name> [--readonly] [--client_addr <value>...] [--squash <value>]
 
 For example, to export *mybucket* via NFS cluster *mynfs* at the pseudo-path */bucketdata* to any host in the ``192.168.10.0/24`` network
 
 .. code::
 
-   $ ceph nfs export create rgw mybucket mynfs /bucketdata --client_addr 192.168.10.0/24
+   $ ceph nfs export create rgw --cluster-id mynfs --pseudo-path /bucketdata --bucket mybucket --client_addr 192.168.10.0/24
 
 .. note:: Export creation is supported only for NFS Ganesha clusters deployed using nfs interface.
 
-``<bucket_name>`` is the name of the bucket that will be exported.
-
-.. note:: Currently, if multi-site RGW is enabled, Ceph can only export RGW buckets in the default realm.
-
 ``<cluster_id>`` is the NFS Ganesha cluster ID.
 
 ``<pseudo_path>`` is the export position within the NFS v4 Pseudo Filesystem where the export will be available on the server. It must be an absolute path and unique.
 
+``<bucket_name>`` is the name of the bucket that will be exported.
+
+.. note:: Currently, if multi-site RGW is enabled, Ceph can only export RGW buckets in the default realm.
+
 ``<client_addr>`` is the list of client address for which these export
 permissions will be applicable. By default all clients can access the export
 according to specified export permissions. See the `NFS-Ganesha Export Sample`_
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-bucket.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw-bucket.yaml
new file mode 100644 (file)
index 0000000..3f49649
--- /dev/null
@@ -0,0 +1,89 @@
+tasks:
+- vip:
+
+# make sure cephadm notices the new IP
+- cephadm.shell:
+    host.a:
+      - ceph orch device ls --refresh
+
+# stop kernel nfs server, if running
+- vip.exec:
+    all-hosts:
+      - systemctl stop nfs-server
+
+- cephadm.shell:
+    host.a:
+      - ceph orch apply rgw foorgw --port 8800
+      - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}}
+
+- vip.exec:
+    host.a:
+      - dnf install -y python3-boto3 || apt install -y python3-boto3
+      - /home/ubuntu/cephtest/cephadm shell radosgw-admin user create --uid foouser --display-name foo > /tmp/user.json
+
+- python:
+    host.a: |
+      import boto3
+      import json
+
+      with open('/tmp/user.json', 'rt') as f:
+          info = json.loads(f.read())
+      s3 = boto3.resource(
+          's3',
+          aws_access_key_id=info['keys'][0]['access_key'],
+          aws_secret_access_key=info['keys'][0]['secret_key'],
+          endpoint_url='http://localhost:8800',
+      )
+      bucket = s3.Bucket('foobucket')
+      bucket.create()
+      bucket.put_object(Key='myobject', Body='thebody')
+
+- cephadm.shell:
+    host.a:
+      - ceph nfs export create rgw --bucket foobucket --cluster-id foo --pseudo-path /foobucket
+
+- cephadm.wait_for_service:
+    service: nfs.foo
+- cephadm.wait_for_service:
+    service: ingress.nfs.foo
+
+## export and mount
+
+- vip.exec:
+    host.a:
+      - mkdir /mnt/foo
+      - sleep 5
+      - mount -t nfs {{VIP0}}:/foobucket /mnt/foo
+      - find /mnt/foo -ls
+      - grep thebody /mnt/foo/myobject
+      - echo test > /mnt/foo/newobject
+      - sync
+
+- python:
+    host.a: |
+      import boto3
+      import json
+      from io import BytesIO
+
+      with open('/tmp/user.json', 'rt') as f:
+          info = json.loads(f.read())
+      s3 = boto3.resource(
+          's3',
+          aws_access_key_id=info['keys'][0]['access_key'],
+          aws_secret_access_key=info['keys'][0]['secret_key'],
+          endpoint_url='http://localhost:8800',
+      )
+      bucket = s3.Bucket('foobucket')
+      data = BytesIO()
+      bucket.download_fileobj(Fileobj=data, Key='newobject')
+      print(data.getvalue())
+      assert data.getvalue().decode() == 'test\n'
+
+- vip.exec:
+    host.a:
+      - umount /mnt/foo
+
+- cephadm.shell:
+    host.a:
+      - ceph nfs export rm foo /foobucket
+      - ceph nfs cluster rm foo
diff --git a/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw.yaml b/qa/suites/orch/cephadm/smoke-roleless/2-services/nfs-ingress-rgw.yaml
deleted file mode 100644 (file)
index 8e18c59..0000000
+++ /dev/null
@@ -1,89 +0,0 @@
-tasks:
-- vip:
-
-# make sure cephadm notices the new IP
-- cephadm.shell:
-    host.a:
-      - ceph orch device ls --refresh
-
-# stop kernel nfs server, if running
-- vip.exec:
-    all-hosts:
-      - systemctl stop nfs-server
-
-- cephadm.shell:
-    host.a:
-      - ceph orch apply rgw foorgw --port 8800
-      - ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}}
-
-- vip.exec:
-    host.a:
-      - dnf install -y python3-boto3 || apt install -y python3-boto3
-      - /home/ubuntu/cephtest/cephadm shell radosgw-admin user create --uid foouser --display-name foo > /tmp/user.json
-
-- python:
-    host.a: |
-      import boto3
-      import json
-
-      with open('/tmp/user.json', 'rt') as f:
-          info = json.loads(f.read())
-      s3 = boto3.resource(
-          's3',
-          aws_access_key_id=info['keys'][0]['access_key'],
-          aws_secret_access_key=info['keys'][0]['secret_key'],
-          endpoint_url='http://localhost:8800',
-      )
-      bucket = s3.Bucket('foobucket')
-      bucket.create()
-      bucket.put_object(Key='myobject', Body='thebody')
-
-- cephadm.shell:
-    host.a:
-      - ceph nfs export create rgw foobucket foo --pseudo-path /foobucket
-
-- cephadm.wait_for_service:
-    service: nfs.foo
-- cephadm.wait_for_service:
-    service: ingress.nfs.foo
-
-## export and mount
-
-- vip.exec:
-    host.a:
-      - mkdir /mnt/foo
-      - sleep 5
-      - mount -t nfs {{VIP0}}:/foobucket /mnt/foo
-      - find /mnt/foo -ls
-      - grep thebody /mnt/foo/myobject
-      - echo test > /mnt/foo/newobject
-      - sync
-
-- python:
-    host.a: |
-      import boto3
-      import json
-      from io import BytesIO
-
-      with open('/tmp/user.json', 'rt') as f:
-          info = json.loads(f.read())
-      s3 = boto3.resource(
-          's3',
-          aws_access_key_id=info['keys'][0]['access_key'],
-          aws_secret_access_key=info['keys'][0]['secret_key'],
-          endpoint_url='http://localhost:8800',
-      )
-      bucket = s3.Bucket('foobucket')
-      data = BytesIO()
-      bucket.download_fileobj(Fileobj=data, Key='newobject')
-      print(data.getvalue())
-      assert data.getvalue().decode() == 'test\n'
-
-- vip.exec:
-    host.a:
-      - umount /mnt/foo
-
-- cephadm.shell:
-    host.a:
-      - ceph nfs export rm foo /foobucket
-      - ceph nfs cluster rm foo
index 0fa55eba02ed9680e9edc8a8ed9ac66e5720b320..2b6a4017afab7a3fabafbb41b5b9196454e1078d 100755 (executable)
@@ -1670,7 +1670,7 @@ if [ $GANESHA_DAEMON_NUM -gt 0 ]; then
        if [ "$CEPH_NUM_RGW" -gt 0 ]; then
             pseudo_path="/rgw"
             do_rgw_create_bucket
-           prun ceph_adm nfs export create rgw "nfs-bucket" $cluster_id $pseudo_path
+           prun ceph_adm nfs export create rgw --cluster-id $cluster_id --pseudo-path $pseudo_path --bucket "nfs-bucket"
             echo "Mount using: mount -t nfs -o port=$port $IP:$pseudo_path mountpoint"
        fi
     else