.. code:: bash
- $ ceph nfs export create cephfs <fsname> <cluster_id> <pseudo_path> [--readonly] [--path=/path/in/cephfs] [--client_addr <value>...] [--squash <value>]
+ $ ceph nfs export create cephfs --cluster-id <cluster_id> --pseudo-path <pseudo_path> --fsname <fsname> [--readonly] [--path=/path/in/cephfs] [--client_addr <value>...] [--squash <value>]
This creates export RADOS objects containing the export block, where
-``<fsname>`` is the name of the FS volume used by the NFS Ganesha cluster
-that will serve this export.
-
``<cluster_id>`` is the NFS Ganesha cluster ID.
``<pseudo_path>`` is the export position within the NFS v4 Pseudo Filesystem where the export will be available on the server. It must be an absolute path and unique.
+``<fsname>`` is the name of the FS volume used by the NFS Ganesha cluster
+that will serve this export.
+
``<path>`` is the path within the CephFS filesystem. A valid path must be
given; it defaults to '/' and need not be unique.
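The path of a subvolume can be fetched with ``ceph fs subvolume getpath``; a
minimal sketch, assuming its standard argument list:

.. code:: bash

   $ ceph fs subvolume getpath <vol_name> <subvol_name> [--group_name <subvol_group_name>]

For illustration, a complete invocation of the new named-argument form of the
export command might then look like this (``mynfs`` and ``myfs`` are
hypothetical names, not values from this change):

.. code:: bash

   $ ceph nfs export create cephfs --cluster-id mynfs --pseudo-path /cephfs --fsname myfs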
- cephadm.shell:
host.a:
- ceph nfs cluster create foo --placement=2 || ceph nfs cluster create cephfs foo --placement=2
- - ceph nfs export create cephfs foofs foo --binding /fake || ceph nfs export create cephfs foofs foo --pseudo-path /fake
+ - ceph nfs export create cephfs --fsname foofs --clusterid foo --binding /fake || ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake
# we can't do wait_for_service here because with octopus it's nfs.ganesha-foo not nfs.foo
- while ! ceph orch ls | grep nfs | grep 2/2 ; do sleep 1 ; done
- cephadm.shell:
host.a:
- - ceph nfs export create cephfs foofs foo --pseudo-path /fake
+ - ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake
host.a:
- ceph fs volume create foofs
- ceph nfs cluster create foo --ingress --virtual-ip {{VIP0}}/{{VIPPREFIXLEN}} --port 2999
- - ceph nfs export create cephfs foofs foo --pseudo-path /fake
+ - ceph nfs export create cephfs --fsname foofs --cluster-id foo --pseudo-path /fake
- cephadm.wait_for_service:
service: nfs.foo
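Once ``nfs.foo`` is reported up, the export could be exercised from a client
through the ingress virtual IP; a hedged sketch (the ``/mnt/fake`` mountpoint
is a placeholder, and the mount syntax mirrors the hint echoed later in this
change):

.. code:: bash

   # client side; {{VIP0}} is the ingress virtual IP configured above
   mkdir -p /mnt/fake
   mount -t nfs -o port=2999 {{VIP0}}:/fake /mnt/fake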
ceph_adm orch set backend test_orchestrator
ceph_adm test_orchestrator load_data -i $CEPH_ROOT/src/pybind/mgr/test_orchestrator/dummy_data.json
prun ceph_adm nfs cluster create $cluster_id
- prun ceph_adm nfs export create cephfs "a" $cluster_id "/cephfs"
+ prun ceph_adm nfs export create cephfs --fsname "a" --cluster-id $cluster_id --pseudo-path "/cephfs"
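As a quick sanity check (an assumed addition, not part of the original
script), the export just created could be listed with ``ceph nfs export ls``,
reusing the script's own ``prun``/``ceph_adm`` helpers:

.. code:: bash

   prun ceph_adm nfs export ls $cluster_id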
for name in a b c d e f g h i j k l m n o p
do
port="2049"
prun ceph_adm nfs cluster create $cluster_id
if [ "$CEPH_NUM_MDS" -gt 0 ]; then
- prun ceph_adm nfs export create cephfs "a" $cluster_id $pseudo_path
+ prun ceph_adm nfs export create cephfs --fsname "a" --cluster-id $cluster_id --pseudo-path $pseudo_path
echo "Mount using: mount -t nfs -o port=$port $IP:$pseudo_path mountpoint"
fi
if [ "$CEPH_NUM_RGW" -gt 0 ]; then