+++ /dev/null
-roles:
-- - host.a
- - mon.a
- - mgr.x
- - osd.0
- - osd.1
- - client.0
- - ceph.nvmeof.nvmeof.a
-- - host.b
- - mon.b
- - mon.c
- - osd.2
- - osd.3
- - osd.4
- - client.1
- - ceph.nvmeof.nvmeof.b
-- - client.2
-- - client.3
-
-overrides:
- ceph:
- conf:
- mon:
- # cephadm can take up to 5 minutes to bring up remaining mons
- mon down mkfs grace: 300
--- /dev/null
+roles:
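+# Four gateways (ceph.nvmeof.nvmeof.a-d), one per host; client.0 and
+# client.1 are standalone nodes used as initiators.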
+- - client.0
+- - client.1
+- - host.a
+ - mon.a
+ - mgr.x
+ - osd.0
+ - client.2
+ - ceph.nvmeof.nvmeof.a
+- - host.b
+ - mon.b
+ - osd.1
+ - client.3
+ - ceph.nvmeof.nvmeof.b
+- - host.c
+ - mon.c
+ - osd.2
+ - client.4
+ - ceph.nvmeof.nvmeof.c
+- - host.d
+ - osd.3
+ - client.5
+ - ceph.nvmeof.nvmeof.d
+
+overrides:
+ ceph:
+ conf:
+ mon:
+ # cephadm can take up to 5 minutes to bring up remaining mons
+ mon down mkfs grace: 300
tasks:
- nvmeof:
- client: client.0
+ installer: host.a
gw_image: quay.io/ceph/nvmeof:1.2 # "default" is the image cephadm defaults to; change to test a specific nvmeof image, e.g. "latest"
rbd:
pool_name: mypool
- workunit:
no_coverage_and_limits: true
clients:
- client.2:
+ client.0:
- nvmeof/setup_subsystem.sh
env:
RBD_POOL: mypool
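+ # setup_subsystem.sh is assumed to create the NVMe-oF subsystems and
+ # namespaces backed by images in RBD_POOL.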
no_coverage_and_limits: true
timeout: 30m
clients:
- client.2:
+ client.0:
- nvmeof/basic_tests.sh
- nvmeof/fio_test.sh --start_ns 1 --end_ns 30 --rbd_iostat
- client.3:
+ client.1:
- nvmeof/basic_tests.sh
- nvmeof/fio_test.sh --start_ns 31 --end_ns 60
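+ # The two initiators split the namespace range: client.0 exercises
+ # namespaces 1-30 (with --rbd_iostat), client.1 exercises 31-60.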
env:
tasks:
- nvmeof:
- client: client.0
+ installer: host.a
gw_image: quay.io/ceph/nvmeof:1.2 # "default" is the image cephadm defaults to; change to test a specific nvmeof image, e.g. "latest"
rbd:
pool_name: mypool
- workunit:
no_coverage_and_limits: true
clients:
- client.2:
+ client.0:
- nvmeof/setup_subsystem.sh
env:
RBD_POOL: mypool
no_coverage_and_limits: true
timeout: 30m
clients:
- client.2:
+ client.0:
- nvmeof/basic_tests.sh
- nvmeof/fio_test.sh --rbd_iostat
- client.3:
+ client.1:
- nvmeof/basic_tests.sh
- nvmeof/namespace_test.sh
env:
tasks:
- nvmeof:
- client: client.0
+ installer: host.a
gw_image: quay.io/ceph/nvmeof:1.2 # "default" is the image cephadm defaults to; change to test a specific nvmeof image, e.g. "latest"
rbd:
pool_name: mypool
no_coverage_and_limits: true
timeout: 30m
clients:
- client.2:
+ client.0:
- nvmeof/setup_subsystem.sh
- nvmeof/basic_tests.sh
- nvmeof/fio_test.sh --rbd_iostat
no_coverage_and_limits: true
timeout: 30m
clients:
- client.2:
- - nvmeof/scalability_test.sh nvmeof.a
- - nvmeof/scalability_test.sh nvmeof.b
+ client.0:
+ - nvmeof/scalability_test.sh nvmeof.a,nvmeof.b
+ - nvmeof/scalability_test.sh nvmeof.b,nvmeof.c,nvmeof.d
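+ # Gateways are now passed as a comma-separated group, so one run can
+ # scale several gateways down and back up together (assumed script
+ # behavior); the gateways left out of the group keep serving I/O.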
env:
SCALING_DELAYS: '50'
-
+++ /dev/null
-roles:
-- - host.a
- - mon.a
- - mgr.x
- - osd.0
- - osd.1
- - client.0
- - ceph.nvmeof.nvmeof.a
-- - host.b
- - mon.b
- - osd.2
- - osd.3
- - osd.4
- - client.1
- - ceph.nvmeof.nvmeof.b
-- - host.c
- - mon.c
- - osd.5
- - osd.6
- - osd.7
- - client.2
- - ceph.nvmeof.nvmeof.c
-- - client.3 # initiator
-
-overrides:
- ceph:
- conf:
- mon:
- # cephadm can take up to 5 minutes to bring up remaining mons
- mon down mkfs grace: 300
--- /dev/null
+roles:
+- - client.0 # initiator
+- - host.a
+ - mon.a
+ - mgr.x
+ - osd.0
+ - osd.1
+ - client.1
+ - ceph.nvmeof.nvmeof.a
+- - host.b
+ - mon.b
+ - osd.2
+ - osd.3
+ - osd.4
+ - client.2
+ - ceph.nvmeof.nvmeof.b
+- - host.c
+ - mon.c
+ - osd.5
+ - osd.6
+ - osd.7
+ - client.3
+ - ceph.nvmeof.nvmeof.c
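+# host.d carries only a gateway and a client role (no mon or OSDs);
+# the three mons stay on hosts a-c.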
+- - host.d
+ - client.4
+ - ceph.nvmeof.nvmeof.d
+
+overrides:
+ ceph:
+ conf:
+ mon:
+ # cephadm can take up to 5 minutes to bring up remaining mons
+ mon down mkfs grace: 300
tasks:
- nvmeof:
- client: client.0
+ installer: host.a
gw_image: quay.io/ceph/nvmeof:1.2 # "default" is the image cephadm defaults to; change to test a specific nvmeof image, e.g. "latest"
rbd:
pool_name: mypool
- workunit:
no_coverage_and_limits: true
clients:
- client.3:
+ client.0:
- nvmeof/setup_subsystem.sh
- nvmeof/basic_tests.sh
env:
tasks:
- nvmeof.thrash:
- checker_host: 'client.3'
+ checker_host: 'client.0'
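+ # checker_host is the initiator used to verify the gateways between
+ # failure injections; switch_thrashers makes the nvmeof and mon
+ # thrashers take turns rather than fail both at once (assumed semantics).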
switch_thrashers: True
- mon_thrash:
tasks:
- nvmeof.thrash:
- checker_host: 'client.3'
+ checker_host: 'client.0'
no_coverage_and_limits: true
timeout: 30m
clients:
- client.3:
+ client.0:
- nvmeof/fio_test.sh --rbd_iostat
env:
RBD_POOL: mypool
Set up an nvmeof gateway on the installer host, then share the gateway config with the target host.
- nvmeof:
- client: client.0
+ installer: host.a # or 'nvmeof.nvmeof.a'
version: default
rbd:
pool_name: mypool
def setup(self):
super(Nvmeof, self).setup()
try:
- self.client = self.config['client']
+ host = self.config['installer']
except KeyError:
- raise ConfigError('nvmeof requires a client to connect with')
-
- self.cluster_name, type_, self.client_id = misc.split_role(self.client)
- if type_ != 'client':
- msg = 'client role ({0}) must be a client'.format(self.client)
- raise ConfigError(msg)
- self.remote = get_remote_for_role(self.ctx, self.client)
+ raise ConfigError('nvmeof requires an installer host to deploy the service')
+ self.cluster_name, _, _ = misc.split_role(host)
+ self.remote = get_remote_for_role(self.ctx, host)
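+ # 'installer' may name a host role ('host.a') or the gateway daemon
+ # role ('nvmeof.nvmeof.a'); split_role() is used only to recover the
+ # cluster name, and self.remote is the node holding that role.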
def begin(self):
super(Nvmeof, self).begin()