tasks:
- ceph:
- ceph-fuse: [client.0, client.1]
+ - ssh_keys:
- mpi:
nodes: [client.0, client.1]
exec: ior ...
tasks:
- ceph:
- ceph-fuse:
+ - ssh_keys:
- mpi:
exec: ior ...
tasks:
- ceph:
+ - ssh_keys:
- mpi:
nodes: all
exec: ...
+ Example that specifies a working directory for the MPI processes. The
+ surrounding pexec steps symlink each client's mount point to a common
+ path (and remove the link afterward), so the same workdir is valid on
+ every node:
+
+ tasks:
+ - ceph:
+ - ceph-fuse:
+ - pexec:
+ clients:
+ - ln -s /tmp/cephtest/mnt.* /tmp/cephtest/gmnt
+ - ssh_keys:
+ - mpi:
+ exec: fsx-mpi
+ workdir: /tmp/cephtest/gmnt
+ - pexec:
+ clients:
+ - rm -f /tmp/cephtest/gmnt
+
"""
assert isinstance(config, dict), 'task mpi got invalid config'
assert 'exec' in config, 'task mpi got invalid config, missing exec'
hosts.append(ip)
remotes.append(remote)
+ workdir = []
+ if 'workdir' in config:
+ workdir = ['-wdir', config['workdir'] ]
+
log.info('mpi rank 0 is: {name}'.format(name=master_remote.name))
# write out the mpi hosts file
teuthology.write_file(remote=master_remote, path='/tmp/cephtest/mpi-hosts', data='\n'.join(hosts))
log.info('mpiexec on {name}: {cmd}'.format(name=master_remote.name, cmd=mpiexec))
args=['mpiexec', '-f', '/tmp/cephtest/mpi-hosts']
+ args.extend(workdir)
args.extend(mpiexec.split(' '))
master_remote.run(args=args, )
log.info('mpi task completed')