valgrind_exception = None
for (proc, remote) in lookup_procs:
- proc.exitstatus.get()
+ proc.wait()
out = proc.stdout.getvalue()
for line in out.split('\n'):
if line == '':
log.debug('ceph-fuse not yet mounted, got fs type {fstype!r}'.format(fstype=fstype))
# it shouldn't have exited yet; exposes some trivial problems
- assert not fuse.exitstatus.ready()
+ assert fuse.poll() is None
time.sleep(5)
log.info('ceph-fuse is mounted on %s', mountpoint)
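The old orchestra API exposed the exit status as a gevent AsyncResult; its replacement follows subprocess.Popen semantics, which is also why the assertion above is tightened to compare poll() against None: an exit status of 0 is falsy, so "assert not fuse.poll()" would pass even after a clean exit. The equivalences, assuming the new RemoteProcess mirrors Popen:

    # mapping between the two APIs (proc is a running RemoteProcess)
    status = proc.wait()            # old: status = proc.exitstatus.get()
    running = proc.poll() is None   # old: running = not proc.exitstatus.ready()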
],
wait=False,
check_status=False)
- result = proc.exitstatus.get();
+ result = proc.wait()
if result != 0:
remote.run(
wait=False
)
- hostresult = hostproc.exitstatus.get()
- clientresult = clientproc.exitstatus.get()
+ hostresult = hostproc.wait()
+ clientresult = clientproc.wait()
if (hostresult != 0) or (clientresult != 0):
raise Exception("Did not pass locking test!")
log.info('finished locktest executable with results {r} and {s}'.format(r=hostresult, s=clientresult))
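For readers outside teuthology, the same launch-both-then-reap idiom in plain subprocess terms (the two script names are hypothetical stand-ins for the real locktest invocations):

    import subprocess

    # start both ends of the lock test without blocking
    hostproc = subprocess.Popen(['./locktest-host.sh'])
    clientproc = subprocess.Popen(['./locktest-client.sh'])

    # reap both before judging the run, so neither side is left behind
    hostresult = hostproc.wait()
    clientresult = clientproc.wait()
    if hostresult != 0 or clientresult != 0:
        raise Exception('Did not pass locking test!')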
# write some data
p = rados_start(ctx, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096',
'--no-cleanup'])
- err = p.exitstatus.get();
+ err = p.wait()
log.info('err is %d' % err)
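A sketch of the start-then-reap pattern with remote.run() directly (the rados invocation mirrors the hunk above; whether rados_start() disables status checking is an assumption):

    # A process started with wait=False runs in the background; wait()
    # later blocks and returns its exit status. Unless check_status=False
    # was given, a nonzero exit raises CommandFailedError instead of
    # returning, so 'err' could only ever log 0 in that case.
    p = remote.run(
        args=['rados', '-p', 'rbd', 'bench', '15', 'write',
              '-b', '4096', '--no-cleanup'],
        wait=False,
        check_status=False,  # report failure via the return value
    )
    err = p.wait()
    log.info('err is %d' % err)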
# mark osd.0 out to trigger a rebalance/backfill
manager.revive_osd(1)
# wait for our writes to complete + succeed
- err = p.exitstatus.get()
+ err = p.wait()
log.info('err is %d' % err)
# cluster must recover
time.sleep(sleep_time)
proc.stdin.close() # causes daemon-helper to send SIGKILL to ceph -w
- proc.exitstatus.get()
+ proc.wait()
lines = proc.stdout.getvalue().split('\n')
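This shutdown idiom, repeated in the hunks below, relies on the daemon-helper wrapper watching its own stdin: EOF there is its cue to kill the wrapped command. In outline (the exact daemon-helper arguments are an assumption reconstructed from the comment):

    proc = remote.run(
        args=['daemon-helper', 'kill', 'ceph', '-w'],
        stdin=run.PIPE,
        stdout=StringIO(),
        wait=False,
    )
    # ... let 'ceph -w' stream for a while ...
    proc.stdin.close()   # EOF on stdin -> daemon-helper SIGKILLs 'ceph -w'
    proc.wait()          # reap the wrapper itself
    lines = proc.stdout.getvalue().split('\n')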
time.sleep(sleep_time)
proc.stdin.close() # causes daemon-helper to send SIGKILL to ceph -w
- proc.exitstatus.get()
+ proc.wait()
lines = proc.stdout.getvalue().split('\n')
time.sleep(sleep_time)
proc.stdin.close() # causes daemon-helper to send SIGKILL to ceph -w
- proc.exitstatus.get()
+ proc.wait()
lines = proc.stdout.getvalue().split('\n')
time.sleep(sleep_time)
proc.stdin.close() # causes daemon-helper to send SIGKILL to ceph -w
- proc.exitstatus.get()
+ proc.wait()
lines = proc.stdout.getvalue().split('\n')
time.sleep(sleep_time)
proc.stdin.close() # causes daemon-helper to send SIGKILL to ceph -w
- proc.exitstatus.get()
+ proc.wait()
lines = proc.stdout.getvalue().split('\n')
manager.revive_osd(1)
# wait for our writes to complete + succeed
- err = p.exitstatus.get()
+ err = p.wait()
log.info('err is %d' % err)
# cluster must repeer
# write some more (make sure osd.2 really is divergent)
p = rados_start(testdir, mon, ['-p', 'rbd', 'bench', '15', 'write', '-b', '4096'])
- p.exitstatus.get();
+ p.wait()
# revive divergent osd
manager.revive_osd(2)
p = rados_start(testdir, mon,
['-p', 'rbd', 'bench', '60', 'write', '-b', '1',
'--no-cleanup'])
- p.exitstatus.get()
+ p.wait()
# few objects in metadata pool (with pg log, normal recovery)
for f in range(1, 20):
p = rados_start(testdir, mon, ['-p', 'metadata', 'put',
'foo.%d' % f, '/etc/passwd'])
- p.exitstatus.get()
+ p.wait()
# move it back
manager.raw_cluster_cmd('osd', 'in', '0', '1')
for client, proc in procs:
log.info("shutting down sync agent on %s", client)
proc.stdin.close()
- proc.exitstatus.get()
+ proc.wait()
finally:
for client, proc in procs:
ctx.cluster.only(client).run(
proc.stdin.writelines(['restarted\n'])
proc.stdin.flush()
try:
- proc.exitstatus.get()
+ proc.wait()
except tor.CommandFailedError:
raise Exception('restart task got non-zero exit status from script: {s}'.format(s=c))
finally:
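Taken together, the hunks lean on one contract: with check_status=True (the default) wait() raises tor.CommandFailedError on a nonzero exit, and with check_status=False it returns the status for the caller to inspect. A sketch of both modes, assuming tor is teuthology.orchestra.run and remote is a connected Remote:

    # default: failures raise, as the except clause above expects
    proc = remote.run(args=['false'], wait=False)
    try:
        proc.wait()
    except tor.CommandFailedError:
        pass  # translate or re-raise as the task sees fit

    # check_status=False: failures come back as a return value
    proc = remote.run(args=['false'], wait=False, check_status=False)
    assert proc.wait() != 0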