args = parser.parse_args()
return args
-def main():
- from gevent import monkey; monkey.patch_all()
- from orchestra import monkey; monkey.patch_all()
-
- import logging
- import time
-
- log = logging.getLogger(__name__)
-
- ctx = parse_args()
-
- loglevel = logging.INFO
- if ctx.verbose:
- loglevel = logging.DEBUG
-
- logging.basicConfig(
- level=loglevel,
- )
-
- from teuthology.misc import read_config
- read_config(ctx)
-
- log.info('\n '.join(['targets:', ] + yaml.safe_dump(ctx.config['targets'], default_flow_style=False).splitlines()))
-
- if ctx.owner is None:
- from teuthology.misc import get_user
- ctx.owner = get_user()
-
- from teuthology.task.internal import check_lock, connect
- check_lock(ctx, None)
- connect(ctx, None)
-
- log.info('Unmount cfuse and killing daemons...')
+def shutdown_daemons(ctx, log):
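+    """Unmount cfuse mounts and kill daemons on every remote, then wait for all the shutdowns to finish."""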
from orchestra import run
nodes = {}
for remote in ctx.cluster.remotes.iterkeys():
for name, proc in nodes.iteritems():
log.info('Waiting for %s to finish shutdowns...', name)
proc.exitstatus.get()
- log.info('All daemons killed.')
+def find_kernel_mounts(ctx, log):
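+    """Probe each remote for a kernel ceph mount and return the list of remotes that have one."""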
+ from orchestra import run
nodes = {}
log.info('Looking for kernel mounts to handle...')
for remote in ctx.cluster.remotes.iterkeys():
kernel_mounts.append(remote)
except run.CommandFailedError: # no mounts!
log.debug('no kernel mount on %s', remote.name)
+
+ return kernel_mounts
+
+def remove_kernel_mounts(ctx, kernel_mounts, log):
"""
properly we should be able to just do a forced unmount,
- but that doesn't seem to be working, so we'll reboot instead
+    but that doesn't seem to work, so callers should reboot the node instead
+ """
+ from orchestra import run
nodes = {}
for remote in kernel_mounts:
log.info('clearing kernel mount from %s', remote.name)
'grep', 'ceph', '/etc/mtab', run.Raw('|'),
'grep', '-o', "on /.* type", run.Raw('|'),
'grep', '-o', "/.* ", run.Raw('|'),
- 'xargs', 'sudo', 'umount', '-f', run.Raw(';')
+ 'xargs', 'sudo', 'umount', '-f', run.Raw(';'),
'fi'
- ]
+ ],
wait=False
)
nodes[remote] = proc
- """
+
+    for remote, proc in nodes.iteritems():
+ proc.exitstatus.get()
+
+def reboot_kernel_mounts(ctx, kernel_mounts, log):
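+    """Force an immediate no-sync reboot of every remote holding a kernel mount, then reconnect once the nodes come back."""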
+ from orchestra import run
+ import time
nodes = {}
-
for remote in kernel_mounts:
log.info('rebooting %s', remote.name)
proc = remote.run( # note use of -n to force a no-sync reboot
    time.sleep(5)  # if we reconnect too quickly, it succeeds -- the host hasn't gone down yet
    reconnect(ctx, 300)  # allow 5 minutes for the reboots
-
+def remove_testing_tree(ctx, log):
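+    """Remove the test data tree from the filesystem on every remote and wait for the removals to finish."""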
+ from orchestra import run
nodes = {}
- log.info('Clearing filesystem of test data...')
for remote in ctx.cluster.remotes.iterkeys():
proc = remote.run(
args=[
for name, proc in nodes.iteritems():
log.info('Waiting for %s to clear filesystem...', name)
proc.exitstatus.get()
+
+def main():
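+    """Parse arguments, set up logging, connect to the targets, and clean them up: kill daemons, clear kernel mounts, and remove test data."""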
+ from gevent import monkey; monkey.patch_all()
+ from orchestra import monkey; monkey.patch_all()
+
+ import logging
+
+ log = logging.getLogger(__name__)
+
+ ctx = parse_args()
+
+ loglevel = logging.INFO
+ if ctx.verbose:
+ loglevel = logging.DEBUG
+
+ logging.basicConfig(
+ level=loglevel,
+ )
+
+ from teuthology.misc import read_config
+ read_config(ctx)
+
+ log.info('\n '.join(['targets:', ] + yaml.safe_dump(ctx.config['targets'], default_flow_style=False).splitlines()))
+
+ if ctx.owner is None:
+ from teuthology.misc import get_user
+ ctx.owner = get_user()
+
+ from teuthology.task.internal import check_lock, connect
+ check_lock(ctx, None)
+ connect(ctx, None)
+
+    log.info('Unmounting cfuse and killing daemons...')
+ shutdown_daemons(ctx, log)
+ log.info('All daemons killed.')
+
+ log.info('Dealing with any kernel mounts...')
+ kernel_mounts = find_kernel_mounts(ctx, log)
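+    # a forced unmount doesn't seem to work (see remove_kernel_mounts), so reboot instead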
+ #remove_kernel_mounts(ctx, kernel_mounts, log)
+ reboot_kernel_mounts(ctx, kernel_mounts, log)
+ log.info('All kernel mounts gone.')
+
+ log.info('Clearing filesystem of test data...')
+ remove_testing_tree(ctx, log)
    log.info('Filesystem cleared.')