Otherwise, the umount step fails because the stale mount still exists
when the mountpoint cleanup (rmdir) starts.
See:
2020-10-04T22:08:24.448 INFO:teuthology.nuke.actions:Clearing teuthology firewall rules...
2020-10-04T22:08:24.449 INFO:teuthology.orchestra.run.smithi063:> sudo sh -c 'iptables-save | grep -v teuthology | iptables-restore'
2020-10-04T22:08:24.464 INFO:teuthology.orchestra.run.smithi189:> sudo sh -c 'iptables-save | grep -v teuthology | iptables-restore'
2020-10-04T22:08:24.482 INFO:teuthology.nuke.actions:Cleared teuthology firewall rules.
2020-10-04T22:08:24.483 INFO:teuthology.orchestra.run:Running command with timeout 900
2020-10-04T22:08:24.483 INFO:teuthology.orchestra.run.smithi063:> (cd /home/ubuntu/cephtest && exec stat --file-system '--printf=%T
2020-10-04T22:08:24.483 INFO:teuthology.orchestra.run.smithi063:> ' -- /home/ubuntu/cephtest/mnt.0)
2020-10-04T22:08:34.550 INFO:teuthology.orchestra.run.smithi063:> sudo logrotate /etc/logrotate.d/ceph-test.conf
2020-10-04T22:08:34.553 INFO:teuthology.orchestra.run.smithi189:> sudo logrotate /etc/logrotate.d/ceph-test.conf
2020-10-04T22:09:04.592 INFO:teuthology.orchestra.run.smithi063:> sudo logrotate /etc/logrotate.d/ceph-test.conf
2020-10-04T22:09:04.596 INFO:teuthology.orchestra.run.smithi189:> sudo logrotate /etc/logrotate.d/ceph-test.conf
2020-10-04T22:09:34.727 INFO:teuthology.orchestra.run.smithi063:> sudo logrotate /etc/logrotate.d/ceph-test.conf
2020-10-04T22:09:34.730 INFO:teuthology.orchestra.run.smithi189:> sudo logrotate /etc/logrotate.d/ceph-test.conf
2020-10-04T22:10:04.815 INFO:teuthology.orchestra.run.smithi063:> sudo logrotate /etc/logrotate.d/ceph-test.conf
2020-10-04T22:10:04.818 INFO:teuthology.orchestra.run.smithi189:> sudo logrotate /etc/logrotate.d/ceph-test.conf
2020-10-04T22:10:34.876 INFO:teuthology.orchestra.run.smithi063:> sudo logrotate /etc/logrotate.d/ceph-test.conf
2020-10-04T22:10:34.880 INFO:teuthology.orchestra.run.smithi189:> sudo logrotate /etc/logrotate.d/ceph-test.conf
2020-10-04T22:11:04.923 INFO:teuthology.orchestra.run.smithi063:> sudo logrotate /etc/logrotate.d/ceph-test.conf
2020-10-04T22:11:04.926 INFO:teuthology.orchestra.run.smithi189:> sudo logrotate /etc/logrotate.d/ceph-test.conf
2020-10-04T22:11:34.996 INFO:teuthology.orchestra.run.smithi063:> sudo logrotate /etc/logrotate.d/ceph-test.conf
2020-10-04T22:11:35.000 INFO:teuthology.orchestra.run.smithi189:> sudo logrotate /etc/logrotate.d/ceph-test.conf
2020-10-04T22:12:05.064 INFO:teuthology.orchestra.run.smithi063:> sudo logrotate /etc/logrotate.d/ceph-test.conf
2020-10-04T22:12:05.067 INFO:teuthology.orchestra.run.smithi189:> sudo logrotate /etc/logrotate.d/ceph-test.conf
2020-10-04T22:12:35.202 INFO:teuthology.orchestra.run.smithi063:> sudo logrotate /etc/logrotate.d/ceph-test.conf
2020-10-04T22:12:35.205 INFO:teuthology.orchestra.run.smithi189:> sudo logrotate /etc/logrotate.d/ceph-test.conf
2020-10-04T22:13:05.316 INFO:teuthology.orchestra.run.smithi063:> sudo logrotate /etc/logrotate.d/ceph-test.conf
2020-10-04T22:13:05.318 INFO:teuthology.orchestra.run.smithi189:> sudo logrotate /etc/logrotate.d/ceph-test.conf
2020-10-04T22:13:24.520 INFO:teuthology.orchestra.run.smithi063.stderr:stat: cannot read file system information for '/home/ubuntu/cephtest/mnt.0': Connection timed out
2020-10-04T22:13:24.521 DEBUG:teuthology.orchestra.run:got remote process result: 1
2020-10-04T22:13:24.522 INFO:tasks.cephfs.fuse_mount:mount point does not exist: /home/ubuntu/cephtest/mnt.0
2020-10-04T22:13:24.640 INFO:teuthology.orchestra.run:Running command with timeout 300
2020-10-04T22:13:24.641 INFO:teuthology.orchestra.run.smithi063:> (cd /home/ubuntu/cephtest && exec rm -rf /home/ubuntu/cephtest/mnt.0)
2020-10-04T22:13:24.688 INFO:teuthology.orchestra.run.smithi063.stderr:rm: cannot remove '/home/ubuntu/cephtest/mnt.0': Is a directory
2020-10-04T22:13:24.688 DEBUG:teuthology.orchestra.run:got remote process result: 1
From: /ceph/teuthology-archive/pdonnell-2020-10-04_21:51:57-fs-wip-pdonnell-testing-20201004.051319-octopus-distro-basic-smithi/5494771/teuthology.log
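In the log, `stat --file-system` on the stale mountpoint fails with
"Connection timed out", which the old detection did not recognize as a stale
mount, so cleanup went straight to removing a directory that was still a
mountpoint. A minimal sketch of the teardown ordering this change enables
(cleanup_mountpoint() is a hypothetical helper using plain subprocess calls,
not teuthology's remote-run machinery):

import subprocess

def cleanup_mountpoint(mountpoint):
    # Probe the mountpoint the same way the harness does. On a stale
    # FUSE mount (ceph-fuse died without unmounting), stat(2) fails
    # with ETIMEDOUT or ENOTCONN instead of reporting the fs type.
    probe = subprocess.run(
        ['stat', '--file-system', '--printf=%T\n', '--', mountpoint],
        capture_output=True, text=True)
    stale = ['connection timed out',
             'endpoint is not connected',
             'software caused connection abort']
    if probe.returncode != 0 and any(s in probe.stderr.lower() for s in stale):
        # Stale but still mounted: removing the directory now would
        # fail, so lazily detach the mount first.
        subprocess.run(['sudo', 'umount', '-l', mountpoint], check=True)
    # The path is now (at most) an ordinary directory and can be removed.
    subprocess.run(['rm', '-rf', '--', mountpoint], check=True)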
Signed-off-by: Patrick Donnelly <pdonnell@redhat.com>
            proc.wait()
        except CommandFailedError:
            error = six.ensure_str(proc.stderr.getvalue())
-            if ("endpoint is not connected" in error
-                    or "Software caused connection abort" in error):
-                # This happens is fuse is killed without unmount
-                log.warning("Found stale moutn point at {0}".format(self.mountpoint))
-                return True
-            else:
-                # This happens if the mount directory doesn't exist
-                log.info('mount point does not exist: %s', self.mountpoint)
-                return False
+            stale = [
+                'Connection timed out',
+                'Endpoint is not connected',
+                'Software caused connection abort',
+            ]
+            for s in stale:
+                if s.lower() in error.lower():
+                    # This happens if fuse is killed without unmounting
+                    log.warning("Found stale mount point at {0}".format(self.mountpoint))
+                    return True
+
+            # This happens if the mount directory doesn't exist
+            log.info('mount point does not exist: %s', self.mountpoint)
+            return False
        fstype = six.ensure_str(proc.stdout.getvalue()).rstrip('\n')
        if fstype == 'fuseblk':
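The matcher can be exercised standalone to confirm it covers the timeout
message from this log as well as the two messages the old branch handled
(illustrative snippet, not part of the patch):

STALE = [
    'Connection timed out',
    'Endpoint is not connected',
    'Software caused connection abort',
]

def is_stale(error):
    # Case-insensitive substring match, as in the patched code.
    return any(s.lower() in error.lower() for s in STALE)

# The message from the log above now counts as a stale mount:
assert is_stale("stat: cannot read file system information for "
                "'/home/ubuntu/cephtest/mnt.0': Connection timed out")
# The messages the old code matched still do:
assert is_stale("Transport endpoint is not connected")
assert is_stale("Software caused connection abort")
# A missing mount directory still falls through to the
# "mount point does not exist" branch:
assert not is_stale("stat: cannot stat '/home/ubuntu/cephtest/mnt.0': "
                    "No such file or directory")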