git.apps.os.sepia.ceph.com Git - ceph.git/commitdiff
tests: ceph-disk: Make unit tests cover all ceph-disk destroy/deactivate features
author Vicente Cheng <freeze.bilsted@gmail.com>
Fri, 25 Sep 2015 09:44:19 +0000 (17:44 +0800)
committer Vicente Cheng <freeze.bilsted@gmail.com>
Tue, 17 Nov 2015 01:24:43 +0000 (09:24 +0800)
  - Add unit tests covering all of the destroy/deactivate features.
  - Make some minor modifications to ceph-disk.

Signed-off-by: Vicente Cheng <freeze.bilsted@gmail.com>
src/ceph-disk
src/test/python/ceph-disk/tests/test_ceph_disk.py

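One of the ceph-disk changes below reworks how convert_osd_id scans /proc/mounts for an OSD's device and mount point. As a minimal sketch of that lookup, using a hypothetical find_osd_mount helper rather than the actual ceph-disk code:

    # Illustrative sketch only (hypothetical helper, not the ceph-disk code):
    # locate an OSD's device and mount point by scanning /proc/mounts for the
    # OSD's data directory.
    def find_osd_mount(cluster, osd_id):
        osd_dir = '/var/lib/ceph/osd/{cluster}-{osd_id}'.format(
            cluster=cluster, osd_id=osd_id)
        with open('/proc/mounts') as proc_mounts:
            for line in proc_mounts:
                # /proc/mounts fields: device, mount point, fstype, options, ...
                fields = line.split()
                if len(fields) >= 2 and fields[1] == osd_dir:
                    return [fields[0], fields[1]]
        raise RuntimeError('Can not find mount point by osd-id')

The new test_convert_osd_id cases in test_ceph_disk.py exercise this path by patching open() to return a StringIO of fake /proc/mounts lines.
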
index 56f937ceb52bd7e48c619376313c830254ac13c4..771906313daad1bef3ed55286b36b3a57223096f 100755 (executable)
@@ -823,14 +823,16 @@ def convert_osd_id(cluster, osd_id):
 
     # mount_info's first fields means `device`, Second means `mount point`
     mount_info = []
-    with file('/proc/mounts', 'rb') as proc_mounts:
-        for line in proc_mounts:
-            if mountsp_name in line:
-                fields = line.split()
-                mount_info.append(fields[0])
-                mount_info.append(fields[1])
-            else:
-                continue
+    try:
+        proc_mounts = open('/proc/mounts', 'rb')
+        if proc_mounts:
+            for line in proc_mounts:
+                if mountsp_name in line:
+                    fields = line.split()
+                    mount_info.append(fields[0])
+                    mount_info.append(fields[1])
+    except:
+        raise Error('Open/Read file Error.')
     if not mount_info:
         raise Error('Can not find mount point by osd-id')
     return mount_info
@@ -2207,9 +2209,9 @@ def stop_daemon(
                 )
         else:
             raise Error('{cluster} osd.{osd_id} is not tagged with an init '\
-                        ' system'.format(cluster=cluster,osd_id=osd_id,))
-    except:
-        raise Error('ceph osd stop failed')
+                        ' system'.format(cluster=cluster,osd_id=osd_id))
+    except subprocess.CalledProcessError as e:
+        raise Error('ceph osd stop failed', e)
 
 
 def detect_fstype(
@@ -2649,7 +2651,7 @@ def _check_osd_status(cluster, osd_id):
         raise Error(e)
     out_json = json.loads(out)
     for item in out_json[u'osds']:
-        if item.get(u'osd') is int(osd_id):
+        if item.get(u'osd') == int(osd_id):
             found = True
             if item.get(u'in') is 1:
                 status_code += 2
@@ -2701,9 +2703,6 @@ def _remove_osd_directory_files(mounted_path, cluster):
 
 def main_deactivate(args):
     mount_info = []
-    if args.cluster is None:
-        args.cluster = 'ceph'
-
     if args.deactivate_by_id:
         osd_id = args.deactivate_by_id
     else:
@@ -2799,9 +2798,6 @@ def _deallocate_osd_id(cluster, osd_id):
 
 def main_destroy(args):
     mount_info = []
-    if args.cluster is None:
-        args.cluster = 'ceph'
-
     if args.destroy_by_id:
         osd_id = args.destroy_by_id
     else:
@@ -2818,8 +2814,8 @@ def main_destroy(args):
                             osd_id = get_oneliner(tpath, 'whoami')
                         finally:
                             unmount(tpath)
-           except MountError:
-               pass
+           except:
+               raise MountError
            mount_info.append(args.path)
 
 
@@ -2877,8 +2873,8 @@ def main_destroy(args):
                     if whoami is osd_id:
                         found = True
                         (base_dev, part_num) = split_dev_base_partnum(item)
-                except MountError:
-                     pass
+                except:
+                    raise MountError
             if not found:
                 raise Error('Could not find the partition of osd.%s!' % osd_id)
 
index 707ac75d219734245fc31ca7ab4fc2a9fdeb791b..298e659b8af0edb6772e5d0d6050249c6c3a4e7a 100644 (file)
@@ -5,6 +5,7 @@ import unittest
 import argparse
 import pytest
 import ceph_disk
+import StringIO
 
 def fail_to_mount(dev, fstype, options):
     raise ceph_disk.MountError(dev + " mount fail")
@@ -684,9 +685,8 @@ class TestCephDiskDeactivateAndDestroy(unittest.TestCase):
             self.assertRaises(IOError, ceph_disk.main_deactivate, args)
             # clear the created file by unit test
 
-    def test_main_deactivate_non_exists_by_dev(self):
+    def test_main_deactivate_non_exists_non_cluster_by_dev(self):
         args = ceph_disk.parse_args(['deactivate', \
-                                     '--cluster', 'ceph', \
                                      '/dev/Xda1'])
         self.assertRaises(Exception, ceph_disk.main_deactivate, args)
 
@@ -707,6 +707,7 @@ class TestCephDiskDeactivateAndDestroy(unittest.TestCase):
     def test_main_deactivate_osd_in_and_down_by_dev(self):
         args = ceph_disk.parse_args(['deactivate', \
                                      '--cluster', 'ceph', \
+                                     '--mark-out', \
                                      '/dev/Xda1'])
         patcher = patch('os.path.exists')
         patch_path = patcher.start()
@@ -718,6 +719,7 @@ class TestCephDiskDeactivateAndDestroy(unittest.TestCase):
                 is_mounted=lambda dev: mount_path,
                 get_oneliner=lambda mount_path, filen: 5566,
                 _check_osd_status=lambda ceph, status: 2,
+                _mark_osd_out=lambda ceph, osd_id: True
                 ):
             ceph_disk.main_deactivate(args)
 
@@ -764,6 +766,7 @@ class TestCephDiskDeactivateAndDestroy(unittest.TestCase):
     def test_main_deactivate_osd_in_and_up_by_dev(self):
         args = ceph_disk.parse_args(['deactivate', \
                                      '--cluster', 'ceph', \
+                                     '--mark-out', \
                                      '/dev/Xda1'])
         patcher = patch('os.path.exists')
         patch_path = patcher.start()
@@ -776,6 +779,7 @@ class TestCephDiskDeactivateAndDestroy(unittest.TestCase):
                 is_mounted=lambda dev: mount_path,
                 get_oneliner=lambda mount_path, filen: 5566,
                 _check_osd_status=lambda ceph, osd_id: 3,
+                _mark_osd_out=lambda ceph, osd_id: True,
                 stop_daemon=lambda ceph, osd_id: True,
                 _remove_osd_directory_files=lambda mount_path, ceph: True,
                 unmount=lambda path: True,
@@ -799,21 +803,180 @@ class TestCephDiskDeactivateAndDestroy(unittest.TestCase):
                 ):
             self.assertRaises(Exception, ceph_disk._mark_osd_out, 'ceph', '5566')
 
-    def test_stop_daemon_fail(self):
-        dev = {
-            'cluster': 'ceph',
-            'osd_id': '5566',
-        }
+    def test_check_osd_status_fail(self):
+        with patch.multiple(
+                ceph_disk,
+                command=raise_command_error,
+                ):
+            self.assertRaises(Exception, ceph_disk._check_osd_status, 'ceph', '5566')
+
+    def test_check_osd_status_osd_not_found(self):
+
+        fake_value = '{"osds":[{"osd":0,"up":1,"in":1},{"osd":1,"up":1,"in":1}]}'
+
+        def return_fake_value(cmd):
+            return fake_value, 0
+
+        with patch.multiple(
+                ceph_disk,
+                command=return_fake_value,
+                ):
+            #ceph_disk._check_osd_status('ceph', '5566')
+            self.assertRaises(Exception, ceph_disk._check_osd_status, 'ceph', '5566')
+
+    def test_check_osd_status_success(self):
 
-        def stop_daemon_fail():
-            raise Error('ceph osd stop failed')
+        fake_value = '{"osds":[{"osd":0,"up":1,"in":1},{"osd":5566,"up":1,"in":1}]}'
+
+        def return_fake_value(cmd):
+            return fake_value, 0
 
         with patch.multiple(
                 ceph_disk,
+                command=return_fake_value,
+                ):
+            ceph_disk._check_osd_status('ceph', '5566')
+
+    @patch('os.path.exists', return_value=False)
+    def test_stop_daemon_fail_all_init_type(self, mock_path_exists):
+        self.assertRaises(Exception, ceph_disk.stop_daemon, 'ceph', '5566')
+
+    @patch('os.path.exists', return_value=Exception)
+    def test_stop_daemon_fail_on_os_path_check(self, mock_path_exists):
+        self.assertRaises(Exception, ceph_disk.stop_daemon, 'ceph', '5566')
+
+    def test_stop_daemon_fail_upstart(self):
+        STATEDIR = '/var/lib/ceph'
+        cluster = 'ceph'
+        osd_id = '5566'
+
+        path = (STATEDIR + '/osd/{cluster}-{osd_id}/upstart').format(
+               cluster=cluster, osd_id=osd_id)
+
+        def path_file_test(check_path):
+            if check_path == path:
+                return True
+            else:
+                False
+
+        def stop_daemon_fail(cmd):
+            raise Exception('ceph osd stop failed')
+
+        patcher = patch('os.path.exists')
+        check_path = patcher.start()
+        check_path.side_effect = path_file_test
+        with patch.multiple(
+                ceph_disk,
+                check_path,
                 command_check_call=stop_daemon_fail,
                 ):
             self.assertRaises(Exception, ceph_disk.stop_daemon, 'ceph', '5566')
 
+    def test_stop_daemon_fail_sysvinit_usr_sbin_service(self):
+        STATEDIR = '/var/lib/ceph'
+        cluster = 'ceph'
+        osd_id = '5566'
+
+        path = (STATEDIR + '/osd/{cluster}-{osd_id}/sysvinit').format(
+        cluster=cluster, osd_id=osd_id)
+
+        def path_file_test(check_path):
+            if check_path == path:
+                return True
+            elif check_path == '/usr/sbin/service':
+                return True
+            else:
+                False
+
+        def stop_daemon_fail(cmd):
+            raise Exception('ceph osd stop failed')
+
+        patcher = patch('os.path.exists')
+        check_path = patcher.start()
+        check_path.side_effect = path_file_test
+        with patch.multiple(
+                ceph_disk,
+                check_path,
+                #join_path,
+                command_check_call=stop_daemon_fail,
+                ):
+            self.assertRaises(Exception, ceph_disk.stop_daemon, 'ceph', '5566')
+
+    def test_stop_daemon_fail_sysvinit_sbin_service(self):
+        STATEDIR = '/var/lib/ceph'
+        cluster = 'ceph'
+        osd_id = '5566'
+
+        path = (STATEDIR + '/osd/{cluster}-{osd_id}/sysvinit').format(
+        cluster=cluster, osd_id=osd_id)
+
+        def path_file_test(check_path):
+            if check_path == path:
+                return True
+            elif check_path == '/sbin/service':
+                return True
+            else:
+                False
+
+        def stop_daemon_fail(cmd):
+            raise Exception('ceph osd stop failed')
+
+        patcher = patch('os.path.exists')
+        check_path = patcher.start()
+        check_path.side_effect = path_file_test
+        with patch.multiple(
+                ceph_disk,
+                check_path,
+                command_check_call=stop_daemon_fail,
+                ):
+            self.assertRaises(Exception, ceph_disk.stop_daemon, 'ceph', '5566')
+
+    def test_stop_daemon_fail_systemd_disable_stop(self):
+        STATEDIR = '/var/lib/ceph'
+        cluster = 'ceph'
+        osd_id = '5566'
+
+        path = (STATEDIR + '/osd/{cluster}-{osd_id}/systemd').format(
+        cluster=cluster, osd_id=osd_id)
+
+        def path_file_test(check_path):
+            if check_path == path:
+                return True
+            else:
+                False
+
+        def stop_daemon_fail(cmd):
+            if 'stop' in cmd:
+                raise Exception('ceph osd stop failed')
+            else:
+                return True
+
+        patcher = patch('os.path.exists')
+        check_path = patcher.start()
+        check_path.side_effect = path_file_test
+        with patch.multiple(
+                ceph_disk,
+                check_path,
+                command_check_call=stop_daemon_fail,
+                ):
+            self.assertRaises(Exception, ceph_disk.stop_daemon, 'ceph', '5566')
+
+    def test_convert_osd_id(self):
+        file_output = StringIO.StringIO('/dev/sdX1 /var/lib/ceph/osd/ceph-1234 xfs rw,noatime 0 0\n' \
+                                        '/dev/sdX1 /var/lib/ceph/osd/ceph-5566 xfs rw,noatime 0 0\n')
+        with patch('__builtin__.open', return_value=file_output):
+            ceph_disk.convert_osd_id('ceph', '5566')
+
+    def test_convert_osd_id_not_found(self):
+        file_output = StringIO.StringIO('/dev/sdX1 /var/lib/ceph/osd/ceph-1234 xfs rw,noatime 0 0\n' \
+                                        '/dev/sdY1 /var/lib/ceph/osd/ceph-5678 xfs rw,noatime 0 0\n')
+        with patch('__builtin__.open', return_value=file_output):
+            self.assertRaises(Exception, ceph_disk.convert_osd_id, 'ceph', '5566')
+
+    def test_convert_osd_id_get_mounts_fail(self):
+        with patch('__builtin__.open', return_value=Exception):
+            self.assertRaises(Exception, ceph_disk.convert_osd_id, 'ceph', '5566')
+
     def test_convert_osd_id_fail(self):
         dev = {
             'cluster': 'ceph',
@@ -822,7 +985,8 @@ class TestCephDiskDeactivateAndDestroy(unittest.TestCase):
         }
         self.assertRaises(Exception, ceph_disk.convert_osd_id, dev)
 
-    def test_remove_osd_directory_files(self):
+    @patch('os.remove', return_value=True)
+    def test_remove_osd_directory_files(self, mock_remove):
         cluster = 'ceph'
         init_file = 'init'
         with patch.multiple(
@@ -831,6 +995,26 @@ class TestCephDiskDeactivateAndDestroy(unittest.TestCase):
                 ):
             ceph_disk._remove_osd_directory_files('/somewhere', cluster)
 
+    def test_remove_osd_directory_files_remove_OSError(self):
+        cluster = 'ceph'
+        init_file = 'init'
+        with patch.multiple(
+                ceph_disk,
+                get_conf=lambda cluster, **kwargs: None,
+                init_get=lambda : init_file
+                ):
+            ceph_disk._remove_osd_directory_files('/somewhere', cluster)
+
+    @patch('os.path.exists', return_value=False)
+    def test_remove_osd_directory_files_already_remove(self, mock_exists):
+        cluster = 'ceph'
+        init_file = 'upstart'
+        with patch.multiple(
+                ceph_disk,
+                get_conf=lambda cluster, **kwargs: init_file,
+                ):
+            ceph_disk._remove_osd_directory_files('/tmp', cluster)
+
     def test_path_set_context(self):
         path = '/somewhere'
         with patch.multiple(
@@ -909,38 +1093,48 @@ class TestCephDiskDeactivateAndDestroy(unittest.TestCase):
                 ):
             ceph_disk.main_destroy(args)
 
-    def test_main_destroy_without_zap_by_id(self):
+    def test_main_destroy_with_zap_find_part_fail_by_id(self):
         args = ceph_disk.parse_args(['destroy', \
                                      '--cluster', 'ceph', \
+                                     '--zap', \
                                      '--destroy-by-id', '5566'])
         cluster = 'ceph'
         osd_id = '5566'
+        fake_part_return = {'Xda': ['Xda1'], 'Xdb': []}
+        disk = 'Xda'
+        partition = 'Xda1'
         with patch.multiple(
                 ceph_disk,
                 _check_osd_status=lambda cluster, osd_id: 0,
                 _remove_from_crush_map=lambda cluster, osd_id: True,
                 _delete_osd_auth_key=lambda cluster, osd_id: True,
                 _deallocate_osd_id=lambda cluster, osd_id: True,
+                list_all_partitions=lambda names: fake_part_return,
+                split_dev_base_partnum=lambda names: (disk, 1)
                 ):
-            ceph_disk.main_destroy(args)
+            self.assertRaises(Exception, ceph_disk.main_destroy, args)
 
-    def test_main_destroy_with_zap_find_part_fail_by_id(self):
+    def test_main_destroy_with_zap_mount_part_fail_by_id(self):
         args = ceph_disk.parse_args(['destroy', \
                                      '--cluster', 'ceph', \
                                      '--zap', \
                                      '--destroy-by-id', '5566'])
         cluster = 'ceph'
         osd_id = '5566'
-        disk = "Xda"
-        partition = "Xda1"
+        fake_part_return = {'Xda': ['Xda1'], 'Xdb': []}
+        disk = 'Xda'
+        partition = 'Xda1'
+        fstype = 'ext4'
         with patch.multiple(
                 ceph_disk,
                 _check_osd_status=lambda cluster, osd_id: 0,
                 _remove_from_crush_map=lambda cluster, osd_id: True,
                 _delete_osd_auth_key=lambda cluster, osd_id: True,
                 _deallocate_osd_id=lambda cluster, osd_id: True,
-                list_all_partitions=lambda names: { disk: [partition] },
-                split_dev_base_partnum=lambda names: (disk, 1)
+                list_all_partitions=lambda names: fake_part_return,
+                split_dev_base_partnum=lambda names: (disk, 1),
+                get_dev_fs=lambda dev:fstype,
+                mount=lambda dev, fstype, options:Exception
                 ):
             self.assertRaises(Exception, ceph_disk.main_destroy, args)
 
@@ -1105,9 +1299,9 @@ class TestCephDiskDeactivateAndDestroy(unittest.TestCase):
                 ):
             self.assertRaises(Exception, ceph_disk.main_destroy, args)
 
-    def test_main_destroy_non_exist_by_dev(self):
+    @patch('os.path.exists', return_value=False)
+    def test_main_destroy_non_exist_non_cluster_by_dev(self, mock_exists):
         args = ceph_disk.parse_args(['destroy', \
-                                     '--cluster', 'ceph', \
                                      '/dev/Xda1'])
         self.assertRaises(Exception, ceph_disk.main_destroy, args)
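
For reference, the new test_stop_daemon_fail_* cases above fake the per-init-system marker files under /var/lib/ceph/osd/{cluster}-{osd_id} by patching os.path.exists. A rough sketch of the detection those tests exercise, as a hypothetical detect_init_system helper rather than the actual stop_daemon code:

    # Illustrative sketch only: the init-system detection that the
    # test_stop_daemon_fail_* cases fake by patching os.path.exists.
    import os

    STATEDIR = '/var/lib/ceph'

    def detect_init_system(cluster, osd_id):
        osd_dir = '{statedir}/osd/{cluster}-{osd_id}'.format(
            statedir=STATEDIR, cluster=cluster, osd_id=osd_id)
        for init in ('upstart', 'sysvinit', 'systemd'):
            if os.path.exists(os.path.join(osd_dir, init)):
                return init
        raise RuntimeError('{cluster} osd.{osd_id} is not tagged with an init '
                           'system'.format(cluster=cluster, osd_id=osd_id))

Each of those tests then makes command_check_call raise, so what is asserted is that stop_daemon surfaces the failure for the selected init system.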