self.assertGreater(second["counters"]["last_synced_start"], first["counters"]["last_synced_start"])
self.assertGreater(second["counters"]["last_synced_end"], second["counters"]["last_synced_start"])
self.assertGreater(second["counters"]["last_synced_duration"], 0)
- self.assertEquals(second["counters"]["last_synced_bytes"], 10737418240) # last_synced_bytes = 10 files of 1024MB size each
+ self.assertEqual(second["counters"]["last_synced_bytes"], 10737418240) # last_synced_bytes = 10 files of 1024MB size each
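+ # sanity of the expected value: 10 files * 1024 MiB each = 10 * 2**30 bytes = 10737418240 (10 GiB)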
# some more IO
for i in range(10):
self.assertGreater(third["counters"]["last_synced_start"], second["counters"]["last_synced_end"])
self.assertGreater(third["counters"]["last_synced_end"], third["counters"]["last_synced_start"])
self.assertGreater(third["counters"]["last_synced_duration"], 0)
- self.assertEquals(third["counters"]["last_synced_bytes"], 10737418240) # last_synced_bytes = 10 files of 1024MB size each
+ self.assertEqual(third["counters"]["last_synced_bytes"], 10737418240) # last_synced_bytes = 10 files of 1024MB size each
# delete a snapshot
self.mount_a.run_shell(["rmdir", "d0/.snap/snap0"])
# we have not added any directories
peer = status['filesystems'][0]['peers'][0]
- self.assertEquals(status['filesystems'][0]['directory_count'], 0)
- self.assertEquals(peer['stats']['failure_count'], 0)
- self.assertEquals(peer['stats']['recovery_count'], 0)
+ self.assertEqual(status['filesystems'][0]['directory_count'], 0)
+ self.assertEqual(peer['stats']['failure_count'], 0)
+ self.assertEqual(peer['stats']['recovery_count'], 0)
# add a non-existent directory for synchronization -- check if it's reported
# in daemon stats
status = self.get_mirror_daemon_status()
# we added one
peer = status['filesystems'][0]['peers'][0]
- self.assertEquals(status['filesystems'][0]['directory_count'], 1)
+ self.assertEqual(status['filesystems'][0]['directory_count'], 1)
# failure count should be reflected
- self.assertEquals(peer['stats']['failure_count'], 1)
- self.assertEquals(peer['stats']['recovery_count'], 0)
+ self.assertEqual(peer['stats']['failure_count'], 1)
+ self.assertEqual(peer['stats']['recovery_count'], 0)
# create the directory, mirror daemon would recover
self.mount_a.run_shell(["mkdir", "d0"])
time.sleep(120)
status = self.get_mirror_daemon_status()
peer = status['filesystems'][0]['peers'][0]
- self.assertEquals(status['filesystems'][0]['directory_count'], 1)
+ self.assertEqual(status['filesystems'][0]['directory_count'], 1)
# failure and recovery count should be reflected
- self.assertEquals(peer['stats']['failure_count'], 1)
- self.assertEquals(peer['stats']['recovery_count'], 1)
+ self.assertEqual(peer['stats']['failure_count'], 1)
+ self.assertEqual(peer['stats']['recovery_count'], 1)
self.disable_mirroring(self.primary_fs_name, self.primary_fs_id)
"""Test mirror daemon init failure"""
# disable mgr mirroring plugin as it would try to load dir map on
- # on mirroring enabled for a filesystem (an throw up erorrs in
+ # mirroring enabled for a filesystem (and throw up errors in
# the logs)
self.disable_mirroring_module()
"""Test if the mirror daemon can recover from a init failure"""
# disable mgr mirroring plugin as it would try to load dir map on
- # on mirroring enabled for a filesystem (an throw up erorrs in
+ # mirroring enabled for a filesystem (and throw up errors in
# the logs)
self.disable_mirroring_module()
for fname in fnames:
t = self.mount_b.run_shell_payload(f"stat -c %F {dirname}/.snap/{snap_name}/{fname}").stdout.getvalue().strip()
if typs[tidx] == 'reg':
- self.assertEquals('regular file', t)
+ self.assertEqual('regular file', t)
elif typs[tidx] == 'dir':
- self.assertEquals('directory', t)
+ self.assertEqual('directory', t)
elif typs[tidx] == 'sym':
- self.assertEquals('symbolic link', t)
+ self.assertEqual('symbolic link', t)
tidx += 1
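As an aside, the literal strings compared above are the file-type names printed by GNU coreutils' stat -c %F; a minimal standalone sketch (illustration only, with hypothetical sample paths, not part of the test) of the kind of output the run_shell_payload call retrieves:

import subprocess

# Print the human-readable file type for a few paths (hypothetical examples);
# GNU `stat -c %F` yields strings such as "regular file", "directory" and
# "symbolic link", which is what the assertions above compare against.
for path in ("/etc/hostname", "/tmp"):
    t = subprocess.run(["stat", "-c", "%F", path],
                       capture_output=True, text=True).stdout.strip()
    print(path, "->", t)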
self.enable_mirroring(self.primary_fs_name, self.primary_fs_id)
"""Test snapshot synchronization in midst of snapshot deletes.
Deleted the previous snapshot when the mirror daemon is figuring out
- incremental differences between current and previous snaphot. The
+ incremental differences between current and previous snapshot. The
mirror daemon should identify the purge and switch to using remote
comparison to sync the snapshot (in the next iteration of course).
"""
def test_init_cache(self):
get_ttl = "config get mgr mgr_ttl_cache_expire_seconds"
res = self.cluster_cmd(get_ttl)
- self.assertEquals(int(res), 10)
+ self.assertEqual(int(res), 10)
def test_health_not_cached(self):
get_health = "mgr api get health"
self.cluster_cmd(get_health)
h, m = self.get_hit_miss_ratio()
- self.assertEquals(h, h_start)
- self.assertEquals(m, m_start)
+ self.assertEqual(h, h_start)
+ self.assertEqual(m, m_start)
def test_osdmap(self):
get_osdmap = "mgr api get osd_map"
# Miss, add osd_map to cache
self.wait_until_true(wait_miss, self.ttl + 5)
h, m = self.get_hit_miss_ratio()
- self.assertEquals(h, hit_start)
- self.assertEquals(m, miss_start+1)
+ self.assertEqual(h, hit_start)
+ self.assertEqual(m, miss_start+1)
# Hit, get osd_map from cache
self.cluster_cmd(get_osdmap)
h, m = self.get_hit_miss_ratio()
- self.assertEquals(h, hit_start+1)
- self.assertEquals(m, miss_start+1)
+ self.assertEqual(h, hit_start+1)
+ self.assertEqual(m, miss_start+1)
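The osd_map sequence above exercises the mgr TTL cache: the first read after expiry counts as a miss and repopulates the entry, and the read that immediately follows is served as a hit. A minimal sketch of that behaviour (illustration only; the class name and fields are assumptions, not the actual ceph-mgr implementation):

import time

class TTLCache:
    """Toy TTL cache with hit/miss counters (not the ceph-mgr implementation)."""
    def __init__(self, ttl):
        self.ttl = ttl
        self.entries = {}   # key -> (value, time stored)
        self.hits = 0
        self.misses = 0

    def get(self, key, fetch):
        entry = self.entries.get(key)
        if entry is not None and time.monotonic() - entry[1] < self.ttl:
            self.hits += 1          # fresh entry: served from cache
            return entry[0]
        self.misses += 1            # absent or expired: refetch and store
        value = fetch()
        self.entries[key] = (value, time.monotonic())
        return value

cache = TTLCache(ttl=10)
cache.get("osd_map", lambda: {"epoch": 1})   # miss, osd_map added to cache
cache.get("osd_map", lambda: {"epoch": 1})   # hit, served from cache
assert (cache.hits, cache.misses) == (1, 1)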