mgr/vol: better to call base class __init__() at beginning
author      Rishabh Dave <ridave@redhat.com>
            Tue, 3 Sep 2024 14:34:44 +0000 (20:04 +0530)
committer   Rishabh Dave <ridave@redhat.com>
            Fri, 4 Oct 2024 14:55:33 +0000 (20:25 +0530)
It is good practice, and a common convention, to call the base class's
__init__() method at the beginning of the derived class's __init__().
This ensures the base class's attributes and internal state are properly
initialized before the rest of the derived class's __init__() relies on them.
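
For illustration, a minimal sketch of the pattern being enforced here (a
hypothetical Worker class, not part of this patch): calling
threading.Thread.__init__() first means attributes set up by the base class,
such as self.name, are available to the rest of the derived __init__().

    # Minimal sketch, not from the Ceph tree: run the base class __init__()
    # first so attributes it sets (e.g. self.name) are usable below.
    import threading

    class Worker(threading.Thread):
        def __init__(self, name):
            # Initialize the Thread machinery before anything else.
            threading.Thread.__init__(self, name=name)
            # self.name was set by Thread.__init__(), so it is safe to use now.
            self.log_prefix = "[{0}]".format(self.name)

        def run(self):
            print("{0} running".format(self.log_prefix))

    w = Worker("purgejob.0")
    w.start()
    w.join()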

Signed-off-by: Rishabh Dave <ridave@redhat.com>
src/pybind/mgr/volumes/fs/async_cloner.py
src/pybind/mgr/volumes/fs/async_job.py
src/pybind/mgr/volumes/fs/purge_queue.py

src/pybind/mgr/volumes/fs/async_cloner.py
index 463c10005968aa599d5d8c76ff53c6ca39f6a19b..1525f57c3f81791d70b8c8286732ce42e3eb0191 100644 (file)
@@ -313,6 +313,8 @@ class Cloner(AsyncJobs):
     the driver. file types supported are directories, symbolic links and regular files.
     """
     def __init__(self, volume_client, tp_size, snapshot_clone_delay, clone_no_wait):
+        super(Cloner, self).__init__(volume_client, "cloner", tp_size)
+
         self.vc = volume_client
         self.snapshot_clone_delay = snapshot_clone_delay
         self.snapshot_clone_no_wait = clone_no_wait
@@ -323,7 +325,6 @@ class Cloner(AsyncJobs):
             SubvolumeStates.STATE_FAILED       : handle_clone_failed,
             SubvolumeStates.STATE_CANCELED     : handle_clone_failed,
         }
-        super(Cloner, self).__init__(volume_client, "cloner", tp_size)
 
     def reconfigure_max_concurrent_clones(self, tp_size):
         return super(Cloner, self).reconfigure_max_async_threads(tp_size)
src/pybind/mgr/volumes/fs/async_job.py
index 6834e3e240b335a5d8c73b69fd28d292e51d8ebb..d8c0d0a3fbc7dd7806347f45aa687add5a398464 100644 (file)
@@ -19,11 +19,12 @@ class JobThread(threading.Thread):
     MAX_RETRIES_ON_EXCEPTION = 10
 
     def __init__(self, async_job, volume_client, name):
+        threading.Thread.__init__(self, name=name)
+
         self.vc = volume_client
         self.async_job = async_job
         # event object to cancel jobs
         self.cancel_event = threading.Event()
-        threading.Thread.__init__(self, name=name)
 
     def run(self):
         retries = 0
@@ -117,6 +118,7 @@ class AsyncJobs(threading.Thread):
 
     def __init__(self, volume_client, name_pfx, nr_concurrent_jobs):
         threading.Thread.__init__(self, name="{0}.tick".format(name_pfx))
+
         self.vc = volume_client
         # queue of volumes for starting async jobs
         self.q = deque()  # type: deque
src/pybind/mgr/volumes/fs/purge_queue.py
index abace19d029ca2751cc94858adf5e22addd83bf6..8917b475ac62556e6d0f816794c2ede42c3d22ef 100644 (file)
@@ -103,9 +103,10 @@ class ThreadPoolPurgeQueueMixin(AsyncJobs):
     _all_ threads purging entries for one volume (starving other volumes).
     """
     def __init__(self, volume_client, tp_size):
-        self.vc = volume_client
         super(ThreadPoolPurgeQueueMixin, self).__init__(volume_client, "purgejob", tp_size)
 
+        self.vc = volume_client
+
     def get_next_job(self, volname, running_jobs):
         return get_trash_entry_for_volume(self.fs_client, self.vc.volspec, volname, running_jobs)