git-server-git.apps.pok.os.sepia.ceph.com Git - radosgw-agent.git/commitdiff
Rename variable to match option parsing variable name (again) 5/head
authorChristophe Courtaut <christophe.courtaut@gmail.com>
Tue, 15 Oct 2013 17:53:03 +0000 (19:53 +0200)
committerChristophe Courtaut <christophe.courtaut@gmail.com>
Tue, 15 Oct 2013 17:53:51 +0000 (19:53 +0200)
The object_timeout variable was renamed to object_sync_timeout
in the option parser, but not elsewhere.

This patch fixes it.

Signed-off-by: Christophe Courtaut <christophe.courtaut@gmail.com>
radosgw_agent/cli.py
radosgw_agent/sync.py
radosgw_agent/worker.py

index 18bb7a3f8bb09c3971b954a4a3e57d450818d24d..5e7fea97a87fc317f284cf919ad42ccf4520c96b 100644 (file)
@@ -201,7 +201,7 @@ class TestHandler(BaseHTTPRequestHandler):
                 syncer = sync_cls(TestHandler.src, TestHandler.dest,
                                   TestHandler.max_entries,
                                   rgw_data_log_window=TestHandler.rgw_data_log_window,
-                                  object_timeout=TestHandler.object_timeout)
+                                  object_sync_timeout=TestHandler.object_sync_timeout)
                 syncer.prepare()
                 syncer.sync(
                     TestHandler.num_workers,
@@ -290,7 +290,7 @@ def main():
     meta_syncer = meta_cls(src, dest, args.max_entries)
     data_syncer = data_cls(src, dest, args.max_entries,
                            rgw_data_log_window=args.rgw_data_log_window,
-                           object_timeout=args.object_sync_timeout)
+                           object_sync_timeout=args.object_sync_timeout)
 
     # fetch logs first since data logs need to wait before becoming usable
     # due to rgw's window of data log updates during which the bucket index
index 6e3cfdb8158d4d90c889a1af5002a8a23bcaae7a..4ab5c8a0ed8a709dcbb13ad8a7fda3ae74680679 100644 (file)
@@ -23,7 +23,7 @@ class Syncer(object):
         self.worker_cls = None # filled in by subclass constructor
         self.num_shards = None
         self.max_entries = max_entries
-        self.object_timeout = kwargs.get('object_timeout')
+        self.object_sync_timeout = kwargs.get('object_sync_timeout')
 
     def init_num_shards(self):
         if self.num_shards is not None:
@@ -86,7 +86,7 @@ class Syncer(object):
                                      self.dest,
                                      daemon_id=self.daemon_id,
                                      max_entries=max_entries,
-                                     object_timeout=self.object_timeout,
+                                     object_sync_timeout=self.object_sync_timeout,
                                      )
                      for i in xrange(num_workers)]
         for process in processes:
index ab0de6a0c38bab0053ff4b6055de46da88e23c25..538fee920fe8fe2b9efba3e918c572247b1bc76d 100644 (file)
@@ -166,14 +166,14 @@ class DataWorker(Worker):
         super(DataWorker, self).__init__(*args, **kwargs)
         self.type = 'data'
         self.op_id = 0
-        self.object_timeout = kwargs.get('object_timeout', 60 * 60 * 60)
+        self.object_sync_timeout = kwargs.get('object_sync_timeout', 60 * 60 * 60)
         self.daemon_id = kwargs['daemon_id']
 
     def sync_object(self, bucket, obj):
         self.op_id += 1
         local_op_id = self.local_lock_id + ':' +  str(self.op_id)
         try:
-            until = time.time() + self.object_timeout
+            until = time.time() + self.object_sync_timeout
             client.sync_object_intra_region(self.dest_conn, bucket, obj,
                                             self.src.zone.name,
                                             self.daemon_id,