]> git-server-git.apps.pok.os.sepia.ceph.com Git - radosgw-agent.git/commitdiff
sync: remove extraneous error checking
authorJosh Durgin <josh.durgin@inktank.com>
Tue, 10 Sep 2013 00:12:24 +0000 (17:12 -0700)
committerJosh Durgin <josh.durgin@inktank.com>
Tue, 10 Sep 2013 00:12:34 +0000 (17:12 -0700)
These loops all end once a result of any sort is received for a child
process. Each loop will already complete if every worker has a connection
error.

Signed-off-by: Josh Durgin <josh.durgin@inktank.com>
radosgw_agent/sync.py

index 025c32fcad9e33f6fd02e78cbb08ae1c6016a214..e7940734026c3dac15a2d744ba283d59826a9141 100644 (file)
@@ -64,21 +64,13 @@ class Syncer:
 
         # pull the results out as they are produced
         errors = []
-        connection_errors = []
         for i in xrange(num_shards):
-            # if all processes error out, stop trying to process data
-            if len(connection_errors) == len(processes):
-                log.error('All {num_workers} incremental sync workers have failed.'
-                          ' Ceasing to process shards'.format(num_workers=len(processes)))
-                break
             result, shard_num = resultQueue.get()
             if result == worker.RESULT_SUCCESS:
                 log.debug('synced shard %d', shard_num)
             else:
                 log.error('error on incremental sync of shard %d', shard_num)
                 errors.append(shard_num)
-            if result == worker.RESULT_CONNECTION_ERROR:
-                connection_errors.append(shard_num)
 
             log.info('%d/%d shards processed', i + 1, num_shards)
         if errors:
@@ -157,12 +149,7 @@ class Syncer:
 
         # pull the results out as they are produced
         errors = []
-        connection_errors = []
         for i in xrange(num_data_shards):
-            if len(connection_errors) == len(processes):
-                log.error('All {num_workers} full sync workers have failed.'
-                          ' Ceasing to process shards'.format(num_workers=len(processes)))
-                break
             log.info('%d/%d shards synced, ', i, num_data_shards)
             result, shard_num = resultQueue.get()
             if result != worker.RESULT_SUCCESS:
@@ -170,8 +157,6 @@ class Syncer:
                 errors.append((shard_num))
             else:
                 log.debug('synced shard %s', shard_num)
-            if result == worker.RESULT_CONNECTION_ERROR:
-                connection_errors.append(shard_num)
 
         for process in processes:
             process.join()
@@ -237,14 +222,7 @@ class Syncer:
 
         # pull the results out as they are produced
         errors = []
-        connection_errors = []
         for i in xrange(len(meta_keys)):
-            # if all processes error out, stop trying to process data
-            if len(connection_errors) == len(processes):
-                log.error('All {num_workers} full sync workers have failed.'
-                          ' Ceasing to process shards'.format(num_workers=len(processes)))
-                break
-
             log.info('%d/%d items synced', i, len(meta_keys))
             result, section, name = resultQueue.get()
             if result != worker.RESULT_SUCCESS:
@@ -252,8 +230,6 @@ class Syncer:
                 errors.append((section, name))
             else:
                 log.debug('synced %s %r', section, name)
-            if result == worker.RESULT_CONNECTION_ERROR:
-                connection_errors.append(shard_num)
         for process in processes:
             process.join()
         if errors: