From a8834b7a53d5fc10127d7a61f406977ebbab5916 Mon Sep 17 00:00:00 2001
From: Josh Durgin
Date: Fri, 1 Nov 2013 16:04:35 -0700
Subject: [PATCH] Include exception information in warnings

This will provide more detail when an error occurs without requiring
logging at the debug level.

Signed-off-by: Josh Durgin
---
 radosgw_agent/cli.py    |  4 ++--
 radosgw_agent/sync.py   | 19 +++++++++++--------
 radosgw_agent/worker.py | 27 ++++++++++++++-------------
 3 files changed, 27 insertions(+), 23 deletions(-)

diff --git a/radosgw_agent/cli.py b/radosgw_agent/cli.py
index 4ed9d16..48ae35a 100644
--- a/radosgw_agent/cli.py
+++ b/radosgw_agent/cli.py
@@ -260,8 +260,8 @@ def main():
 
     try:
         region_map = client.get_region_map(dest_conn)
-    except Exception as e:
-        log.error('Could not retrieve region map from destination: %s', e)
+    except Exception:
+        log.exception('Could not retrieve region map from destination')
         sys.exit(1)
 
     try:
diff --git a/radosgw_agent/sync.py b/radosgw_agent/sync.py
index 9206e0e..8be4132 100644
--- a/radosgw_agent/sync.py
+++ b/radosgw_agent/sync.py
@@ -25,8 +25,9 @@ def prepare_sync(syncer, error_delay):
         try:
             syncer.prepare()
             break
-        except Exception as e:
-            log.warn('error preparing for sync, will retry: %s', e)
+        except Exception:
+            log.warn('error preparing for sync, will retry. Traceback:',
+                     exc_info=True)
         time.sleep(error_delay)
 
 def incremental_sync(meta_syncer, data_syncer, num_workers, lock_timeout,
@@ -41,8 +42,9 @@ def incremental_sync(meta_syncer, data_syncer, num_workers, lock_timeout,
             meta_syncer.sync(num_workers, lock_timeout)
             if not metadata_only:
                 data_syncer.sync(num_workers, lock_timeout)
-        except Exception as e:
-            log.warn('error doing incremental sync, will try again: %s', e)
+        except Exception:
+            log.warn('error doing incremental sync, will try again. Traceback:',
+                     exc_info=True)
 
         # prepare data before sleeping due to rgw_log_bucket_window
         if not metadata_only:
@@ -70,8 +72,8 @@ class Syncer(object):
         try:
             self.num_shards = client.num_log_shards(self.src_conn, self.type)
             log.debug('%d shards to check', self.num_shards)
-        except Exception as e:
-            log.error('finding number of shards failed: %s', e)
+        except Exception:
+            log.error('finding number of shards failed')
             raise
 
     def shard_num_for_key(self, key):
@@ -111,8 +113,9 @@ class Syncer(object):
                                     self.daemon_id, shard_num, data)
-        except Exception as e:
-            log.warn('could not set worker bounds, may repeat some work: %s', e)
+        except Exception:
+            log.warn('could not set worker bounds, may repeat some work.'
+                     ' Traceback:', exc_info=True)
 
     def sync(self, num_workers, log_lock_time, max_entries=None):
         workQueue = multiprocessing.Queue()
diff --git a/radosgw_agent/worker.py b/radosgw_agent/worker.py
index 661f912..7e11cb7 100644
--- a/radosgw_agent/worker.py
+++ b/radosgw_agent/worker.py
@@ -61,10 +61,10 @@ class Worker(multiprocessing.Process):
                 self.lock.unset_shard()
                 self.result_queue.put((RESULT_SUCCESS, result))
                 raise SkipShard('no log for shard')
-        except Exception as e:
+        except Exception:
             log.warn('error locking shard %d log, '
-                     ' skipping for now: %s',
-                     shard_num, e)
+                     ' skipping for now. Traceback: ',
+                     shard_num, exc_info=True)
             self.lock.unset_shard()
             self.result_queue.put((RESULT_ERROR, result))
             raise SkipShard()
@@ -76,7 +76,7 @@ class Worker(multiprocessing.Process):
             log.warn('work may be duplicated: %s', e)
         except Exception as e:
             log.warn('error unlocking log, continuing anyway '
-                     'since lock will timeout')
+                     'since lock will timeout. Traceback:', exc_info=True)
 
     def set_bound(self, key, marker, retries, type_=None):
         # api doesn't allow setting a bound with a blank marker
@@ -93,9 +93,10 @@ class Worker(multiprocessing.Process):
                                         key, data=data)
                 return RESULT_SUCCESS
-            except Exception as e:
+            except Exception:
                 log.warn('error setting worker bound for key "%s",'
-                         ' may duplicate some work later: %s', key, e)
+                         ' may duplicate some work later. Traceback:', key,
+                         exc_info=True)
                 return RESULT_ERROR
 
 MetadataEntry = namedtuple('MetadataEntry',
@@ -225,7 +226,7 @@ class DataWorker(Worker):
             except SyncFailed:
                 raise
             except Exception as e:
-                log.debug('error geting op state: %s', e)
+                log.debug('error getting op state: %s', e, exc_info=True)
             time.sleep(1)
         # timeout expired
         raise SyncTimedOut()
@@ -322,7 +323,7 @@ class DataWorkerIncremental(IncrementalMixin, DataWorker):
                                                  retries)
             except Exception as e:
                 log.warn('error syncing bucket instance "%s": %s',
-                         bucket_instance, e)
+                         bucket_instance, e, exc_info=True)
                 sync_result = RESULT_ERROR
             if sync_result == RESULT_ERROR:
                 new_retries.append(bucket_instance)
@@ -406,16 +407,16 @@ class MetadataWorker(Worker):
                 # Since this error is handled appropriately, return success
                 return RESULT_SUCCESS
             except Exception as e:
-                log.error('error getting metadata for %s "%s": %s',
-                          section, name, e)
+                log.warn('error getting metadata for %s "%s": %s',
+                         section, name, e, exc_info=True)
                 return RESULT_ERROR
         else:
             try:
                 client.update_metadata(self.dest_conn, section, name, metadata)
                 return RESULT_SUCCESS
             except Exception as e:
-                log.error('error updating metadata for %s "%s": %s',
-                          section, name, e)
+                log.warn('error updating metadata for %s "%s": %s',
+                         section, name, e, exc_info=True)
                 return RESULT_ERROR
 
 class MetadataWorkerIncremental(IncrementalMixin, MetadataWorker):
@@ -471,7 +472,7 @@ class MetadataWorkerFull(MetadataWorker):
                 self.sync_meta(section, name)
             except Exception as e:
                 log.warn('could not sync %s "%s", saving for retry: %s',
-                         section, name, e)
+                         section, name, e, exc_info=True)
                 retries.append(section + '/' + name)
 
         # unlock shard and report buckets to retry during incremental sync
-- 
2.47.3
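
For reference, a minimal standalone sketch of the standard-library logging behaviour this change relies on. It is not part of the patch; the logger name and the get_region_map stub are made up for illustration. Passing exc_info=True to a warning call attaches the active exception's traceback to that record, so the detail shows up without dropping the log level to debug; Logger.exception() does the same but always logs at ERROR level.

    import logging

    logging.basicConfig(level=logging.WARNING)
    log = logging.getLogger('radosgw_agent.example')  # hypothetical logger name

    def get_region_map():
        # stand-in failure used only to demonstrate the logging output
        raise RuntimeError('connection refused')

    try:
        get_region_map()
    except Exception as e:
        # old style: only the exception message reaches the log
        log.warning('could not retrieve region map: %s', e)

    try:
        get_region_map()
    except Exception:
        # new style: the full traceback is appended to the WARNING record
        log.warning('could not retrieve region map. Traceback:', exc_info=True)
        # log.exception('...') would include the same traceback at ERROR level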