for code, alert in r.items():
checks[code] = alert
else:
- self.log.warn('Alert is not sent because smtp_host is not configured')
+ self.log.warning('Alert is not sent because smtp_host is not configured')
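+ # NOTE: Logger.warn() is a deprecated alias for Logger.warning() in
+ # Python's standard logging module; warning() is the documented API.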
self.set_health_checks(checks)
def serve(self):
self.event = Event()
def handle_command(self, inbuf, command):
- self.log.warn("Handling command: '%s'" % str(command))
+ self.log.warning("Handling command: '%s'" % str(command))
if command['prefix'] == 'balancer status':
s = {
'plans': list(self.plans.keys()),
metrics = self.get_module_option('crush_compat_metrics').split(',')
key = metrics[0] # balancing using the first score metric
if key not in ['pgs', 'bytes', 'objects']:
- self.log.warn("Invalid crush_compat balancing key %s. Using 'pgs'." % key)
+ self.log.warning("Invalid crush_compat balancing key %s. Using 'pgs'." % key)
key = 'pgs'
# go
}), '')
r, outb, outs = result.wait()
if r != 0:
- self.log.warn('Could not mark OSD %s out. r: [%s], outb: [%s], outs: [%s]' % (osd_ids, r, outb, outs))
+ self.log.warning('Could not mark OSD %s out. r: [%s], outb: [%s], outs: [%s]' % (osd_ids, r, outb, outs))
for osd_id in osd_ids:
result = CommandResult('')
self.send_command(result, 'mon', '', json.dumps({
}), '')
r, outb, outs = result.wait()
if r != 0:
- self.log.warn('Could not set osd.%s primary-affinity, r: [%s], outs: [%s]' % (osd_id, r, outb, outs))
+ self.log.warning('Could not set osd.%s primary-affinity, r: [%s], outb: [%s], outs: [%s]' % (osd_id, r, outb, outs))
def extract_smart_features(self, raw):
# FIXME: extract and normalize raw smartctl --json output and
schema = self.get_perf_schema(service['type'], service['id'])
if not schema:
- self.log.warn("No perf counter schema for {0}.{1}".format(
+ self.log.warning("No perf counter schema for {0}.{1}".format(
service['type'], service['id']
))
continue
# In the case that we ignored some PGs, log the reason why (we may
# not end up creating a progress event)
if len(unmoved_pgs):
- self.log.warn("{0} PGs were on osd.{1}, but didn't get new locations".format(
+ self.log.warning("{0} PGs were on osd.{1}, but didn't get new locations".format(
len(unmoved_pgs), osd_id))
- self.log.warn("{0} PGs affected by osd.{1} being marked {2}".format(
+ self.log.warning("{0} PGs affected by osd.{1} being marked {2}".format(
len(affected_pgs), osd_id, marked))
old_weight = old_osds[osd_id]['in']
if new_weight == 0.0 and old_weight > new_weight:
- self.log.warn("osd.{0} marked out".format(osd_id))
+ self.log.warning("osd.{0} marked out".format(osd_id))
self._osd_in_out(old_osdmap, old_dump, new_osdmap, osd_id, "out")
elif new_weight >= 1.0 and old_weight == 0.0:
# Only consider weight>=1.0 as "in" to avoid spawning
# individual recovery events on every adjustment
# in a gradual weight-in
- self.log.warn("osd.{0} marked in".format(osd_id))
+ self.log.warning("osd.{0} marked in".format(osd_id))
self._osd_in_out(old_osdmap, old_dump, new_osdmap, osd_id, "in")
def notify(self, notify_type, notify_data):
ev.message))
self._complete(ev)
except KeyError:
- self.log.warn("complete: ev {0} does not exist".format(ev_id))
+ self.log.warning("complete: ev {0} does not exist".format(ev_id))
pass
def fail(self, ev_id, message):
message))
self._complete(ev)
except KeyError:
- self.log.warn("fail: ev {0} does not exist".format(ev_id))
+ self.log.warning("fail: ev {0} does not exist".format(ev_id))
def _handle_ls(self):
if len(self._events) or len(self._completed_events):
try:
self.metrics["pg_{}".format(state)].set(num, (pool,))
except KeyError:
- self.log.warn("skipping pg in unknown state {}".format(state))
+ self.log.warning("skipping pg in unknown state {}".format(state))
def get_osd_stats(self):
osd_stats = self.get('osd_stats')
valid_flags = set(args.keys()) & set(common.OSD_FLAGS)
invalid_flags = list(set(args.keys()) - valid_flags)
if invalid_flags:
- context.instance.log.warn("%s not valid to set/unset", invalid_flags)
+ context.instance.log.warning("%s not valid to set/unset", invalid_flags)
for flag in list(valid_flags):
if args[flag]:
self._serve()
self.server.socket.close()
except CannotServe as cs:
- self.log.warn("server not running: %s", cs)
+ self.log.warning("server not running: %s", cs)
except:
self.log.error(str(traceback.format_exc()))
def handle_command(self, inbuf, command):
- self.log.warn("Handling command: '%s'" % str(command))
+ self.log.warning("Handling command: '%s'" % str(command))
if command['prefix'] == "restful create-key":
if command['key_name'] in self.keys:
return 0, self.keys[command['key_name']], ""
fs_handle.lutimes(target_path, (time.mktime(source_statx["atime"].timetuple()),
time.mktime(source_statx["mtime"].timetuple())))
except cephfs.Error as e:
- log.warn("error synchronizing attrs for {0} ({1})".format(target_path, e))
+ log.warning("error synchronizing attrs for {0} ({1})".format(target_path, e))
raise e
def bulk_copy(fs_handle, source_path, dst_path, should_cancel):
copy_file(fs_handle, d_full_src, d_full_dst, mo, cancel_check=should_cancel)
else:
handled = False
- log.warn("cptree: (IGNORE) {0}".format(d_full_src))
+ log.warning("cptree: (IGNORE) {0}".format(d_full_src))
if handled:
sync_attrs(fs_handle, d_full_dst, stx)
d = fs_handle.readdir(dir_handle)
raise VolumeException(-errno.EINVAL, "cannot cancel -- clone finished (check clone status)")
track_idx = self.get_clone_tracking_index(fs_handle, clone_subvolume)
if not track_idx:
- log.warn("cannot lookup clone tracking index for {0}".format(clone_subvolume.base_path))
+ log.warning("cannot lookup clone tracking index for {0}".format(clone_subvolume.base_path))
raise VolumeException(-errno.EINVAL, "error canceling clone")
if OpSm.is_init_state("clone", clone_state):
# clone has not started yet -- cancel right away.
if me.errno == -errno.ENOENT:
return False
else:
- log.warn("error checking protected snap {0} ({1})".format(snapname, me))
+ log.warning("error checking protected snap {0} ({1})".format(snapname, me))
raise VolumeException(-errno.EINVAL, "snapshot protection check failed")
else:
return True
self.metadata_mgr.update_section("protected snaps", snapname, "1")
self.metadata_mgr.flush()
except MetadataMgrException as me:
- log.warn("error updating protected snap list ({0})".format(me))
+ log.warning("error updating protected snap list ({0})".format(me))
raise VolumeException(-errno.EINVAL, "error protecting snapshot")
def _unprotect_snapshot(self, snapname):
self.metadata_mgr.remove_option("protected snaps", snapname)
self.metadata_mgr.flush()
except MetadataMgrException as me:
- log.warn("error updating protected snap list ({0})".format(me))
+ log.warning("error updating protected snap list ({0})".format(me))
raise VolumeException(-errno.EINVAL, "error unprotecting snapshot")
def protect_snapshot(self, snapname):
track_idx = index.track(tgt_subvolume.base_path)
self._add_snap_clone(track_idx, snapname)
except (IndexException, MetadataMgrException) as e:
- log.warn("error creating clone index: {0}".format(e))
+ log.warning("error creating clone index: {0}".format(e))
raise VolumeException(-errno.EINVAL, "error cloning subvolume")
def detach_snapshot(self, snapname, track_id):
index.untrack(track_id)
self._remove_snap_clone(track_id)
except (IndexException, MetadataMgrException) as e:
- log.warn("error delining snapshot from clone: {0}".format(e))
+ log.warning("error delining snapshot from clone: {0}".format(e))
raise VolumeException(-errno.EINVAL, "error delinking snapshot from clone")
target_subvolume.remove()
self.purge_queue.queue_job(volname)
except Exception as e:
- log.warn("failed to cleanup clone subvolume '{0}' ({1})".format(target_subvolname, e))
+ log.warning("failed to cleanup clone subvolume '{0}' ({1})".format(target_subvolname, e))
raise ve
def _clone_subvolume_snapshot(self, fs_handle, volname, subvolume, **kwargs):