while count > max_mds:
targets = sorted(self.get_ranks(status=status), key=lambda r: r['rank'], reverse=True)
target = targets[0]
- log.info("deactivating rank %d" % target['rank'])
+ log.debug("deactivating rank %d" % target['rank'])
self.deactivate(target['rank'])
status = self.wait_for_daemons(skip_max_mds_check=True)
count = len(list(self.get_ranks(status=status)))
else:
data_pool_name = self.data_pool_name
- log.info("Creating filesystem '{0}'".format(self.name))
+ log.debug("Creating filesystem '{0}'".format(self.name))
self.mon_manager.raw_cluster_cmd('osd', 'pool', 'create',
self.metadata_pool_name, self.pgs_per_fs_pool.__str__())
'--allow-dangerous-metadata-overlay')
else:
if self.ec_profile and 'disabled' not in self.ec_profile:
- log.info("EC profile is %s", self.ec_profile)
+ log.debug("EC profile is %s", self.ec_profile)
cmd = ['osd', 'erasure-code-profile', 'set', data_pool_name]
cmd.extend(self.ec_profile)
self.mon_manager.raw_cluster_cmd(*cmd)
else:
raise
- log.info("are_daemons_healthy: mds map: {0}".format(mds_map))
+ log.debug("are_daemons_healthy: mds map: {0}".format(mds_map))
for mds_id, mds_status in mds_map['info'].items():
if mds_status['state'] not in ["up:active", "up:standby", "up:standby-replay"]:
elif mds_status['state'] == 'up:active':
active_count += 1
- log.info("are_daemons_healthy: {0}/{1}".format(
+ log.debug("are_daemons_healthy: {0}/{1}".format(
active_count, mds_map['max_mds']
))
if not skip_max_mds_check:
if active_count > mds_map['max_mds']:
- log.info("are_daemons_healthy: number of actives is greater than max_mds: {0}".format(mds_map))
+ log.debug("are_daemons_healthy: number of actives is greater than max_mds: {0}".format(mds_map))
return False
elif active_count == mds_map['max_mds']:
# The MDSMap says these guys are active, but let's check they really are
else:
return False
else:
- log.info("are_daemons_healthy: skipping max_mds check")
+ log.debug("are_daemons_healthy: skipping max_mds check")
return True
def get_daemon_names(self, state=None, status=None):
elapsed += 1
if elapsed > timeout:
- log.info("status = {0}".format(status))
+ log.debug("status = {0}".format(status))
raise RuntimeError("Timed out waiting for MDS daemons to become healthy")
status = self.status()
journal_header_dump = self.get_metadata_object('Journaler::Header', journal_header_object)
version = journal_header_dump['journal_header']['stream_format']
- log.info("Read journal version {0}".format(version))
+ log.debug("Read journal version {0}".format(version))
return version
try:
mds_info = status.get_rank(self.id, rank)
current_state = mds_info['state'] if mds_info else None
- log.info("Looked up MDS state for mds.{0}: {1}".format(rank, current_state))
+ log.debug("Looked up MDS state for mds.{0}: {1}".format(rank, current_state))
except:
mdsmap = self.get_mds_map(status=status)
if rank in mdsmap['failed']:
- log.info("Waiting for rank {0} to come back.".format(rank))
+ log.debug("Waiting for rank {0} to come back.".format(rank))
current_state = None
else:
raise
# mds_info is None if no daemon with this ID exists in the map
mds_info = status.get_mds(mds_id)
current_state = mds_info['state'] if mds_info else None
- log.info("Looked up MDS state for {0}: {1}".format(mds_id, current_state))
+ log.debug("Looked up MDS state for {0}: {1}".format(mds_id, current_state))
else:
# In general, look for a single MDS
states = [m['state'] for m in status.get_ranks(self.id)]
current_state = reject
else:
current_state = None
- log.info("mapped states {0} to {1}".format(states, current_state))
+ log.debug("mapped states {0} to {1}".format(states, current_state))
elapsed = time.time() - started_at
if current_state == goal_state:
- log.info("reached state '{0}' in {1}s".format(current_state, elapsed))
+ log.debug("reached state '{0}' in {1}s".format(current_state, elapsed))
return elapsed
elif reject is not None and current_state == reject:
raise RuntimeError("MDS in reject state {0}".format(current_state))
missing = set(want_objects) - set(exist_objects)
if missing:
- log.info("Objects missing (ino {0}, size {1}): {2}".format(
+ log.debug("Objects missing (ino {0}, size {1}): {2}".format(
ino, size, missing
))
return False
else:
- log.info("All objects for ino {0} size {1} found".format(ino, size))
+ log.debug("All objects for ino {0} size {1} found".format(ino, size))
return True
def data_objects_absent(self, ino, size):
present = set(want_objects) & set(exist_objects)
if present:
- log.info("Objects not absent (ino {0}, size {1}): {2}".format(
+ log.debug("Objects not absent (ino {0}, size {1}): {2}".format(
ino, size, present
))
return False
else:
- log.info("All objects for ino {0} size {1} are absent".format(ino, size))
+ log.debug("All objects for ino {0} size {1} are absent".format(ino, size))
return True
def dirfrag_exists(self, ino, frag):
t1 = datetime.datetime.now()
r = self.tool_remote.sh(script=base_args + args, stdout=StringIO()).strip()
duration = datetime.datetime.now() - t1
- log.info("Ran {0} in time {1}, result:\n{2}".format(
+ log.debug("Ran {0} in time {1}, result:\n{2}".format(
base_args + args, duration, r
))
return r
@classmethod
def _request(cls, url, method, data=None, params=None, set_cookies=False):
url = "{}{}".format(cls._base_uri, url)
- log.info("Request %s to %s", method, url)
+ log.debug("Request %s to %s", method, url)
headers = {}
cookies = {}
if cls._token:
return None
if cls._resp.status_code != 202:
- log.info("task finished immediately")
+ log.debug("task finished immediately")
return res
cls._assertIn('name', res)
res_task = None
while retries > 0 and not res_task:
retries -= 1
- log.info("task (%s, %s) is still executing", task_name,
- task_metadata)
+ log.debug("task (%s, %s) is still executing", task_name, task_metadata)
time.sleep(1)
_res = cls._get('/api/task?name={}'.format(task_name))
cls._assertEq(cls._resp.status_code, 200)
raise Exception("Waiting for task ({}, {}) to finish timed out. {}"
.format(task_name, task_metadata, _res))
- log.info("task (%s, %s) finished", task_name, task_metadata)
+ log.debug("task (%s, %s) finished", task_name, task_metadata)
if res_task['success']:
if method == 'POST':
cls._resp.status_code = 201
@classmethod
def _ceph_cmd(cls, cmd):
    """Run a ceph CLI command against the test cluster.

    :param cmd: list of CLI argument strings, e.g. ``['osd', 'pool', 'ls']``
    :return: the command's result as returned by
        ``mon_manager.raw_cluster_cmd`` (logged verbatim at debug level)
    """
    res = cls.mgr_cluster.mon_manager.raw_cluster_cmd(*cmd)
    log.debug("command result: %s", res)
    return res
@classmethod
def _ceph_cmd_result(cls, cmd):
    """Run a ceph CLI command and return only its exit status.

    :param cmd: list of CLI argument strings
    :return: integer exit status from
        ``mon_manager.raw_cluster_cmd_result``
    """
    exitstatus = cls.mgr_cluster.mon_manager.raw_cluster_cmd_result(*cmd)
    log.debug("command exit status: %d", exitstatus)
    return exitstatus
def set_config_key(self, key, value):
if is_disabled():
return
- log.info("Unloading Mgr module %s ...", module_name)
+ log.debug("Unloading Mgr module %s ...", module_name)
cls.mgr_cluster.mon_manager.raw_cluster_cmd('mgr', 'module', 'disable', module_name)
cls.wait_until_true(is_disabled, timeout=30)
if module_name in always_on:
return
- log.info("Loading Mgr module %s ...", module_name)
+ log.debug("Loading Mgr module %s ...", module_name)
initial_gid = initial_mgr_map['active_gid']
cls.mgr_cluster.mon_manager.raw_cluster_cmd(
"mgr", "module", "enable", module_name, "--force")
mgr_map = cls.mgr_cluster.get_mgr_map()
done = mgr_map['active_gid'] != initial_gid and mgr_map['available']
if done:
- log.info("Restarted after module load (new active {0}/{1})".format(
+ log.debug("Restarted after module load (new active {0}/{1})".format(
mgr_map['active_name'], mgr_map['active_gid']))
return done
cls.wait_until_true(has_restarted, timeout=30)
uri = mgr_map['x']['services'][service_name]
- log.info("Found {0} at {1} (daemon {2}/{3})".format(
+ log.debug("Found {0} at {1} (daemon {2}/{3})".format(
service_name, uri, mgr_map['x']['active_name'],
mgr_map['x']['active_gid']))
cls.mgr_cluster.mgr_fail(mgr_id)
for mgr_id in cls.mgr_cluster.mgr_ids:
- log.info("Using port {0} for {1} on mgr.{2}".format(
+ log.debug("Using port {0} for {1} on mgr.{2}".format(
assign_port, module_name, mgr_id
))
cls.mgr_cluster.set_module_localized_conf(module_name, mgr_id,
mgr_map = cls.mgr_cluster.get_mgr_map()
done = mgr_map['available']
if done:
- log.info("Available after assign ports (new active {0}/{1})".format(
+ log.debug("Available after assign ports (new active {0}/{1})".format(
mgr_map['active_name'], mgr_map['active_gid']))
return done
cls.wait_until_true(is_available, timeout=30)
from teuthology.config import config as teuth_config
import six
import logging
# Best-effort suppression of urllib3's InsecureRequestWarning noise
# (presumably the test client hits HTTPS endpoints with unverified
# certs — TODO confirm against callers). If urllib3 is not installed
# there is nothing to silence, so skip quietly. Narrowed from a bare
# `except:` so SystemExit/KeyboardInterrupt are no longer swallowed.
try:
    import urllib3
    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
except ImportError:
    pass
def init_log():
global log
return False
def kill(self):
    """Terminate the wrapped subprocess if it is still running.

    Delegates the actual signalling to ``safe_kill``; when the process
    has no pid or has already finished, this only logs and returns.
    """
    log.debug("kill ")
    if self.subproc.pid and not self.finished:
        log.debug("kill: killing pid {0} ({1})".format(
            self.subproc.pid, self.args))
        safe_kill(self.subproc.pid)
    else:
        log.debug("kill: already terminated ({0})".format(self.args))
@property
def stdin(self):
args[0]
))
- log.info("Running {0}".format(args))
+ log.debug("Running {0}".format(args))
if shell:
subproc = subprocess.Popen(quote(args),
for line in lines:
if line.find("ceph-{0} -i {1}".format(self.daemon_type, self.daemon_id)) != -1:
- log.info("Found ps line for daemon: {0}".format(line))
+ log.debug("Found ps line for daemon: {0}".format(line))
return int(line.split()[0])
if opt_log_ps_output:
- log.info("No match for {0} {1}: {2}".format(
+ log.debug("No match for {0} {1}: {2}".format(
self.daemon_type, self.daemon_id, ps_txt))
else:
- log.info("No match for {0} {1}".format(self.daemon_type,
+ log.debug("No match for {0} {1}".format(self.daemon_type,
self.daemon_id))
return None
return
pid = self._get_pid()
- log.info("Killing PID {0} for {1}.{2}".format(pid, self.daemon_type, self.daemon_id))
+ log.debug("Killing PID {0} for {1}.{2}".format(pid, self.daemon_type, self.daemon_id))
os.kill(pid, signal.SIGTERM)
waited = 0
while pid is not None:
new_pid = self._get_pid()
if new_pid is not None and new_pid != pid:
- log.info("Killing new PID {0}".format(new_pid))
+ log.debug("Killing new PID {0}".format(new_pid))
pid = new_pid
os.kill(pid, signal.SIGTERM)
os.kill(self._get_pid(), sig)
if not silent:
- log.info("Sent signal {0} to {1}.{2}".format(sig, self.daemon_type, self.daemon_id))
+ log.debug("Sent signal {0} to {1}.{2}".format(sig, self.daemon_type, self.daemon_id))
def safe_kill(pid):
# Previous mount existed, reuse the old name
name = self.fs.name
self.fs = LocalFilesystem(self.ctx, name=name)
- log.info('Wait for MDS to reach steady state...')
+ log.debug('Wait for MDS to reach steady state...')
self.fs.wait_for_daemons()
- log.info('Ready to start {}...'.format(type(self).__name__))
+ log.debug('Ready to start {}...'.format(type(self).__name__))
@property
def _prefix(self):
# Previous mount existed, reuse the old name
name = self.fs.name
self.fs = LocalFilesystem(self.ctx, name=name)
- log.info('Wait for MDS to reach steady state...')
+ log.debug('Wait for MDS to reach steady state...')
self.fs.wait_for_daemons()
- log.info('Ready to start {}...'.format(type(self).__name__))
+ log.debug('Ready to start {}...'.format(type(self).__name__))
@property
def _prefix(self):
# Before starting ceph-fuse process, note the contents of
# /sys/fs/fuse/connections
pre_mount_conns = list_connections()
- log.info("Pre-mount connections: {0}".format(pre_mount_conns))
+ log.debug("Pre-mount connections: {0}".format(pre_mount_conns))
prefix = [os.path.join(BIN_PREFIX, "ceph-fuse")]
if os.getuid() != 0:
self.mountpoint
], wait=False)
- log.info("Mounting client.{0} with pid {1}".format(self.client_id, self.fuse_daemon.subproc.pid))
+ log.debug("Mounting client.{0} with pid {1}".format(self.client_id, self.fuse_daemon.subproc.pid))
# Wait for the connection reference to appear in /sys
waited = 0
))
post_mount_conns = list_connections()
- log.info("Post-mount connections: {0}".format(post_mount_conns))
+ log.debug("Post-mount connections: {0}".format(post_mount_conns))
# Record our fuse connection number so that we can use it when
# forcing an unmount
# certain teuthology tests want to run tasks in parallel
self.lock = threading.RLock()
- self.log = lambda x: log.info(x)
+ self.log = lambda x: log.debug(x)
# Don't bother constructing a map of pools: it should be empty
# at test cluster start, and in any case it would be out of date
existing_str += "\n[{0}]\n".format(subsys)
for key, val in kvs.items():
# Comment out existing instance if it exists
- log.info("Searching for existing instance {0}/{1}".format(
+ log.debug("Searching for existing instance {0}/{1}".format(
key, subsys
))
existing_section = re.search("^\[{0}\]$([\n]|[^\[])+".format(
existing_val = re.search("^\s*[^#]({0}) =".format(key), section_str, re.MULTILINE)
if existing_val:
start = existing_section.start() + existing_val.start(1)
- log.info("Found string to replace at {0}".format(
+ log.debug("Found string to replace at {0}".format(
start
))
existing_str = existing_str[0:start] + "#" + existing_str[start:]
self.mds_ids = list(self.mds_ids)
- log.info("Discovered MDS IDs: {0}".format(self.mds_ids))
+ log.debug("Discovered MDS IDs: {0}".format(self.mds_ids))
self.mon_manager = LocalCephManager()
def enumerate_methods(s):
- log.info("e: {0}".format(s))
+ log.debug("e: {0}".format(s))
for t in s._tests:
if isinstance(t, suite.BaseTestSuite):
for sub in enumerate_methods(t):
def load_tests(modules, loader):
    """Build the test suite to execute.

    :param modules: iterable of dotted test names
        (e.g. ``cephfs.test_auto_repair``); falsy means "run all
        cephfs tests" via discovery.
    :param loader: a unittest-style ``TestLoader``.
    :return: a ``suite.TestSuite`` of explicitly named tests, or the
        suite produced by ``loader.discover`` over the ``cephfs``
        directory next to this file.
    """
    if modules:
        log.debug("Executing modules: {0}".format(modules))
        # Test names like cephfs.test_auto_repair
        module_suites = [loader.loadTestsFromName(mod_name)
                         for mod_name in modules]
        log.debug("Loaded: {0}".format(module_suites))
        return suite.TestSuite(module_suites)
    else:
        log.debug("Executing all cephfs tests")
        return loader.discover(
            os.path.join(os.path.dirname(os.path.abspath(__file__)), "cephfs")
        )
with open(logpath, 'w') as logfile:
logfile.write('')
init_log()
- log.info('logging in a fresh file now...')
+ log.debug('logging in a fresh file now...')
def exec_test():
# Parse arguments
if not is_named:
victims.append((case, method))
- log.info("Disabling {0} tests because of is_for_teuthology or needs_trimming".format(len(victims)))
+ log.debug("Disabling {0} tests because of is_for_teuthology or needs_trimming".format(len(victims)))
for s, method in victims:
s._tests.remove(method)