#50 03 * * 1,6 CEPH_BRANCH=master; MACHINE_NAME=smithi;/home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s samba -e $CEPH_QA_EMAIL
59 03 * * 1,6 CEPH_BRANCH=master; MACHINE_NAME=mira; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s ceph-deploy -k distro -e $CEPH_QA_EMAIL
05 04 * * 1,6 CEPH_BRANCH=master; MACHINE_NAME=ovh; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s ceph-ansible -k distro -e $CEPH_QA_EMAIL
-### The suite below must run on bare-metal because it's perfromance suite and run 3 times to produce more data points
+### The suite below must run on bare-metal because it's a performance suite and runs 3 times to produce more data points
57 03 * * * CEPH_BRANCH=master; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s perf-basic -k distro -e $CEPH_QA_EMAIL -N 3
09 03 * * 6 CEPH_BRANCH=master; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s powercycle -k distro -e $CEPH_QA_EMAIL
# be specified multiple times for specifying multiple datastores.
# Either one of vmware_datastore_name or vmware_datastores is
# required. The datastore name should be specified after its
-# datacenter path, seperated by ":". An optional weight may be given
-# after the datastore name, seperated again by ":". Thus, the required
+# datacenter path, separated by ":". An optional weight may be given
+# after the datastore name, separated again by ":". Thus, the required
# format becomes <datacenter_path>:<datastore_name>:<optional_weight>.
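# For illustration, with hypothetical datacenter and datastore names a
# multi-datastore value could look like:
#   vmware_datastores = dc-1:datastore-1:100
#   vmware_datastores = dc-1:datastore-2:50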
# When adding an image, the datastore with highest weight will be
# selected, unless there is not enough free space available in cases
#os_region_name=<None>
os_region_name=RegionOne
-# Location of ca certicates file to use for cinder client requests.
+# Location of ca certificates file to use for cinder client requests.
# (string value)
#cinder_ca_certificates_file=<None>
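# For example (hypothetical path):
#cinder_ca_certificates_file=/etc/ssl/certs/ca-certificates.crt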
ceph tell osd.0 injectargs '--osd_max_markdown_period '$period'' || return 1
markdown_N_impl $(($count+1)) $period $sleeptime
- # down N+1 times ,the osd.0 shoud die
+ # down N+1 times, the osd.0 should die
ceph osd tree | grep down | grep osd.0 || return 1
}
if benchmark_type == 'cosbench':
# install cosbench
- self.log.info('install dependecies for cosbench')
+ self.log.info('install dependencies for cosbench')
if system_type == 'rpm':
cosbench_depends = ['wget', 'unzip', 'java-1.7.0-openjdk', 'curl']
else:
args=['sudo', 'apt-get', '-y', 'purge', 'openjdk-11*'])
# use our own version of cosbench
cosbench_version = 'cosbench-0.4.2.c3.1'
- # contains additonal parameter "-N" to nc
+ # contains additional parameter "-N" to nc
cosbench_location = 'http://drop.ceph.com/qa/cosbench-0.4.2.c3.1.zip'
cosbench_dir = os.path.join(testdir, cosbench_version)
self.ctx.cluster.run(args=['mkdir', '-p', '-m0755', '--', cosbench_dir])
@contextlib.contextmanager
def valgrind_post(ctx, config):
"""
- After the tests run, look throught all the valgrind logs. Exceptions are raised
+ After the tests run, look through all the valgrind logs. Exceptions are raised
if textual errors occurred in the logs, or if valgrind exceptions were detected in
the logs.
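# A minimal, illustrative sketch of the scan described above (it assumes
# valgrind ran with --xml=yes and that the XML logs are readable locally;
# the real task fetches and filters them from the remote hosts):
import xml.etree.ElementTree as ET

def _valgrind_error_kinds(log_path):
    # Collect the <kind> of every <error> element in one valgrind XML log,
    # e.g. 'InvalidRead' or 'Leak_DefinitelyLost'.
    return [err.findtext('kind')
            for err in ET.parse(log_path).getroot().iter('error')]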
Create directories needed for the cluster.
Create remote journals for all osds.
Create and set keyring.
- Copy the monmap to tht test systems.
+ Copy the monmap to the test systems.
Setup mon nodes.
Setup mds nodes.
Mkfs osd nodes.
def first_in_ceph_log(pattern, excludes):
"""
- Find the first occurence of the pattern specified in the Ceph log,
+ Find the first occurrence of the pattern specified in the Ceph log.
Returns None if none found.
:param pattern: Pattern scanned for.
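# A local-file sketch of the idea (the real helper greps the cluster log
# on a monitor host over ssh; the default path here is an assumption):
import re

def _first_in_log(pattern, excludes, path='/var/log/ceph/ceph.log'):
    # Return the first line matching `pattern` that matches none of the
    # `excludes` patterns, or None if no such line exists.
    with open(path) as f:
        for line in f:
            if re.search(pattern, line) and \
                    not any(re.search(e, line) for e in excludes):
                return line
    return None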
First make sure all pgs are active and clean.
Next scrub all osds.
Then periodically check until all pgs have scrub time stamps that
- indicate the last scrub completed. Time out if no progess is made
+ indicate the last scrub completed. Time out if no progress is made
here after two minutes.
"""
retries = 40
if gap_cnt % 6 == 0:
for (pgid, tmval) in timez:
# re-request scrub every so often in case the earlier
- # request was missed. do not do it everytime because
+ # request was missed. do not do it every time because
# the scrub may be in progress or not reported yet and
# we will starve progress.
manager.raw_cluster_cmd('pg', 'deep-scrub', pgid)
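# A standalone sketch of the polling pattern the docstring above describes:
# wait until every pg shows a fresh scrub stamp, re-request the scrub only
# every few passes, and give up after a fixed number of tries. The two
# callables and the sleep interval are assumptions; retries=40 and the
# every-6th-pass re-request mirror the snippet above (40 * 3s is roughly
# the two-minute timeout mentioned in the docstring).
import time

def wait_for_fresh_scrub_stamps(get_stamps, request_scrub,
                                retries=40, interval=3):
    # get_stamps(): hypothetical callable returning {pgid: last_scrub_stamp};
    # request_scrub(pgid): hypothetical callable re-requesting a deep-scrub.
    baseline = get_stamps()
    for gap_cnt in range(retries):
        current = get_stamps()
        if all(current[pg] != baseline[pg] for pg in baseline):
            return True                  # every pg has a fresh scrub stamp
        if gap_cnt % 6 == 0:
            # re-request every so often in case the earlier request was
            # missed, but not on every pass (the scrub may just be slow).
            for pg in baseline:
                if current[pg] == baseline[pg]:
                    request_scrub(pg)
        time.sleep(interval)
    return False                         # timed out without progress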
def wait_run_admin_socket(self, service_type,
service_id, args=['version'], timeout=75, stdout=None):
"""
- If osd_admin_socket call suceeds, return. Otherwise wait
+ If osd_admin_socket call succeeds, return. Otherwise wait
five seconds and try again.
"""
if stdout is None:
def test_getattr_caps(self):
"""
Check if MDS recognizes the 'mask' parameter of open request.
- The paramter allows client to request caps when opening file
+ The parameter allows the client to request caps when opening a file
"""
if not isinstance(self.mount_a, FuseMount):
p = self.mount_a.open_background("testfile")
self.mount_b.wait_for_visible("testfile")
- # this tiggers a lookup request and an open request. The debug
+ # this triggers a lookup request and an open request. The debug
# code will check if lookup/open reply contains xattrs
self.mount_b.run_shell(["cat", "testfile"])
def check_stuck(manager, num_inactive, num_unclean, num_stale, timeout=10):
"""
- Do checks. Make sure get_stuck_pgs return the right amout of information, then
+ Do checks. Make sure get_stuck_pgs returns the right amount of information, then
extract health information from the raw_cluster_cmd and compare the results with
values passed in. This passes if all asserts pass.
"""
Test filestore/filejournal handling of non-idempotent events.
- Currently this is a kludge; we require the ceph task preceeds us just
+ Currently this is a kludge; we require the ceph task precedes us just
so that we get the tarball installed to run the test binary.
:param ctx: Context
"""
Sets up an nfs mount on the remote that the guest can use to
store logs. This nfs mount is also used to touch a file
- at the end of the test to indiciate if the test was successful
+ at the end of the test to indicate if the test was successful
or not.
"""
export_dir = "/export/{client}".format(client=client)
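# For illustration, publishing that directory to the guest might use an
# /etc/exports entry along these lines (the export options shown are
# assumptions, not necessarily what this task writes):
#   /export/client.0  *(rw,sync,no_root_squash,no_subtree_check)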
class Scrubber:
"""
- Scrubbing is actually performed during initialzation
+ Scrubbing is actually performed during initialization
"""
def __init__(self, manager, config):
"""
r = remote.run(args=['sudo', 'ps', '-eaf', run.Raw('|'),
'grep', 'ceph'], stdout=StringIO())
if r.stdout.getvalue().find('Active: inactive'):
- log.info("Sucessfully stopped all ceph services")
+ log.info("Successfully stopped all ceph services")
else:
log.info("Failed to stop ceph services")
stdout=StringIO())
log.info(r.stdout.getvalue())
if r.stdout.getvalue().find('Active: active'):
- log.info("Sucessfully started all Ceph services")
+ log.info("Successfully started all Ceph services")
else:
log.info("info", "Failed to start Ceph services")
r = remote.run(args=['sudo', 'ps', '-eaf', run.Raw('|'),
stdout=StringIO(), check_status=False)
log.info(r.stdout.getvalue())
if r.stdout.getvalue().find('Active: inactive'):
- log.info("Sucessfully stopped single osd ceph service")
+ log.info("Successfully stopped single osd ceph service")
else:
log.info("Failed to stop ceph osd services")
remote.run(args=['sudo', 'systemctl', 'start',
r = remote.run(args=['sudo', 'systemctl', 'status', mon_name],
stdout=StringIO(), check_status=False)
if r.stdout.getvalue().find('Active: inactive'):
- log.info("Sucessfully stopped single mon ceph service")
+ log.info("Successfully stopped single mon ceph service")
else:
log.info("Failed to stop ceph mon service")
remote.run(args=['sudo', 'systemctl', 'start', mon_name])
r = remote.run(args=['sudo', 'systemctl', 'status', mgr_name],
stdout=StringIO(), check_status=False)
if r.stdout.getvalue().find('Active: inactive'):
- log.info("Sucessfully stopped single ceph mgr service")
+ log.info("Successfully stopped single ceph mgr service")
else:
log.info("Failed to stop ceph mgr service")
remote.run(args=['sudo', 'systemctl', 'start', mgr_name])
r = remote.run(args=['sudo', 'systemctl', 'status', mds_name],
stdout=StringIO(), check_status=False)
if r.stdout.getvalue().find('Active: inactive'):
- log.info("Sucessfully stopped single ceph mds service")
+ log.info("Successfully stopped single ceph mds service")
else:
log.info("Failed to stop ceph mds service")
remote.run(args=['sudo', 'systemctl', 'start', mds_name])
# actually expect IEC units to be passed.
# Keep in mind that all integer based options that are based on bytes
# (i.e., INT, LONG, U32, U64) will accept IEC unit modifiers, as well as SI
- # unit modifiers (for backwards compatibility and convinience) and be parsed
+ # unit modifiers (for backwards compatibility and convenience) and be parsed
# to base 2.
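# Worked example of that rule: for a bytes-based option the SI form "10K"
# and the IEC form "10Ki" are both parsed as 10 * 1024 = 10240 bytes, and
# "1G"/"1Gi" both as 2^30 = 1073741824 bytes.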
initial_value=$(get_config_value_or_die "mon.a" "mon_data_size_warn")
$SUDO ceph daemon mon.a config set mon_data_size_warn 15000000000
fcntl.flock(f1, fcntl.LOCK_SH | fcntl.LOCK_NB)
"""
- is flock interruptable?
+ is flock interruptible?
"""
signal.signal(signal.SIGALRM, handler)
signal.alarm(5)
fcntl.fcntl(f2, fcntl.F_OFD_SETLK, lockdata)
"""
- is poxis lock interruptable?
+ is posix lock interruptible?
"""
signal.signal(signal.SIGALRM, handler)
signal.alarm(5)
dir1=`find 1/ | wc -w`
dir2=`find 2/.snap/barsnap2/a/b/c | wc -w`
#diff $dir1 $dir2 && echo "Success!"
-test $dir1==$dir2 && echo "Sucess!"
+test $dir1 -eq $dir2 && echo "Success!"
rmdir 1/.snap/*
rmdir 2/.snap/*
echo "OK"
ua=`uuidgen`
ub=`uuidgen`
-# shoudl get same id with same uuid
+# should get same id with same uuid
na=`ceph osd create $ua`
test $na -eq `ceph osd create $ua`
run_expect_nosignal() {
echo "RUN_EXPECT_NOSIGNAL: " "$@"
do_run "$@"
- [ $? -ge 128 ] && die "expected succes or fail, but got signal! cmd: $@"
+ [ $? -ge 128 ] && die "expected success or fail, but got signal! cmd: $@"
}
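# Note on the status check above: a command killed by a signal exits with
# status 128 + the signal number (e.g. SIGSEGV, signal 11, yields 139), so
# any status >= 128 means "terminated by a signal" rather than an ordinary
# success or failure exit.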
run() {
(($bucket->delete_key($_[0])) and return 0) or return 1;
}
-# Readd the file back to bucket
+# Re-add the file to the bucket
sub readd_file {
system("dd if=/dev/zero of=/tmp/10MBfile1 bs=10485760 count=1");
$mytestfilename1 = '10MBfile1';