From 4b0e2c8ed4832719b632c31a8c067c87b1676392 Mon Sep 17 00:00:00 2001 From: Kefu Chai Date: Tue, 18 Sep 2018 11:21:13 +0800 Subject: [PATCH] qa: fix typos Signed-off-by: Kefu Chai --- qa/crontab/teuthology-cronjobs | 2 +- .../openstack/files/glance-api.template.conf | 6 +++--- qa/standalone/osd/osd-markdown.sh | 2 +- qa/tasks/cbt.py | 4 ++-- qa/tasks/ceph.py | 10 +++++----- qa/tasks/ceph_manager.py | 2 +- qa/tasks/cephfs/test_misc.py | 4 ++-- qa/tasks/dump_stuck.py | 2 +- qa/tasks/filestore_idempotent.py | 2 +- qa/tasks/qemu.py | 2 +- qa/tasks/scrub.py | 2 +- qa/tasks/systemd.py | 12 ++++++------ qa/workunits/cephtool/test.sh | 2 +- qa/workunits/fs/misc/filelock_interrupt.py | 4 ++-- qa/workunits/fs/snaps/snaptest-parents.sh | 2 +- qa/workunits/mon/osd.sh | 2 +- qa/workunits/rados/test_rados_tool.sh | 2 +- qa/workunits/rgw/s3_utilities.pm | 2 +- 18 files changed, 32 insertions(+), 32 deletions(-) diff --git a/qa/crontab/teuthology-cronjobs b/qa/crontab/teuthology-cronjobs index c1f68cf0ee5..d15f72ce2a1 100644 --- a/qa/crontab/teuthology-cronjobs +++ b/qa/crontab/teuthology-cronjobs @@ -73,7 +73,7 @@ CEPH_QA_EMAIL="ceph-qa@lists.ceph.com" #50 03 * * 1,6 CEPH_BRANCH=master; MACHINE_NAME=smithi;/home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s samba -e $CEPH_QA_EMAIL 59 03 * * 1,6 CEPH_BRANCH=master; MACHINE_NAME=mira; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s ceph-deploy -k distro -e $CEPH_QA_EMAIL 05 04 * * 1,6 CEPH_BRANCH=master; MACHINE_NAME=ovh; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s ceph-ansible -k distro -e $CEPH_QA_EMAIL -### The suite below must run on bare-metal because it's perfromance suite and run 3 times to produce more data points +### The suite below must run on bare-metal because it's performance suite and run 3 times to produce more data points 57 03 * * * CEPH_BRANCH=master; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s perf-basic -k distro -e $CEPH_QA_EMAIL -N 3 09 03 * * 6 CEPH_BRANCH=master; MACHINE_NAME=smithi; /home/teuthology/bin/cron_wrapper teuthology-suite -v -c $CEPH_BRANCH -n 7 -m $MACHINE_NAME -s powercycle -k distro -e $CEPH_QA_EMAIL diff --git a/qa/qa_scripts/openstack/files/glance-api.template.conf b/qa/qa_scripts/openstack/files/glance-api.template.conf index 95611d4e18c..956fb1bf240 100644 --- a/qa/qa_scripts/openstack/files/glance-api.template.conf +++ b/qa/qa_scripts/openstack/files/glance-api.template.conf @@ -893,8 +893,8 @@ rbd_store_ceph_conf=/etc/ceph/ceph.conf # be specified multiple times for specifying multiple datastores. # Either one of vmware_datastore_name or vmware_datastores is # required. The datastore name should be specified after its -# datacenter path, seperated by ":". An optional weight may be given -# after the datastore name, seperated again by ":". Thus, the required +# datacenter path, separated by ":". An optional weight may be given +# after the datastore name, separated again by ":". Thus, the required # format becomes ::. # When adding an image, the datastore with highest weight will be # selected, unless there is not enough free space available in cases @@ -970,7 +970,7 @@ rbd_store_ceph_conf=/etc/ceph/ceph.conf #os_region_name= os_region_name=RegionOne -# Location of ca certicates file to use for cinder client requests. +# Location of ca certificates file to use for cinder client requests. 
# (string value) #cinder_ca_certificates_file= diff --git a/qa/standalone/osd/osd-markdown.sh b/qa/standalone/osd/osd-markdown.sh index ca59ef47b26..3446a27548e 100755 --- a/qa/standalone/osd/osd-markdown.sh +++ b/qa/standalone/osd/osd-markdown.sh @@ -70,7 +70,7 @@ function TEST_markdown_exceed_maxdown_count() { ceph tell osd.0 injectargs '--osd_max_markdown_period '$period'' || return 1 markdown_N_impl $(($count+1)) $period $sleeptime - # down N+1 times ,the osd.0 shoud die + # down N+1 times ,the osd.0 should die ceph osd tree | grep down | grep osd.0 || return 1 } diff --git a/qa/tasks/cbt.py b/qa/tasks/cbt.py index ae2ae40f992..31614cf349e 100644 --- a/qa/tasks/cbt.py +++ b/qa/tasks/cbt.py @@ -102,7 +102,7 @@ class CBT(Task): if benchmark_type == 'cosbench': # install cosbench - self.log.info('install dependecies for cosbench') + self.log.info('install dependencies for cosbench') if system_type == 'rpm': cosbench_depends = ['wget', 'unzip', 'java-1.7.0-openjdk', 'curl'] else: @@ -119,7 +119,7 @@ class CBT(Task): args=['sudo', 'apt-get', '-y', 'purge', 'openjdk-11*']) # use our own version of cosbench cosbench_version = 'cosbench-0.4.2.c3.1' - # contains additonal parameter "-N" to nc + # contains additional parameter "-N" to nc cosbench_location = 'http://drop.ceph.com/qa/cosbench-0.4.2.c3.1.zip' cosbench_dir = os.path.join(testdir, cosbench_version) self.ctx.cluster.run(args=['mkdir', '-p', '-m0755', '--', cosbench_dir]) diff --git a/qa/tasks/ceph.py b/qa/tasks/ceph.py index ce9a243269f..a55a69d8571 100644 --- a/qa/tasks/ceph.py +++ b/qa/tasks/ceph.py @@ -257,7 +257,7 @@ def assign_devs(roles, devs): @contextlib.contextmanager def valgrind_post(ctx, config): """ - After the tests run, look throught all the valgrind logs. Exceptions are raised + After the tests run, look through all the valgrind logs. Exceptions are raised if textual errors occurred in the logs, or if valgrind exceptions were detected in the logs. @@ -390,7 +390,7 @@ def cluster(ctx, config): Create directories needed for the cluster. Create remote journals for all osds. Create and set keyring. - Copy the monmap to tht test systems. + Copy the monmap to the test systems. Setup mon nodes. Setup mds nodes. Mkfs osd nodes. @@ -936,7 +936,7 @@ def cluster(ctx, config): def first_in_ceph_log(pattern, excludes): """ - Find the first occurence of the pattern specified in the Ceph log, + Find the first occurrence of the pattern specified in the Ceph log, Returns None if none found. :param pattern: Pattern scanned for. @@ -1060,7 +1060,7 @@ def osd_scrub_pgs(ctx, config): First make sure all pgs are active and clean. Next scrub all osds. Then periodically check until all pgs have scrub time stamps that - indicate the last scrub completed. Time out if no progess is made + indicate the last scrub completed. Time out if no progress is made here after two minutes. """ retries = 40 @@ -1116,7 +1116,7 @@ def osd_scrub_pgs(ctx, config): if gap_cnt % 6 == 0: for (pgid, tmval) in timez: # re-request scrub every so often in case the earlier - # request was missed. do not do it everytime because + # request was missed. do not do it every time because # the scrub may be in progress or not reported yet and # we will starve progress. 
manager.raw_cluster_cmd('pg', 'deep-scrub', pgid) diff --git a/qa/tasks/ceph_manager.py b/qa/tasks/ceph_manager.py index 7c8de372982..9638670a157 100644 --- a/qa/tasks/ceph_manager.py +++ b/qa/tasks/ceph_manager.py @@ -1464,7 +1464,7 @@ class CephManager: def wait_run_admin_socket(self, service_type, service_id, args=['version'], timeout=75, stdout=None): """ - If osd_admin_socket call suceeds, return. Otherwise wait + If osd_admin_socket call succeeds, return. Otherwise wait five seconds and try again. """ if stdout is None: diff --git a/qa/tasks/cephfs/test_misc.py b/qa/tasks/cephfs/test_misc.py index b72c92578fa..b14c86ec970 100644 --- a/qa/tasks/cephfs/test_misc.py +++ b/qa/tasks/cephfs/test_misc.py @@ -16,7 +16,7 @@ class TestMisc(CephFSTestCase): def test_getattr_caps(self): """ Check if MDS recognizes the 'mask' parameter of open request. - The paramter allows client to request caps when opening file + The parameter allows client to request caps when opening file """ if not isinstance(self.mount_a, FuseMount): @@ -34,7 +34,7 @@ class TestMisc(CephFSTestCase): p = self.mount_a.open_background("testfile") self.mount_b.wait_for_visible("testfile") - # this tiggers a lookup request and an open request. The debug + # this triggers a lookup request and an open request. The debug # code will check if lookup/open reply contains xattrs self.mount_b.run_shell(["cat", "testfile"]) diff --git a/qa/tasks/dump_stuck.py b/qa/tasks/dump_stuck.py index 39429d2c348..c5209d749a0 100644 --- a/qa/tasks/dump_stuck.py +++ b/qa/tasks/dump_stuck.py @@ -13,7 +13,7 @@ log = logging.getLogger(__name__) def check_stuck(manager, num_inactive, num_unclean, num_stale, timeout=10): """ - Do checks. Make sure get_stuck_pgs return the right amout of information, then + Do checks. Make sure get_stuck_pgs return the right amount of information, then extract health information from the raw_cluster_cmd and compare the results with values passed in. This passes if all asserts pass. diff --git a/qa/tasks/filestore_idempotent.py b/qa/tasks/filestore_idempotent.py index e091148da43..75c38a04f46 100644 --- a/qa/tasks/filestore_idempotent.py +++ b/qa/tasks/filestore_idempotent.py @@ -13,7 +13,7 @@ def task(ctx, config): """ Test filestore/filejournal handling of non-idempotent events. - Currently this is a kludge; we require the ceph task preceeds us just + Currently this is a kludge; we require the ceph task precedes us just so that we get the tarball installed to run the test binary. :param ctx: Context diff --git a/qa/tasks/qemu.py b/qa/tasks/qemu.py index 7a1abe8f5e9..d6f256f7c7b 100644 --- a/qa/tasks/qemu.py +++ b/qa/tasks/qemu.py @@ -272,7 +272,7 @@ def _setup_nfs_mount(remote, client, mount_dir): """ Sets up an nfs mount on the remote that the guest can use to store logs. This nfs mount is also used to touch a file - at the end of the test to indiciate if the test was successful + at the end of the test to indicate if the test was successful or not. 
""" export_dir = "/export/{client}".format(client=client) diff --git a/qa/tasks/scrub.py b/qa/tasks/scrub.py index 9800d1e98a5..a6194c2c7ce 100644 --- a/qa/tasks/scrub.py +++ b/qa/tasks/scrub.py @@ -63,7 +63,7 @@ def task(ctx, config): class Scrubber: """ - Scrubbing is actually performed during initialzation + Scrubbing is actually performed during initialization """ def __init__(self, manager, config): """ diff --git a/qa/tasks/systemd.py b/qa/tasks/systemd.py index 50471db31ea..d5e7b255dfd 100644 --- a/qa/tasks/systemd.py +++ b/qa/tasks/systemd.py @@ -45,7 +45,7 @@ def task(ctx, config): r = remote.run(args=['sudo', 'ps', '-eaf', run.Raw('|'), 'grep', 'ceph'], stdout=StringIO()) if r.stdout.getvalue().find('Active: inactive'): - log.info("Sucessfully stopped all ceph services") + log.info("Successfully stopped all ceph services") else: log.info("Failed to stop ceph services") @@ -55,7 +55,7 @@ def task(ctx, config): stdout=StringIO()) log.info(r.stdout.getvalue()) if r.stdout.getvalue().find('Active: active'): - log.info("Sucessfully started all Ceph services") + log.info("Successfully started all Ceph services") else: log.info("info", "Failed to start Ceph services") r = remote.run(args=['sudo', 'ps', '-eaf', run.Raw('|'), @@ -83,7 +83,7 @@ def task(ctx, config): stdout=StringIO(), check_status=False) log.info(r.stdout.getvalue()) if r.stdout.getvalue().find('Active: inactive'): - log.info("Sucessfully stopped single osd ceph service") + log.info("Successfully stopped single osd ceph service") else: log.info("Failed to stop ceph osd services") remote.run(args=['sudo', 'systemctl', 'start', @@ -96,7 +96,7 @@ def task(ctx, config): r = remote.run(args=['sudo', 'systemctl', 'status', mon_name], stdout=StringIO(), check_status=False) if r.stdout.getvalue().find('Active: inactive'): - log.info("Sucessfully stopped single mon ceph service") + log.info("Successfully stopped single mon ceph service") else: log.info("Failed to stop ceph mon service") remote.run(args=['sudo', 'systemctl', 'start', mon_name]) @@ -108,7 +108,7 @@ def task(ctx, config): r = remote.run(args=['sudo', 'systemctl', 'status', mgr_name], stdout=StringIO(), check_status=False) if r.stdout.getvalue().find('Active: inactive'): - log.info("Sucessfully stopped single ceph mgr service") + log.info("Successfully stopped single ceph mgr service") else: log.info("Failed to stop ceph mgr service") remote.run(args=['sudo', 'systemctl', 'start', mgr_name]) @@ -120,7 +120,7 @@ def task(ctx, config): r = remote.run(args=['sudo', 'systemctl', 'status', mds_name], stdout=StringIO(), check_status=False) if r.stdout.getvalue().find('Active: inactive'): - log.info("Sucessfully stopped single ceph mds service") + log.info("Successfully stopped single ceph mds service") else: log.info("Failed to stop ceph mds service") remote.run(args=['sudo', 'systemctl', 'start', mds_name]) diff --git a/qa/workunits/cephtool/test.sh b/qa/workunits/cephtool/test.sh index cdbe55b3e9d..2367beaef3f 100755 --- a/qa/workunits/cephtool/test.sh +++ b/qa/workunits/cephtool/test.sh @@ -275,7 +275,7 @@ function test_mon_injectargs_IEC() # actually expect IEC units to be passed. # Keep in mind that all integer based options that are based on bytes # (i.e., INT, LONG, U32, U64) will accept IEC unit modifiers, as well as SI - # unit modifiers (for backwards compatibility and convinience) and be parsed + # unit modifiers (for backwards compatibility and convenience) and be parsed # to base 2. 
initial_value=$(get_config_value_or_die "mon.a" "mon_data_size_warn") $SUDO ceph daemon mon.a config set mon_data_size_warn 15000000000 diff --git a/qa/workunits/fs/misc/filelock_interrupt.py b/qa/workunits/fs/misc/filelock_interrupt.py index 2a413a66e83..7b5b3e7d855 100755 --- a/qa/workunits/fs/misc/filelock_interrupt.py +++ b/qa/workunits/fs/misc/filelock_interrupt.py @@ -24,7 +24,7 @@ def main(): fcntl.flock(f1, fcntl.LOCK_SH | fcntl.LOCK_NB) """ - is flock interruptable? + is flock interruptible? """ signal.signal(signal.SIGALRM, handler) signal.alarm(5) @@ -52,7 +52,7 @@ def main(): fcntl.fcntl(f2, fcntl.F_OFD_SETLK, lockdata) """ - is poxis lock interruptable? + is posix lock interruptible? """ signal.signal(signal.SIGALRM, handler) signal.alarm(5) diff --git a/qa/workunits/fs/snaps/snaptest-parents.sh b/qa/workunits/fs/snaps/snaptest-parents.sh index 3e9b85c51a1..a66a977fd57 100755 --- a/qa/workunits/fs/snaps/snaptest-parents.sh +++ b/qa/workunits/fs/snaps/snaptest-parents.sh @@ -35,7 +35,7 @@ echo "comparing snapshots and new tree" dir1=`find 1/ | wc -w` dir2=`find 2/.snap/barsnap2/a/b/c | wc -w` #diff $dir1 $dir2 && echo "Success!" -test $dir1==$dir2 && echo "Sucess!" +test $dir1==$dir2 && echo "Success!" rmdir 1/.snap/* rmdir 2/.snap/* echo "OK" diff --git a/qa/workunits/mon/osd.sh b/qa/workunits/mon/osd.sh index 75bf220f6bc..535d6c13791 100755 --- a/qa/workunits/mon/osd.sh +++ b/qa/workunits/mon/osd.sh @@ -5,7 +5,7 @@ set -e ua=`uuidgen` ub=`uuidgen` -# shoudl get same id with same uuid +# should get same id with same uuid na=`ceph osd create $ua` test $na -eq `ceph osd create $ua` diff --git a/qa/workunits/rados/test_rados_tool.sh b/qa/workunits/rados/test_rados_tool.sh index 429a7d34211..2536f88d54f 100755 --- a/qa/workunits/rados/test_rados_tool.sh +++ b/qa/workunits/rados/test_rados_tool.sh @@ -43,7 +43,7 @@ run_expect_succ() { run_expect_nosignal() { echo "RUN_EXPECT_NOSIGNAL: " "$@" do_run "$@" - [ $? -ge 128 ] && die "expected succes or fail, but got signal! cmd: $@" + [ $? -ge 128 ] && die "expected success or fail, but got signal! cmd: $@" } run() { diff --git a/qa/workunits/rgw/s3_utilities.pm b/qa/workunits/rgw/s3_utilities.pm index 8492dd328dc..12e6af0ad65 100644 --- a/qa/workunits/rgw/s3_utilities.pm +++ b/qa/workunits/rgw/s3_utilities.pm @@ -199,7 +199,7 @@ sub delete_keys { (($bucket->delete_key($_[0])) and return 0) or return 1; } -# Readd the file back to bucket +# Read the file back to bucket sub readd_file { system("dd if=/dev/zero of=/tmp/10MBfile1 bs=10485760 count=1"); $mytestfilename1 = '10MBfile1'; -- 2.39.5
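The filelock_interrupt.py hunk above corrects docstrings in a workunit that checks whether a blocked flock()/POSIX lock call can be interrupted by a signal. Below is a minimal standalone sketch of that pattern for reference only; it is not part of the patch, and the temporary-file handling and the 2-second alarm are arbitrary choices rather than values taken from the workunit.

```python
#!/usr/bin/env python3
"""Sketch: interrupting a blocking flock() with SIGALRM.

Illustration of the pattern exercised by qa/workunits/fs/misc/
filelock_interrupt.py; file names and timeouts here are made up.
"""

import fcntl
import signal
import tempfile


class Alarm(Exception):
    pass


def handler(signum, frame):
    # Raising here makes the blocked fcntl.flock() call fail with this
    # exception instead of hanging forever.
    raise Alarm()


def main():
    f1 = tempfile.NamedTemporaryFile()
    f2 = open(f1.name, 'w')

    # First descriptor takes an exclusive lock without blocking.
    fcntl.flock(f1, fcntl.LOCK_EX | fcntl.LOCK_NB)

    signal.signal(signal.SIGALRM, handler)
    signal.alarm(2)
    try:
        # Second open of the same file: this flock() would block forever,
        # but the alarm interrupts it after two seconds.
        fcntl.flock(f2, fcntl.LOCK_EX)
    except Alarm:
        print("blocking flock() was interrupted, as the test expects")
    finally:
        signal.alarm(0)
        f2.close()
        f1.close()


if __name__ == '__main__':
    main()
```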