From: Casey Bodley
Date: Fri, 10 May 2019 18:40:17 +0000 (-0400)
Subject: qa/rgw: remove ceph-ansible from s3a-hadoop suite
X-Git-Tag: v14.2.3~137^2~2
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=32b6df730f73834395bfbfce04af967d3ccb5194;p=ceph.git

qa/rgw: remove ceph-ansible from s3a-hadoop suite

Fixes: http://tracker.ceph.com/issues/39706

Signed-off-by: Casey Bodley
(cherry picked from commit 0fc2c8ecee2b6233292b9fd1325347fd0fdf9171)

Conflicts:
	qa/tasks/s3a_hadoop.py
	  - nautilus lacks the "if hadoop_ver == 'trunk'" conditional block
---

diff --git a/qa/suites/rgw/hadoop-s3a/clusters/.qa b/qa/suites/rgw/hadoop-s3a/clusters/.qa
new file mode 120000
index 000000000000..a602a0353e75
--- /dev/null
+++ b/qa/suites/rgw/hadoop-s3a/clusters/.qa
@@ -0,0 +1 @@
+../.qa/
\ No newline at end of file
diff --git a/qa/suites/rgw/hadoop-s3a/clusters/fixed-2.yaml b/qa/suites/rgw/hadoop-s3a/clusters/fixed-2.yaml
new file mode 120000
index 000000000000..230ff0fdab41
--- /dev/null
+++ b/qa/suites/rgw/hadoop-s3a/clusters/fixed-2.yaml
@@ -0,0 +1 @@
+.qa/clusters/fixed-2.yaml
\ No newline at end of file
diff --git a/qa/suites/rgw/hadoop-s3a/s3a-hadoop.yaml b/qa/suites/rgw/hadoop-s3a/s3a-hadoop.yaml
index 2e156e4d5d8a..d9bf9f28e2a6 100644
--- a/qa/suites/rgw/hadoop-s3a/s3a-hadoop.yaml
+++ b/qa/suites/rgw/hadoop-s3a/s3a-hadoop.yaml
@@ -1,35 +1,8 @@
-machine_type: ovh
-openstack:
-- volumes: # attached to each instance
-    count: 3
-    size: 20 # GB
-overrides:
-  ceph_ansible:
-    vars:
-      ceph_conf_overrides:
-        global:
-          osd default pool size: 2
-          osd pool default pg num: 8
-          osd pool default pgp num: 8
-          debug rgw: 20
-          debug ms: 1
-      ceph_test: true
-      journal_collocation: true
-      osd_auto_discovery: false
-      journal_size: 1024
-      ceph_stable_release: luminous
-      osd_scenario: collocated
-      ceph_origin: repository
-      ceph_repository: dev
-roles:
-- [mon.a, osd.0, osd.1, osd.2]
-- [osd.3, osd.4, osd.5]
-- [osd.6, osd.7, osd.8]
-- [mon.b, mgr.x, rgw.0]
 tasks:
+- install:
+- ceph:
 - ssh-keys:
-- ceph-ansible:
-- dnsmasq:
-    rgw.0: [s3.ceph.com]
+- rgw:
+    client.0:
 - s3a-hadoop:
+    role: client.0
diff --git a/qa/tasks/s3a_hadoop.py b/qa/tasks/s3a_hadoop.py
index 364c8bed99a1..d4c61a2b6b33 100644
--- a/qa/tasks/s3a_hadoop.py
+++ b/qa/tasks/s3a_hadoop.py
@@ -20,7 +20,7 @@ def task(ctx, config):
           bucket-name: 's3atest' (default)
           access-key: 'anykey' (uses a default value)
           secret-key: 'secretkey' ( uses a default value)
-          dnsmasq-name: 's3.ceph.com'
+          role: client.0
     """
     if config is None:
         config = {}
@@ -28,19 +28,23 @@ def task(ctx, config):
     assert isinstance(config, dict), \
         "task only supports a dictionary for configuration"
 
+    assert hasattr(ctx, 'rgw'), 's3a-hadoop must run after the rgw task'
+
     overrides = ctx.config.get('overrides', {})
     misc.deep_merge(config, overrides.get('s3a-hadoop', {}))
     testdir = misc.get_testdir(ctx)
-    rgws = ctx.cluster.only(misc.is_type('rgw'))
-    # use the first rgw node to test s3a
-    rgw_node = rgws.remotes.keys()[0]
+
+    role = config.get('role')
+    (remote,) = ctx.cluster.only(role).remotes.keys()
+    endpoint = ctx.rgw.role_endpoints.get(role)
+    assert endpoint, 's3tests: no rgw endpoint for {}'.format(role)
+
     # get versions
     maven_major = config.get('maven-major', 'maven-3')
     maven_version = config.get('maven-version', '3.3.9')
     hadoop_ver = config.get('hadoop-version', '2.7.3')
     bucket_name = config.get('bucket-name', 's3atest')
     access_key = config.get('access-key', 'EGAQRD2ULOIFKFSKCT4F')
-    dnsmasq_name = config.get('dnsmasq-name', 's3.ceph.com')
     secret_key = config.get(
         'secret-key', 'zi816w1vZKfaSM85Cl0BxXTwSLyN7zB4RbTswrGb')
@@ -52,8 +56,8 @@ def task(ctx, config):
         '{maven_major}/{maven_version}/binaries/'.format(maven_major=maven_major, maven_version=maven_version) + apache_maven
     hadoop_git = 'https://github.com/apache/hadoop'
     hadoop_rel = 'hadoop-{ver} rel/release-{ver}'.format(ver=hadoop_ver)
-    install_prereq(rgw_node)
-    rgw_node.run(
+    install_prereq(remote)
+    remote.run(
         args=[
             'cd',
             testdir,
@@ -78,9 +82,8 @@ def task(ctx, config):
             run.Raw(hadoop_rel)
         ]
     )
-    configure_s3a(rgw_node, dnsmasq_name, access_key, secret_key, bucket_name, testdir)
-    fix_rgw_config(rgw_node, dnsmasq_name)
-    setup_user_bucket(rgw_node, dnsmasq_name, access_key, secret_key, bucket_name, testdir)
+    configure_s3a(remote, endpoint.hostname, access_key, secret_key, bucket_name, testdir)
+    setup_user_bucket(remote, endpoint.hostname, access_key, secret_key, bucket_name, testdir)
     if hadoop_ver.startswith('2.8'):
         # test all ITtests but skip AWS test using public bucket landsat-pds
         # which is not available from within this test
@@ -90,12 +93,12 @@ def task(ctx, config):
     else:
         test_options = 'test -Dtest=S3a*,TestS3A*'
     try:
-        run_s3atest(rgw_node, maven_version, testdir, test_options)
+        run_s3atest(remote, maven_version, testdir, test_options)
         yield
     finally:
         log.info("Done s3a testing, Cleaning up")
         for fil in ['apache*', 'hadoop*', 'venv*', 'create*']:
-            rgw_node.run(args=['rm', run.Raw('-rf'), run.Raw('{tdir}/{file}'.format(tdir=testdir, file=fil))])
+            remote.run(args=['rm', run.Raw('-rf'), run.Raw('{tdir}/{file}'.format(tdir=testdir, file=fil))])
 
 
 def install_prereq(client):
@@ -118,41 +121,6 @@ def install_prereq(client):
     )
 
 
-def fix_rgw_config(client, name):
-    """
-    Fix RGW config in ceph.conf, we need rgw dns name entry
-    and also modify the port to use :80 for s3a tests to work
-    """
-    rgw_dns_name = 'rgw dns name = {name}'.format(name=name)
-    ceph_conf_path = '/etc/ceph/ceph.conf'
-    # append rgw_dns_name
-    client.run(
-        args=[
-            'sudo',
-            'sed',
-            run.Raw('-i'),
-            run.Raw("'/client.rgw*/a {rgw_name}'".format(rgw_name=rgw_dns_name)),
-            ceph_conf_path
-
-        ]
-    )
-    # listen on port 80
-    client.run(
-        args=[
-            'sudo',
-            'sed',
-            run.Raw('-i'),
-            run.Raw('s/:8080/:80/'),
-            ceph_conf_path
-        ]
-    )
-    client.run(args=['cat', ceph_conf_path])
-    client.run(args=['sudo', 'systemctl', 'restart', 'ceph-radosgw.target'])
-    client.run(args=['sudo', 'systemctl', 'status', 'ceph-radosgw.target'])
-    # sleep for daemon to be completely up before creating admin user
-    time.sleep(10)
-
-
 def setup_user_bucket(client, dns_name, access_key, secret_key, bucket_name, testdir):
     """
     Create user with access_key and secret_key that will be