--- /dev/null
+Copyright (c) 2017 Red Hat, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
--- /dev/null
#!/bin/sh
# Bootstrap a development virtualenv for ragweed: install any missing
# distro packages (via sudo), then build ./virtualenv and install the
# Python requirements into it.
set -e

if [ -f /etc/debian_version ]; then
    for package in python-pip python-virtualenv python-dev python-six libevent-dev libxml2-dev libxslt-dev zlib1g-dev; do
        if [ "$(dpkg --status -- $package 2>/dev/null|sed -n 's/^Status: //p')" != "install ok installed" ]; then
            # add a space after old values
            missing="${missing:+$missing }$package"
        fi
    done
    if [ -n "$missing" ]; then
        echo "$0: missing required DEB packages. Installing via sudo." 1>&2
        sudo apt-get -y install $missing
    fi
fi
if [ -f /etc/redhat-release ]; then
    for package in python-pip python-virtualenv python-devel python-six libevent-devel libxml2-devel libxslt-devel zlib-devel; do
        # bug fix: `==` is a bashism; POSIX test(1) uses `=` and `==`
        # fails under dash, the default /bin/sh on Debian/Ubuntu.
        if [ "$(rpm -qa $package 2>/dev/null)" = "" ]; then
            missing="${missing:+$missing }$package"
        fi
    done
    if [ -n "$missing" ]; then
        echo "$0: missing required RPM packages. Installing via sudo." 1>&2
        sudo yum -y install $missing
    fi
fi

virtualenv --system-site-packages --distribute virtualenv

# avoid pip bugs
./virtualenv/bin/pip install --upgrade pip

# work-around change in pip 1.5
./virtualenv/bin/pip install six
./virtualenv/bin/pip install -I nose
./virtualenv/bin/pip install setuptools==32.3.1

./virtualenv/bin/pip install -U -r requirements.txt

# forbid setuptools from using the network because it'll try to use
# easy_install, and we really wanted pip; next line will fail if pip
# requirements.txt does not match setup.py requirements -- sucky but
# good enough for now
./virtualenv/bin/python setup.py develop
--- /dev/null
+[DEFAULT]
+
+[rados]
+
+ceph_conf = <path to ceph.conf>
+
+[rgw]
+## replace with e.g. "localhost" to run against local software
+host = <hostname>
+
+## set the port the gateway listens on (remove this line to use the default, 80)
+port = 8000
+
+## say "no" to disable TLS
+is_secure = no
+
+# bucket prefix
+bucket_prefix = ragweed
+
+[fixtures]
+
+[user regular]
+user_id = yehsad
+access_key = accesskey1
+secret_key = secret1
+
+[user system]
+access_key = accesskey2
+secret_key = secret2
+
--- /dev/null
+import sys
+import os
+import boto
+import boto.s3.connection
+import json
+import inspect
+import pickle
+import bunch
+import yaml
+import ConfigParser
+import rados
+from boto.s3.key import Key
+from nose.plugins.attrib import attr
+from nose.tools import eq_ as eq
+
+from .reqs import _make_admin_request
+
+ragweed_env = None
+suite = None
+
class RGWConnection:
    """A boto S3 connection to one RGW endpoint.

    Remembers host, port and the TLS flag so raw (non-boto) requests can
    later be issued against the same endpoint.
    """

    def __init__(self, access_key, secret_key, host, port, is_secure):
        self.host = host
        self.port = port
        self.is_secure = is_secure
        # path-style addressing: bucket in the URL path, not the hostname
        calling_format = boto.s3.connection.OrdinaryCallingFormat()
        self.conn = boto.connect_s3(aws_access_key_id=access_key,
                                    aws_secret_access_key=secret_key,
                                    host=host,
                                    port=port,
                                    is_secure=is_secure,
                                    calling_format=calling_format)

    def create_bucket(self, name):
        """Create and return bucket *name*."""
        return self.conn.create_bucket(name)

    def get_bucket(self, name, validate=True):
        """Look up bucket *name*; *validate* controls boto's existence check."""
        return self.conn.get_bucket(name, validate=validate)
+
+
class RGWRESTAdmin:
    """Client for RGW's REST admin API, issued through a system user's
    RGWConnection."""
    def __init__(self, connection):
        self.conn = connection

    def get_resource(self, path, params):
        """GET *path* with query *params* and return the JSON body, bunchified.

        Raises boto.exception.S3ResponseError on any non-200 status.
        """
        r = _make_admin_request(self.conn, "GET", path, params)
        if r.status != 200:
            raise boto.exception.S3ResponseError(r.status, r.reason)
        return bunch.bunchify(json.loads(r.read()))


    def read_meta_key(self, key):
        """Fetch a single metadata entry by key via /admin/metadata."""
        return self.get_resource('/admin/metadata', {'key': key})

    def get_bucket_entrypoint(self, bucket_name):
        """Read the bucket entrypoint metadata (maps bucket name -> instance id)."""
        return self.read_meta_key('bucket:' + bucket_name)

    def get_bucket_instance_info(self, bucket_name, bucket_id = None):
        """Return bucket_info for *bucket_name*, resolving the instance id
        from the bucket entrypoint when *bucket_id* is not supplied."""
        if not bucket_id:
            ep = self.get_bucket_entrypoint(bucket_name)
            print ep
            bucket_id = ep.data.bucket.bucket_id
        result = self.read_meta_key('bucket.instance:' + bucket_name + ":" + bucket_id)
        return result.data.bucket_info

    def get_obj_layout(self, key):
        """Return the rados layout (head/tail placement) of boto key *key*."""
        path = '/' + key.bucket.name + '/' + key.name
        # 'layout': None renders as a bare '?layout' query flag
        params = {'layout': None}
        if key.version_id is not None:
            params['versionId'] = key.version_id

        print params

        return self.get_resource(path, params)

    def get_zone_params(self):
        """Return the zone configuration (placement pools, defaults, ...)."""
        return self.get_resource('/admin/config', {'type': 'zone'})
+
+
class RSuite:
    """Tracks registered tests for one run and persists their state.

    *suite_step* (from RAGWEED_STAGES) is a comma-separated stage list:
    'prepare' creates the config bucket and lets tests write their state
    into it; 'check'/'test' re-opens that bucket so the state written by
    an earlier 'prepare' run can be read back.
    """
    def __init__(self, name, bucket_prefix, zone, suite_step):
        self.name = name
        self.bucket_prefix = bucket_prefix
        self.zone = zone
        self.config_bucket = None
        self.rtests = []
        self.do_preparing = False
        self.do_check = False
        for step in suite_step.split(','):
            if step == 'prepare':
                self.do_preparing = True
                self.config_bucket = self.zone.create_raw_bucket(self.get_bucket_name('conf'))
            if step == 'check' or step == 'test':
                self.do_check = True
                self.config_bucket = self.zone.get_raw_bucket(self.get_bucket_name('conf'))

    def get_bucket_name(self, suffix):
        # every bucket the suite touches shares the configured prefix
        return self.bucket_prefix + '-' + suffix

    def register_test(self, t):
        self.rtests.append(t)

    def write_test_data(self, test):
        """Store *test*'s serialized state as tests/<name> in the config bucket."""
        k = Key(self.config_bucket)
        k.key = 'tests/' + test._name
        k.set_contents_from_string(test.to_json())

    def read_test_data(self, test):
        """Load *test*'s state previously written by write_test_data()."""
        k = Key(self.config_bucket)
        k.key = 'tests/' + test._name
        s = k.get_contents_as_string()
        print 'read_test_data=', s
        test.from_json(s)

    def is_preparing(self):
        return self.do_preparing

    def is_checking(self):
        return self.do_check
+
+
class RTestJSONSerialize(json.JSONEncoder):
    """JSON encoder that pickles values json cannot represent natively.

    JSON-native types are delegated to the base encoder; anything else is
    stored as {'__pickle': <pickle-string>} so rtest_decode_json() can
    restore it on load.
    """
    def default(self, obj):
        if isinstance(obj, (list, dict, str, unicode, int, float, bool, type(None))):
            # bug fix: was `JSONEncoder.default(...)` -- an undefined name
            # (NameError); the base class lives on the json module.
            return json.JSONEncoder.default(self, obj)
        return {'__pickle': pickle.dumps(obj)}
+
def rtest_decode_json(d):
    """json object_hook: unpickle dicts written by RTestJSONSerialize.

    Plain dicts (no '__pickle' marker) pass through unchanged.
    """
    if '__pickle' not in d:
        return d
    return pickle.loads(str(d['__pickle']))
+
+
class RBucket:
    """A bucket plus its RGW instance metadata, bound to a zone."""

    def __init__(self, zone, bucket, bucket_info):
        self.zone = zone
        self.bucket = bucket
        self.name = bucket.name
        self.bucket_info = bucket_info

    def get_data_pool(self):
        """Return the rados pool expected to hold this bucket's head objects."""
        try:
            # old style explicit pool
            explicit_pool = self.bucket_info.bucket.pool
        except AttributeError:
            # new style explicit pool (narrowed from a bare except: bunch
            # raises AttributeError for missing fields)
            explicit_pool = self.bucket_info.bucket.explicit_placement.data_pool
        if explicit_pool is not None and explicit_pool != '':
            return explicit_pool
        # no explicit placement: resolve through the bucket's placement rule
        return self.zone.get_placement_target(self.bucket_info.placement_rule).data_pool

    def get_tail_pool(self, obj_layout):
        """Return the rados pool expected to hold an object's tail chunks,
        given the layout JSON returned by the admin API."""
        try:
            placement_rule = obj_layout.manifest.tail_placement.placement_rule
        except AttributeError:
            placement_rule = ''
        if placement_rule == '':
            try:
                # new style
                return obj_layout.manifest.tail_placement.bucket.explicit_placement.data_pool
            except AttributeError:
                pass

            try:
                # old style
                return obj_layout.manifest.tail_bucket.pool
            except AttributeError:
                pass

        return self.zone.get_placement_target(placement_rule).data_pool
+
class RZone:
    """Zone-level helpers: caches the zone's placement targets and wraps
    bucket creation/lookup so buckets come back with instance info (RBucket)."""
    def __init__(self, conn):
        self.conn = conn

        # admin requests go through the 'system' user's connection
        self.rgw_rest_admin = RGWRESTAdmin(self.conn.system)
        self.zone_params = self.rgw_rest_admin.get_zone_params()

        self.placement_targets = {}

        for e in self.zone_params.placement_pools:
            self.placement_targets[e.key] = e.val

        print 'zone_params:', self.zone_params

    def get_placement_target(self, placement_id):
        """Return the placement target for *placement_id*; empty/None falls
        back to the zone default; unknown ids return None."""
        plid = placement_id
        if placement_id is None or placement_id == '':
            print 'zone_params=', self.zone_params
            plid = self.zone_params.default_placement

        try:
            return self.placement_targets[plid]
        except:
            pass

        return None

    def create_bucket(self, name):
        """Create bucket *name* and return it as an RBucket with its info."""
        bucket = self.create_raw_bucket(name)
        bucket_info = self.rgw_rest_admin.get_bucket_instance_info(bucket.name)
        print 'bucket_info:', bucket_info
        return RBucket(self, bucket, bucket_info)

    def get_bucket(self, name):
        """Look up an existing bucket as an RBucket with its instance info."""
        bucket = self.get_raw_bucket(name)
        bucket_info = self.rgw_rest_admin.get_bucket_instance_info(bucket.name)
        print 'bucket_info:', bucket_info
        return RBucket(self, bucket, bucket_info)

    def create_raw_bucket(self, name):
        # plain boto bucket, created as the regular (non-system) user
        return self.conn.regular.create_bucket(name)

    def get_raw_bucket(self, name):
        return self.conn.regular.get_bucket(name)

    def refresh_rbucket(self, rbucket):
        """Re-resolve *rbucket*'s boto handle and instance info in place
        (used after state is reloaded in a new process)."""
        rbucket.bucket = self.get_raw_bucket(rbucket.bucket.name)
        rbucket.bucket_info = self.rgw_rest_admin.get_bucket_instance_info(rbucket.bucket.name)
+
+
class RTest:
    """Base class for ragweed tests.

    Subclasses override init()/prepare()/check(). Attributes whose names
    start with 'r_' are the test's persistent state: to_json()/from_json()
    round-trip them through the suite's config bucket, so a 'check' stage
    run in a separate process sees what 'prepare' wrote.
    """
    def __init__(self):
        self._name = self.__class__.__name__
        self.r_buckets = []
        self.init()

    def create_bucket(self):
        # bucket names are <prefix>-<TestName>-<ordinal>
        bid = len(self.r_buckets) + 1
        rb = suite.zone.create_bucket(suite.get_bucket_name(self._name + '-' + str(bid)))
        self.r_buckets.append(rb)

        return rb

    def get_buckets(self):
        for rb in self.r_buckets:
            yield rb

    def init(self):
        # subclass hook: set sizes/parameters before prepare/check
        pass

    def prepare(self):
        # subclass hook: create buckets/objects (runs in the 'prepare' stage)
        pass

    def check(self):
        # subclass hook: validate what prepare() wrote (runs in the 'check' stage)
        pass

    def to_json(self):
        """Serialize every r_* attribute; non-JSON values are pickled."""
        attrs = {}
        for x in dir(self):
            if x.startswith('r_'):
                attrs[x] = getattr(self, x)
        return json.dumps(attrs, cls=RTestJSONSerialize)

    def from_json(self, s):
        """Restore r_* attributes previously written by to_json()."""
        j = json.loads(s, object_hook=rtest_decode_json)
        for e in j:
            setattr(self, e, j[e])

    def save(self):
        suite.write_test_data(self)

    def load(self):
        suite.read_test_data(self)
        # unpickled bucket handles are stale; re-resolve them in this process
        for rb in self.r_buckets:
            suite.zone.refresh_rbucket(rb)

    def test(self):
        """Entry point run by nose: prepare and/or check per the suite stages."""
        suite.register_test(self)
        if suite.is_preparing():
            self.prepare()
            self.save()

        if suite.is_checking():
            self.load()
            self.check()
+
def read_config(fp):
    """Load all YAML documents from *fp*, merged into a single Bunch."""
    config = bunch.Bunch()
    g = yaml.safe_load_all(fp)
    for new in g:
        print bunch.bunchify(new)
        config.update(bunch.bunchify(new))
    return config
+
# Per-section option names RagweedEnv reads from the ini file, grouped by
# the ConfigParser accessor used to coerce them (get/getint/getboolean).
str_config_opts = [
                'user_id',
                'access_key',
                'secret_key',
                'host',
                'ceph_conf',
                'bucket_prefix',
                ]

int_config_opts = [
                'port',
                ]

bool_config_opts = [
                'is_secure',
                ]
+
def dict_find(d, k):
    """Return d[k], or None when *k* is absent (dict.get semantics)."""
    # dict.has_key() was removed in Python 3; .get() is equivalent here
    # and performs a single lookup instead of two.
    return d.get(k)
+
class RagweedEnv:
    """Process-wide test environment, built from the RAGWEED_CONF ini file.

    Parses the config into self.config, opens an RGWConnection per
    [user <name>] section, builds the zone and suite, and connects to the
    rados cluster using the configured ceph.conf.
    """
    def __init__(self):
        self.config = bunch.Bunch()

        cfg = ConfigParser.RawConfigParser()
        try:
            path = os.environ['RAGWEED_CONF']
        except KeyError:
            raise RuntimeError(
                'To run tests, point environment '
                + 'variable RAGWEED_CONF to a config file.',
                )
        with file(path) as f:
            cfg.readfp(f)

        for section in cfg.sections():
            try:
                # two-word sections ("user regular") nest under their type
                (section_type, name) = section.split(None, 1)
                if not self.config.has_key(section_type):
                    self.config[section_type] = bunch.Bunch()
                self.config[section_type][name] = bunch.Bunch()
                cur = self.config[section_type]
            except ValueError:
                # one-word sections ("rgw", "rados") live at the top level
                section_type = ''
                name = section
                self.config[name] = bunch.Bunch()
                cur = self.config

            # NOTE(review): re-assigns the Bunch created just above; looks
            # redundant but is harmless -- confirm before removing.
            cur[name] = bunch.Bunch()

            for var in str_config_opts:
                try:
                    cur[name][var] = cfg.get(section, var)
                except ConfigParser.NoOptionError:
                    pass

            for var in int_config_opts:
                try:
                    cur[name][var] = cfg.getint(section, var)
                except ConfigParser.NoOptionError:
                    pass

            for var in bool_config_opts:
                try:
                    cur[name][var] = cfg.getboolean(section, var)
                except ConfigParser.NoOptionError:
                    pass

        print json.dumps(self.config)

        rgw_conf = self.config.rgw

        try:
            self.bucket_prefix = rgw_conf.bucket_prefix
        except:
            self.bucket_prefix = 'ragweed'

        # one RGWConnection per configured user (e.g. 'regular', 'system')
        conn = bunch.Bunch()
        for (k, u) in self.config.user.iteritems():
            conn[k] = RGWConnection(u.access_key, u.secret_key, rgw_conf.host, dict_find(rgw_conf, 'port'), dict_find(rgw_conf, 'is_secure'))

        self.zone = RZone(conn)
        self.suite = RSuite('ragweed', self.bucket_prefix, self.zone, os.environ['RAGWEED_STAGES'])

        try:
            self.ceph_conf = self.config.rados.ceph_conf
        except:
            raise RuntimeError(
                'ceph_conf is missing under the [rados] section in ' + os.environ['RAGWEED_CONF']
            )

        self.rados = rados.Rados(conffile=self.ceph_conf)
        self.rados.connect()

        pools = self.rados.list_pools()

        for pool in pools:
            print "rados pool>", pool
+
def setup_module():
    """nose module-level setup: build the environment once and expose the
    shared ragweed_env/suite globals used by RTest and the test helpers."""
    global ragweed_env
    global suite

    ragweed_env = RagweedEnv()
    suite = ragweed_env.suite
--- /dev/null
+import boto.s3.connection
+from httplib import HTTPConnection, HTTPSConnection
+from urlparse import urlparse
+import urllib
+
def _make_admin_request(conn, method, path, query_dict=None, body=None, response_headers=None, request_headers=None, expires_in=100000, path_style=True, timeout=None):
    """
    issue an authenticated admin-API request for *method* on *path*
    (e.g. '/admin/metadata'), appending *query_dict* to the signed query
    string, and return the raw httplib response.
    """

    query = ''
    if query_dict is not None:
        query = urllib.urlencode(query_dict)

    # treat the first path component as the "bucket" and the remainder as
    # the "key" so boto can generate a signed URL for the resource
    (bucket_str, key_str) = path.split('/', 2)[1:]
    bucket = conn.get_bucket(bucket_str, validate=False)
    key = bucket.get_key(key_str, validate=False)

    urlobj = None
    if key is not None:
        urlobj = key
    elif bucket is not None:
        urlobj = bucket
    else:
        raise RuntimeError('Unable to find bucket name')
    url = urlobj.generate_url(expires_in, method=method, response_headers=response_headers, headers=request_headers)
    o = urlparse(url)
    # extra (unsigned) query params go after the signed auth params
    req_path = o.path + '?' + o.query + '&' + query

    return _make_raw_request(host=conn.host, port=conn.port, method=method, path=req_path, body=body, request_headers=request_headers, secure=conn.is_secure, timeout=timeout)
+
def _make_request(method, bucket, key, body=None, authenticated=False, response_headers=None, request_headers=None, expires_in=100000, path_style=True, timeout=None):
    """
    issue a request for a specified method, on a specified <bucket,key>,
    with a specified (optional) body (encrypted per the connection), and
    return the response (status, reason).

    If key is None, then this will be treated as a bucket-level request.

    If the request or response headers are None, then default values will be
    provided by later methods.
    """
    if not path_style:
        conn = bucket.connection
        # NOTE(review): request_headers may be None here, making this item
        # assignment raise TypeError -- confirm callers always pass a dict
        # when path_style is False.
        request_headers['Host'] = conn.calling_format.build_host(conn.server_name(), bucket.name)

    if authenticated:
        # signed URL: let boto build the path + auth query params
        urlobj = None
        if key is not None:
            urlobj = key
        elif bucket is not None:
            urlobj = bucket
        else:
            raise RuntimeError('Unable to find bucket name')
        url = urlobj.generate_url(expires_in, method=method, response_headers=response_headers, headers=request_headers)
        o = urlparse(url)
        path = o.path + '?' + o.query
    else:
        # anonymous request: assemble the path by hand
        bucketobj = None
        if key is not None:
            path = '/{obj}'.format(obj=key.name)
            bucketobj = key.bucket
        elif bucket is not None:
            path = '/'
            bucketobj = bucket
        else:
            raise RuntimeError('Unable to find bucket name')
        if path_style:
            path = '/{bucket}'.format(bucket=bucketobj.name) + path

    # NOTE(review): `s3` is not defined anywhere in this module (the code
    # appears copied from s3-tests); calling this function raises NameError.
    return _make_raw_request(host=s3.main.host, port=s3.main.port, method=method, path=path, body=body, request_headers=request_headers, secure=s3.main.is_secure, timeout=timeout)
+
def _make_bucket_request(method, bucket, body=None, authenticated=False, response_headers=None, request_headers=None, expires_in=100000, path_style=True, timeout=None):
    """
    issue a request for a specified method, on a specified <bucket>,
    with a specified (optional) body (encrypted per the connection), and
    return the response (status, reason)

    Thin wrapper over _make_request with key=None (bucket-level request).
    """
    return _make_request(method=method, bucket=bucket, key=None, body=body, authenticated=authenticated, response_headers=response_headers, request_headers=request_headers, expires_in=expires_in, path_style=path_style, timeout=timeout)
+
def _make_raw_request(host, port, method, path, body=None, request_headers=None, secure=False, timeout=None):
    """
    issue a request to a specific host & port, for a specified method, on a
    specified path with a specified (optional) body (encrypted per the
    connection), and return the response (status, reason).

    This allows construction of special cases not covered by the bucket/key to
    URL mapping of _make_request/_make_bucket_request.
    """
    if secure:
        class_ = HTTPSConnection
    else:
        class_ = HTTPConnection

    if request_headers is None:
        request_headers = {}

    # NOTE(review): `strict` is a Python 2 httplib parameter (removed in
    # Python 3's http.client).
    c = class_(host, port, strict=True, timeout=timeout)

    # TODO: We might have to modify this in future if we need to interact with
    # how httplib.request handles Accept-Encoding and Host.
    c.request(method, path, body=body, headers=request_headers)

    res = c.getresponse()
    # NOTE(review): connection left open, presumably so callers can still
    # read the response body -- confirm callers close it when done.
    #c.close()

    return res
+
+
--- /dev/null
+from cStringIO import StringIO
+import ragweed.framework
+import binascii
+import string
+import random
+
+
+from ragweed.framework import *
+
class obj_placement:
    """Rados location of one object chunk: pool, object id, locator key."""
    def __init__(self, pool, oid, loc):
        self.pool, self.oid, self.loc = pool, oid, loc
+
def rgwa():
    # shortcut to the suite's RGW admin REST client
    return ragweed.framework.ragweed_env.zone.rgw_rest_admin
+
def get_pool_ioctx(pool_name):
    # open a rados I/O context on *pool_name* via the suite's cluster handle
    return ragweed.framework.ragweed_env.rados.open_ioctx(pool_name)
+
+
def get_placement(obj_json):
    """Extract an obj_placement (pool, oid, locator) from a layout JSON
    fragment.

    Newer RGW responses carry pool/oid/loc directly; older ones carry the
    bucket marker plus object/key names, from which the oid and locator
    are composed as "<marker>_<name>".
    """
    try:
        return obj_placement(obj_json.pool, obj_json.oid, obj_json.loc)
    except AttributeError:
        # narrowed from a bare except: bunch raises AttributeError for
        # missing fields
        oid = obj_json.bucket.marker + '_' + obj_json.object
        key = ''
        if obj_json.key != '':
            key = obj_json.bucket.marker + '_' + obj_json.key
        return obj_placement(obj_json.bucket.pool, oid, key)
+
def validate_obj_location(rbucket, obj):
    """Assert *obj*'s head and tail rados objects live in the pools implied
    by *rbucket*'s placement, and that the rados objects actually exist."""
    expected_head_pool = rbucket.get_data_pool()
    head_pool_ioctx = get_pool_ioctx(expected_head_pool)
    print 'expected head pool: ' + expected_head_pool

    obj_layout = rgwa().get_obj_layout(obj)

    print 'layout', obj_layout

    print 'head', obj_layout.head
    expected_tail_pool = rbucket.get_tail_pool(obj_layout)
    tail_pool_ioctx = get_pool_ioctx(expected_tail_pool)

    head_placement = get_placement(obj_layout.head)

    eq(head_placement.pool, expected_head_pool)

    # check rados object for head exists
    head_pool_ioctx.set_locator_key(head_placement.loc)
    (size, mtime) = head_pool_ioctx.stat(head_placement.oid)

    print 'head size:', size, 'mtime:', mtime

    # check tail
    for o in obj_layout.data_location:
        print 'o=', o
        print 'ofs=', o.ofs, 'loc', o.loc
        placement = get_placement(o.loc)
        # the chunk at offset 0 may be the head object itself; only chunks
        # beyond it must live in the tail pool
        if o.ofs > 0 or placement.oid != head_placement.oid:
            eq(placement.pool, expected_tail_pool)

            # validate rados object exists
            tail_pool_ioctx.set_locator_key(placement.loc)
            (size, mtime) = tail_pool_ioctx.stat(placement.oid)

            # the rados object must cover this chunk's full extent
            eq(size, o.loc_ofs + o.loc_size)
+
+
def validate_obj(rbucket, obj_name, expected_crc):
    """Read *obj_name* back from *rbucket*, validate its rados layout and
    compare its crc32 against *expected_crc* ('0x'-prefixed hex string)."""
    b = rbucket.bucket

    obj = b.get_key(obj_name)

    validate_obj_location(rbucket, obj)
    crc = binascii.crc32(obj.get_contents_as_string())
    obj_crc = '{:#010x}'.format(crc)
    print 'read crc: ' + obj_crc
    eq(obj_crc, expected_crc)
+
+
def gen_rand_string(size, chars=string.ascii_uppercase + string.digits):
    """Return a random string of *size* characters drawn from *chars*."""
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
+
+# prepare:
+# create objects in multiple sizes, with various names
+# check:
+# verify data correctness
+# verify that objects were written to the expected data pool
class r_test_small_obj_data(RTest):
    """Write small objects (0B, 512KiB, 1MiB) under awkward names; on
    check, verify the content round-trips and the objects sit in the
    expected data pools."""
    def prepare(self):
        self.r_obj_names = [ 'obj', '_', '__', '_ _' ]
        self.r_bucket_sizes = {}

        # one bucket per object size
        sizes = { 0, 512 * 1024, 1024 * 1024 }

        for size in sizes:
            rbucket = self.create_bucket()
            self.r_bucket_sizes[rbucket.name] = size
            data = '0' * size
            for n in self.r_obj_names:
                obj = Key(rbucket.bucket)
                obj.key = n;
                obj.set_contents_from_string(data)

    def check(self):
        print self.r_obj_names
        for rbucket in self.get_buckets():
            # the size map was persisted as r_* state during prepare
            size = self.r_bucket_sizes[rbucket.name]
            data = '0' * int(size)

            for n in self.r_obj_names:
                obj = Key(rbucket.bucket)
                obj.key = n;
                obj_data = obj.get_contents_as_string()
                eq(data, obj_data)

                validate_obj_location(rbucket, obj)
+
class MultipartUploaderState:
    """Picklable snapshot of a MultipartUploader: the upload id, the
    running crc, and the index of the next part to upload."""
    def __init__(self, mu):
        self.upload_id = mu.mp.id
        self.crc = mu.crc
        self.cur_part = mu.cur_part
+
+
class MultipartUploader:
    """Resumable multipart uploader.

    Uploads *size* bytes to *obj_name* in *part_size* chunks, keeping a
    running crc32 of the payload. Pass a MultipartUploaderState as *state*
    to resume an in-flight upload in a later process: the matching upload
    is found among the bucket's active multipart uploads by key name and
    upload id.

    NOTE: Python 2 code -- `/` below is integer division.
    """
    def __init__(self, rbucket, obj_name, size, part_size, state=None):
        self.rbucket = rbucket
        self.obj_name = obj_name
        self.size = size
        self.part_size = part_size
        self.crc = 0
        self.cur_part = 0

        if state is not None:
            self.crc = state.crc
            self.cur_part = state.cur_part

        # NOTE(review): this loop runs even when state is None and would
        # raise on state.upload_id if another multipart upload of the same
        # key is active -- confirm intent.
        for upload in rbucket.bucket.list_multipart_uploads():
            if upload.key_name == self.obj_name and upload.id == state.upload_id:
                self.mp = upload

        # number of full-size parts, plus the size of the trailing partial part
        self.num_full_parts = self.size / self.part_size
        self.last_part_size = self.size % self.part_size


    def prepare(self):
        """Start a fresh multipart upload, resetting crc and part index."""
        self.mp = self.rbucket.bucket.initiate_multipart_upload(self.obj_name)
        self.crc = 0
        self.cur_part = 0

    def upload(self):
        """Upload the next part; return True while more calls are needed."""
        if self.cur_part > self.num_full_parts:
            return False

        if self.cur_part < self.num_full_parts:
            # full part: a random (part_size/1MiB)-char string repeated to 1MiB blocks
            payload=gen_rand_string(self.part_size / (1024 * 1024)) * 1024 * 1024

            self.mp.upload_part_from_file(StringIO(payload), self.cur_part + 1)
            self.crc = binascii.crc32(payload, self.crc)
            self.cur_part += 1

            return True


        if self.last_part_size > 0:
            # trailing partial part
            last_payload='1'*self.last_part_size

            self.mp.upload_part_from_file(StringIO(last_payload), self.num_full_parts + 1)
            self.crc = binascii.crc32(last_payload, self.crc)
            self.cur_part += 1

        return False

    def upload_all(self):
        """Upload every remaining part."""
        while self.upload():
            pass

    def complete(self):
        """Finalize the multipart upload on the server."""
        self.mp.complete_upload()

    def hexdigest(self):
        # running crc32 as a '0x'-prefixed 8-digit hex string
        return '{:#010x}'.format(self.crc)

    def get_state(self):
        """Return a picklable snapshot for resuming later."""
        return MultipartUploaderState(self)
+
+
+
+# prepare:
+# init, upload, and complete a multipart object
+# check:
+# verify data correctness
+# verify that object layout is correct
class r_test_multipart_simple(RTest):
    """Upload and complete an 18MiB multipart object in 5MiB parts during
    prepare; on check, verify size, content crc and rados layout."""
    def init(self):
        self.obj_size = 18 * 1024 * 1024
        self.part_size = 5 * 1024 * 1024

    def prepare(self):
        rb = self.create_bucket()
        self.r_obj = 'foo'

        uploader = MultipartUploader(rb, self.r_obj, self.obj_size, self.part_size)

        uploader.prepare()
        uploader.upload_all()
        uploader.complete()

        self.r_crc = uploader.hexdigest()
        print 'written crc: ' + self.r_crc

    def check(self):
        # grab the first (only) bucket created during prepare
        for rb in self.get_buckets():
            break

        k = rb.bucket.get_key(self.r_obj)
        eq(k.size, self.obj_size)

        validate_obj(rb, self.r_obj, self.r_crc)
+
+
+# prepare:
+# init, upload multipart object
+# check:
+# complete multipart
+# verify data correctness
+# verify that object layout is correct
class r_test_multipart_defer_complete(RTest):
    """Upload all parts during prepare but defer complete_upload() to the
    check stage (resuming from saved state), then verify crc and layout."""
    def init(self):
        self.obj_size = 18 * 1024 * 1024
        self.part_size = 5 * 1024 * 1024

    def prepare(self):
        rb = self.create_bucket()
        self.r_obj = 'foo'

        uploader = MultipartUploader(rb, self.r_obj, self.obj_size, self.part_size)

        uploader.prepare()
        uploader.upload_all()

        # persist the in-flight upload (id, crc, part index) for check()
        self.r_upload_state = uploader.get_state()


    def check(self):
        # grab the first (only) bucket created during prepare
        for rb in self.get_buckets():
            break

        uploader = MultipartUploader(rb, self.r_obj, self.obj_size, self.part_size,
                                     state=self.r_upload_state)

        uploader.complete()
        crc = uploader.hexdigest()
        print 'written crc: ' + crc

        k = rb.bucket.get_key(self.r_obj)
        eq(k.size, self.obj_size)

        validate_obj(rb, self.r_obj, crc)
+
+
+# prepare:
+# init, upload multipart object
+# check:
+# complete multipart
+# verify data correctness
+# verify that object layout is correct
class r_test_multipart_defer_update_complete(RTest):
    """Upload only one part during prepare; in check, resume the upload
    from saved state, upload the rest, complete, and verify crc/layout."""
    def init(self):
        self.obj_size = 18 * 1024 * 1024
        self.part_size = 5 * 1024 * 1024

    def prepare(self):
        rb = self.create_bucket()
        self.r_obj = 'foo'

        uploader = MultipartUploader(rb, self.r_obj, self.obj_size, self.part_size)

        uploader.prepare()
        ret = uploader.upload() # only upload one part
        eq(ret, True)

        # persist the in-flight upload (id, crc, part index) for check()
        self.r_upload_state = uploader.get_state()


    def check(self):
        # grab the first (only) bucket created during prepare
        for rb in self.get_buckets():
            break

        uploader = MultipartUploader(rb, self.r_obj, self.obj_size, self.part_size,
                                     state=self.r_upload_state)

        uploader.upload_all() # upload remaining
        uploader.complete()
        crc = uploader.hexdigest()
        print 'written crc: ' + crc

        k = rb.bucket.get_key(self.r_obj)
        eq(k.size, self.obj_size)

        validate_obj(rb, self.r_obj, crc)
+
--- /dev/null
+PyYAML
+nose >=1.0.0
+boto >=2.6.0, != 2.46.0
+bunch >=1.0.0
+gevent >=1.0
+httplib2
+lxml
--- /dev/null
#!/usr/bin/python
# Packaging metadata for ragweed; installed into the virtualenv by the
# bootstrap script via `setup.py develop`.
from setuptools import setup, find_packages

setup(
    name='ragweed',
    version='0.0.1',
    packages=find_packages(),

    author='Yehuda Sadeh',
    author_email='yehuda@redhat.com',
    description='A test suite for ceph rgw',
    license='MIT',
    keywords='ceph rgw testing',

    install_requires=[
        'boto >=2.0b4',
        'PyYAML',
        'bunch >=1.0.0',
        'gevent >=1.0',
        'isodate >=0.4.4',
        ],

    #entry_points={
    #    'console_scripts': [
    #        'ragweed = ragweed:main',
    #        ],
    #    },

    )