The 5GB limit for a single-operation upload is part of the S3 spec.
However, some private setups may have special requirements
for this limit, so it is more convenient to make the value configurable.
Fixes: #6999
Signed-off-by: Yuan Zhou <yuan.zhou@intel.com>
OPTION(rgw_max_chunk_size, OPT_INT, 512 * 1024)
+OPTION(rgw_max_put_size, OPT_U64, 5ULL*1024*1024*1024)
/**
* override max bucket index shards in zone configuration (if not zero)
#define RGW_BUCKETS_OBJ_SUFFIX ".buckets"
#define RGW_MAX_PENDING_CHUNKS 16
-#define RGW_MAX_PUT_SIZE (5ULL*1024*1024*1024)
#define RGW_MIN_MULTIPART_SIZE (5ULL*1024*1024)
#define RGW_FORMAT_PLAIN 0
{
if (s->length) {
off_t len = atoll(s->length);
- if (len > (off_t)RGW_MAX_PUT_SIZE) {
+ if (len > (off_t)(s->cct->_conf->rgw_max_put_size)) {
return -ERR_TOO_LARGE;
}
}
bl.append(bp, 0, len);
}
- if ((uint64_t)ofs + len > RGW_MAX_PUT_SIZE) {
+ if ((uint64_t)ofs + len > s->cct->_conf->rgw_max_put_size) {
return -ERR_TOO_LARGE;
}
return -ERR_LENGTH_REQUIRED;
}
off_t len = atoll(s->length);
- if (len > (off_t)RGW_MAX_PUT_SIZE) {
+ if (len > (off_t)(s->cct->_conf->rgw_max_put_size)) {
return -ERR_TOO_LARGE;
}