or globally via the ``rgw_bucket_logging_obj_roll_time`` configuration option. If not set, the default time is 5 minutes.
Adding a log object to the log bucket is done "lazily": if no more records are written to the object, it may
remain outside of the log bucket even after the configured time has passed.
+To counter that, you can flush all pending logging objects of a given source bucket, committing them to the log bucket
+regardless of whether the configured time has passed or whether any more records are written to them.
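+The flush can also be triggered from the command line via the matching
+``radosgw-admin`` subcommand (the invocation below is a sketch and the exact
+syntax may differ):
+
+::
+
+  radosgw-admin bucket logging flush --bucket <bucket name>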
Standard
````````
| ``404`` | NoSuchBucket | The bucket does not exist |
+---------------+-----------------------+----------------------------------------------------------+
+Flush Bucket Logging
+--------------------
+
+Flushes all pending logging objects for a given source bucket (logging objects are written to the log bucket lazily).
+
+Syntax
+~~~~~~
+
+::
+
+ POST /{bucket}?logging HTTP/1.1
+
+
+HTTP Response
+~~~~~~~~~~~~~
+
++---------------+-----------------------+----------------------------------------------------------+
+| HTTP Status | Status Code | Description |
++===============+=======================+==========================================================+
+| ``201`` | Created | Flushed all logging objects successfully |
++---------------+-----------------------+----------------------------------------------------------+
+| ``404`` | NoSuchBucket | The bucket does not exist |
++---------------+-----------------------+----------------------------------------------------------+
+
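+The new boto3 example script below demonstrates this operation end to end;
+note that ``post_bucket_logging`` is not part of stock boto3 and requires the
+extended S3 service model that follows the script.
+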
--- /dev/null
+import boto3
+import sys
+
+
+if len(sys.argv) == 2:
+ # bucket name as first argument
+ bucketname = sys.argv[1]
+else:
+ print('Usage: ' + sys.argv[0] + ' <bucket>')
+ sys.exit(1)
+
+# endpoint and keys from vstart
+endpoint = 'http://127.0.0.1:8000'
+access_key='0555b35654ad1656d804'
+secret_key='h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q=='
+
+client = boto3.client('s3',
+ endpoint_url=endpoint,
+ aws_access_key_id=access_key,
+ aws_secret_access_key=secret_key)
+
+# flush the pending logging objects of the bucket.
+# note: "post_bucket_logging" is not part of stock boto3; it is added via an
+# extended service model (see the JSON fragments below)
+print(client.post_bucket_logging(Bucket=bucketname))
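+
+# Hedged usage sketch, assuming a vstart cluster and a source bucket that
+# already has bucket logging configured (per the service model below, a
+# successful flush returns HTTP status 201):
+#   python3 <this script> mybucket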
"documentationUrl":"https://docs.ceph.com/docs/master/radosgw/s3/bucketops/#delete-notification",
"documentation":"<p>Deletes the notification configuration from the bucket.</p>"
},
+ "PostBucketLogging":{
+ "name":"PostBucketLogging",
+ "http":{
+ "method":"POST",
+ "requestUri":"/{Bucket}?logging",
+ "responseCode":201
+ },
+ "input":{"shape":"PostBucketLoggingRequest"},
+ "documentationUrl":"https://docs.ceph.com/docs/master/radosgw/s3/bucketops/#post-bucket-logging",
+      "documentation":"<p>Flushes the pending logging objects of the bucket.</p>"
+ },
"GetUsageStats":{
"name":"GetUsageStats",
"http":{
}
}
},
+ "PostBucketLoggingRequest":{
+ "type":"structure",
+ "required":["Bucket"],
+ "members":{
+ "Bucket":{
+ "shape":"BucketName",
+          "documentation":"<p>Name of the bucket whose pending logging objects will be flushed.</p>",
+ "location":"uri",
+ "locationName":"Bucket"
+ }
+ }
+ },
"FilterRule":{
"type":"structure",
"members":{
<< "' to target bucket '" << configuration.target_bucket << "'" << std::endl;
return -ret;
}
- cerr << "flushed pending logging object '" << obj_name
+ cout << "flushed pending logging object '" << obj_name
<< "' to target bucket '" << configuration.target_bucket << "'" << std::endl;
return 0;
}
{ "s3:PutBucketCORS", s3PutBucketCORS },
{ "s3:PutBucketEncryption", s3PutBucketEncryption },
{ "s3:PutBucketLogging", s3PutBucketLogging },
+ { "s3:PostBucketLogging", s3PostBucketLogging },
{ "s3:PutBucketNotification", s3PutBucketNotification },
{ "s3:PutBucketOwnershipControls", s3PutBucketOwnershipControls },
{ "s3:PutBucketPolicy", s3PutBucketPolicy },
case s3PutBucketLogging:
return "s3:PutBucketLogging";
+ case s3PostBucketLogging:
+ return "s3:PostBucketLogging";
+
case s3GetBucketTagging:
return "s3:GetBucketTagging";
s3PutBucketNotification,
s3GetBucketLogging,
s3PutBucketLogging,
+ s3PostBucketLogging,
s3GetBucketTagging,
s3PutBucketTagging,
s3GetBucketWebsite,
case s3PutBucketCORS:
case s3PutBucketEncryption:
case s3PutBucketLogging:
+ case s3PostBucketLogging:
case s3PutBucketNotification:
case s3PutBucketPolicy:
case s3PutBucketRequestPayment:
RGW_OP_DETACH_GROUP_POLICY,
RGW_OP_LIST_ATTACHED_GROUP_POLICIES,
RGW_OP_PUT_BUCKET_LOGGING,
+ RGW_OP_POST_BUCKET_LOGGING,
/* rgw specific */
RGW_OP_ADMIN_SET_METADATA,
RGW_OP_GET_OBJ_LAYOUT,
}
};
+// Post /<bucket name>/?logging
+// no configuration is expected in the body; flushes the bucket's pending logging object
+class RGWPostBucketLoggingOp : public RGWDefaultResponseOp {
+ int verify_permission(optional_yield y) override {
+ auto [has_s3_existing_tag, has_s3_resource_tag] = rgw_check_policy_condition(this, s, false);
+ if (has_s3_resource_tag)
+ rgw_iam_add_buckettags(this, s);
+
+ if (!verify_bucket_permission(this, s, rgw::IAM::s3PostBucketLogging)) {
+ return -EACCES;
+ }
+
+ return 0;
+ }
+
+ const char* name() const override { return "post_bucket_logging"; }
+ RGWOpType get_type() override { return RGW_OP_POST_BUCKET_LOGGING; }
+ uint32_t op_mask() override { return RGW_OP_TYPE_WRITE; }
+
+ void execute(optional_yield y) override {
+ op_ret = verify_bucket_logging_params(this, s);
+ if (op_ret < 0) {
+ return;
+ }
+
+ std::unique_ptr<rgw::sal::Bucket> bucket;
+ op_ret = driver->load_bucket(this, rgw_bucket(s->bucket_tenant, s->bucket_name),
+ &bucket, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: failed to get bucket '" << s->bucket_name << "', ret = " << op_ret << dendl;
+ return;
+ }
+ const auto& bucket_attrs = bucket->get_attrs();
+ auto iter = bucket_attrs.find(RGW_ATTR_BUCKET_LOGGING);
+ if (iter == bucket_attrs.end()) {
+ ldpp_dout(this, 1) << "WARNING: no logging configured on bucket" << dendl;
+ return;
+ }
+ rgw::bucketlogging::configuration configuration;
+ try {
+ configuration.enabled = true;
+ decode(configuration, iter->second);
+ } catch (buffer::error& err) {
+ ldpp_dout(this, 1) << "ERROR: failed to decode logging attribute '" << RGW_ATTR_BUCKET_LOGGING
+ << "'. error: " << err.what() << dendl;
+ op_ret = -EINVAL;
+ return;
+ }
+
+ std::unique_ptr<rgw::sal::Bucket> target_bucket;
+ op_ret = driver->load_bucket(this, rgw_bucket(s->bucket_tenant, configuration.target_bucket),
+ &target_bucket, y);
+ if (op_ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: failed to get target bucket '" << configuration.target_bucket << "', ret = " << op_ret << dendl;
+ return;
+ }
+ std::string obj_name;
+ RGWObjVersionTracker objv_tracker;
+ op_ret = target_bucket->get_logging_object_name(obj_name, configuration.target_prefix, null_yield, this, &objv_tracker);
+ if (op_ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: failed to get pending logging object name from target bucket '" << configuration.target_bucket << "'" << dendl;
+ return;
+ }
+ op_ret = rgw::bucketlogging::rollover_logging_object(configuration, target_bucket, obj_name, this, null_yield, true, &objv_tracker);
+ if (op_ret < 0) {
+ ldpp_dout(this, 1) << "ERROR: failed to flush pending logging object '" << obj_name
+ << "' to target bucket '" << configuration.target_bucket << "'" << dendl;
+ return;
+ }
+ ldpp_dout(this, 20) << "flushed pending logging object '" << obj_name
+ << "' to target bucket '" << configuration.target_bucket << "'" << dendl;
+ }
+};
+
+RGWOp* RGWHandler_REST_BucketLogging_S3::create_post_op() {
+ return new RGWPostBucketLoggingOp();
+}
+
RGWOp* RGWHandler_REST_BucketLogging_S3::create_put_op() {
return new RGWPutBucketLoggingOp();
}
virtual ~RGWHandler_REST_BucketLogging_S3() = default;
static RGWOp* create_get_op();
static RGWOp* create_put_op();
+ static RGWOp* create_post_op();
};
return new RGWDeleteMultiObj_ObjStore_S3;
}
+ if (s->info.args.exists("logging")) {
+ return RGWHandler_REST_BucketLogging_S3::create_post_op();
+ }
+
if (s->info.args.exists("mdsearch")) {
if (!s->cct->_conf->rgw_enable_mdsearch) {
return NULL;
case RGW_OP_GET_BUCKET_PUBLIC_ACCESS_BLOCK:
case RGW_OP_DELETE_BUCKET_PUBLIC_ACCESS_BLOCK:
case RGW_OP_GET_OBJ://s3select its post-method(payload contain the query) , the request is get-object
- case RGW_OP_PUT_BUCKET_LOGGING:
+ case RGW_OP_PUT_BUCKET_LOGGING:
+ case RGW_OP_POST_BUCKET_LOGGING:
case RGW_OP_GET_BUCKET_LOGGING:
break;
default: