From: Gal Salomon
Date: Tue, 30 Jul 2024 13:15:39 +0000 (+0300)
Subject: indentation
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=41ca7ab87dfec89bf4e543eec4b8e50ff67edbc4;p=ceph.git

indentation

Signed-off-by: Gal Salomon
---

diff --git a/src/rgw/rgw_s3select.cc b/src/rgw/rgw_s3select.cc
index a89139d3c0e7..e2e88dfae309 100644
--- a/src/rgw/rgw_s3select.cc
+++ b/src/rgw/rgw_s3select.cc
@@ -344,7 +344,7 @@ RGWSelectObj_ObjStore_S3::~RGWSelectObj_ObjStore_S3()
 
 int RGWSelectObj_ObjStore_S3::get_params(optional_yield y)
 {
-  if(m_s3select_query.empty() == false) {
+  if (m_s3select_query.empty() == false) {
     return 0;
   }
 #ifndef _ARROW_EXIST
@@ -416,18 +416,17 @@ int RGWSelectObj_ObjStore_S3::run_s3select_on_csv(const char* query, const char*
   if (output_escape_char.size()) {
     csv.output_escape_char = *output_escape_char.c_str();
   }
-  if(output_quote_fields.compare("ALWAYS") == 0) {
+  if (output_quote_fields.compare("ALWAYS") == 0) {
     csv.quote_fields_always = true;
-  } else if(output_quote_fields.compare("ASNEEDED") == 0) {
+  } else if (output_quote_fields.compare("ASNEEDED") == 0) {
     csv.quote_fields_asneeded = true;
   }
-  if(m_header_info.compare("IGNORE")==0) {
+  if (m_header_info.compare("IGNORE")==0) {
     csv.ignore_header_info=true;
-  } else if(m_header_info.compare("USE")==0) {
+  } else if (m_header_info.compare("USE")==0) {
     csv.use_header_info=true;
   }
-
-  if(m_outputFormat == OutputFormat::JSON) {
+  if (m_outputFormat == OutputFormat::JSON) {
     csv.output_json_format = true;
   }
   m_s3_csv_object.set_csv_query(&s3select_syntax, csv);
@@ -482,7 +481,7 @@ int RGWSelectObj_ObjStore_S3::run_s3select_on_parquet(const char* query)
   //parsing the SQL statement.
   s3select_syntax.parse_query(m_sql_query.c_str());
   parquet_object::csv_definitions parquet;
-  if(m_outputFormat == OutputFormat::JSON) {
+  if (m_outputFormat == OutputFormat::JSON) {
     parquet.output_json_format = true;
   }
 
@@ -548,7 +547,7 @@ int RGWSelectObj_ObjStore_S3::run_s3select_on_json(const char* query, const char
     return -EINVAL;
   }
 
-  if(m_outputFormat == OutputFormat::JSON) {
+  if (m_outputFormat == OutputFormat::JSON) {
     json.output_json_format = true;
   }
 
@@ -749,7 +748,7 @@ void RGWSelectObj_ObjStore_S3::execute(optional_yield y)
     m_aws_response_handler.set(s, this, fp_chunked_transfer_encoding);
   }
 
-  if(s->cct->_conf->rgw_disable_s3select == true)
+  if (s->cct->_conf->rgw_disable_s3select == true)
   {
     std::string error_msg="s3select : is disabled by rgw_disable_s3select configuration parameter";
     ldpp_dout(this, 10) << error_msg << dendl;
@@ -781,7 +780,7 @@ void RGWSelectObj_ObjStore_S3::execute(optional_yield y)
 
     m_requested_range = (m_end_scan_sz - m_start_scan_sz);
 
-    if(m_is_trino_request){
+    if (m_is_trino_request){
       // fetch more than requested(m_scan_offset), that additional bytes are scanned for end of row,
       // thus the additional length will be processed, and no broken row for Trino.
       // assumption: row is smaller than m_scan_offset. (a different approach is to request for additional range)
@@ -828,7 +827,7 @@ void RGWSelectObj_ObjStore_S3::shape_chunk_per_trino_requests(const char* it_cp,
   //the purpose is to return "perfect" results, with no broken or missing lines.
 
   off_t new_offset = 0;
-  if(m_scan_range_ind){//only upon range-scan
+  if (m_scan_range_ind){//only upon range-scan
     int64_t sc=0;
     int64_t start =0;
     const char* row_delimiter = m_row_delimiter.c_str();
@@ -836,10 +835,10 @@ void RGWSelectObj_ObjStore_S3::shape_chunk_per_trino_requests(const char* it_cp,
     ldpp_dout(this, 10) << "s3select query: per Trino request the first and last chunk should modified." << dendl;
 
     //chop the head of the first chunk and only upon the slice does not include the head of the object.
-    if(m_start_scan_sz && (m_aws_response_handler.get_processed_size()==0)){
+    if (m_start_scan_sz && (m_aws_response_handler.get_processed_size()==0)){
      char* p = const_cast<char*>(it_cp+ofs);
      while(strncmp(row_delimiter,p,1) && (p - (it_cp+ofs)) < len)p++;
-      if(!strncmp(row_delimiter,p,1)){
+      if (!strncmp(row_delimiter,p,1)){
       new_offset += (p - (it_cp+ofs))+1;
      }
     }
@@ -850,14 +849,14 @@ void RGWSelectObj_ObjStore_S3::shape_chunk_per_trino_requests(const char* it_cp,
 
     //chop the end of the last chunk for this request
     //if it's the last chunk, search for first row-delimiter for the following different use-cases
-    if((m_aws_response_handler.get_processed_size()+len) >= m_requested_range){
+    if ((m_aws_response_handler.get_processed_size()+len) >= m_requested_range){
       //had pass the requested range, start to search for first delimiter
-      if(m_aws_response_handler.get_processed_size()>m_requested_range){
+      if (m_aws_response_handler.get_processed_size()>m_requested_range){
       //the previous chunk contain the complete request(all data) and an extra bytes.
       //thus, search for the first row-delimiter
       //[:previous (RR) ... ][:current (RD) ]
       start = 0;
-      } else if(m_aws_response_handler.get_processed_size()){
+      } else if (m_aws_response_handler.get_processed_size()){
       //the *current* chunk contain the complete request in the middle of the chunk.
       //thus, search for the first row-delimiter after the complete request position
       //[:current (RR) .... (RD) ]
@@ -871,7 +870,7 @@ void RGWSelectObj_ObjStore_S3::shape_chunk_per_trino_requests(const char* it_cp,
     for(sc=start;sc<len;sc++)//search for first delimiter
     {
       const char* p = reinterpret_cast<const char*>(it_cp) + ofs + sc;
-      if(!strncmp(row_delimiter,p,1)){
+      if (!strncmp(row_delimiter,p,1)){
       ldout(s->cct, 10) << "S3select: found row-delimiter on " << sc << " get_processed_size = " << m_aws_response_handler.get_processed_size() << dendl;
       len = sc + 1;//+1 is for delimiter.  TODO what about m_object_size_for_processing (to update according to len)
       //the end of row exist in current chunk.
@@ -891,7 +890,7 @@ void RGWSelectObj_ObjStore_S3::shape_chunk_per_trino_requests(const char* it_cp,
 int RGWSelectObj_ObjStore_S3::csv_processing(bufferlist& bl, off_t ofs, off_t len)
 {
   int status = 0;
-  if(m_skip_next_chunk == true){
+  if (m_skip_next_chunk == true){
     return status;
   }
 
@@ -913,13 +912,13 @@ int RGWSelectObj_ObjStore_S3::csv_processing(bufferlist& bl, off_t ofs, off_t le
 
      }
 
-    if(ofs > it.length()){
+    if (ofs > it.length()){
     //safety check
      ldpp_dout(this, 10) << "offset and length may cause invalid read: ofs = " << ofs << " len = " << len << " it.length() = " << it.length() << dendl;
      ofs = 0;
     }
 
-    if(m_is_trino_request){
+    if (m_is_trino_request){
    //TODO replace len with it.length() ? ; test Trino flow with compressed objects.
    //is it possible to send get-by-ranges? in parallel?
      shape_chunk_per_trino_requests(&(it)[0], ofs, len);
@@ -983,7 +982,7 @@ int RGWSelectObj_ObjStore_S3::json_processing(bufferlist& bl, off_t ofs, off_t l
       continue;
     }
 
-    if((ofs + len) > it.length()){
+    if ((ofs + len) > it.length()){
       ldpp_dout(this, 10) << "s3select: offset and length may cause invalid read: ofs = " << ofs << " len = " << len << " it.length() = " << it.length() << dendl;
       ofs = 0;
       len = it.length();
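
For readers of this patch: the chunk-boundary rule that shape_chunk_per_trino_requests applies to Trino range scans (chop the partial row at the head of the first chunk, and extend the tail of the last chunk to the first row delimiter past the requested range, so no broken rows are returned) can be summarized by the standalone C++ sketch below. This is illustrative only, not code from the patch; the names Chunk and trim_to_whole_rows are invented here, and the real code scans byte-by-byte with strncmp against m_row_delimiter rather than using std::string_view::find.

    #include <cstddef>
    #include <string_view>

    struct Chunk {
      std::size_t begin; // first byte of the first complete row
      std::size_t end;   // one past the delimiter closing the last row
    };

    // buf       : the bytes fetched for this chunk
    // row_delim : single-character row delimiter (the patch compares one byte)
    // drop_head : true when the scan range does not start at the object head
    // range_end : offset inside buf where the requested range ends
    inline Chunk trim_to_whole_rows(std::string_view buf, char row_delim,
                                    bool drop_head, std::size_t range_end)
    {
      Chunk c{0, buf.size()};
      if (drop_head) {
        // Skip up to and including the first delimiter: the bytes before it
        // belong to a row owned by the previous range.
        auto p = buf.find(row_delim);
        c.begin = (p == std::string_view::npos) ? buf.size() : p + 1;
      }
      if (range_end < buf.size()) {
        // Keep bytes up to and including the first delimiter at or after the
        // range boundary, so the row straddling the boundary stays intact.
        auto p = buf.find(row_delim, range_end);
        c.end = (p == std::string_view::npos) ? buf.size() : p + 1;
      }
      return c;
    }

With row_delim set to '\n', drop_head true, and range_end inside the buffer, the returned Chunk covers exactly the complete rows, which mirrors the assumption stated in the patch comments that a row is smaller than m_scan_offset, the extra bytes fetched beyond the requested range.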