From: Casey Bodley
Date: Tue, 23 Apr 2019 19:40:01 +0000 (-0400)
Subject: rgw: beast handle_connection() takes io_context
X-Git-Tag: v15.1.0~2792^2~1
X-Git-Url: http://git-server-git.apps.pok.os.sepia.ceph.com/?a=commitdiff_plain;h=064f142746ae97f54865069cdacf5aae2b1b14f6;p=ceph.git

rgw: beast handle_connection() takes io_context

as of boost 1.70, the socket no longer has a get_io_context(), so we
have to pass it in as an argument

Signed-off-by: Casey Bodley
---

diff --git a/src/rgw/rgw_asio_frontend.cc b/src/rgw/rgw_asio_frontend.cc
index e4be074ec74..4af3695dc1c 100644
--- a/src/rgw/rgw_asio_frontend.cc
+++ b/src/rgw/rgw_asio_frontend.cc
@@ -81,7 +81,8 @@ class StreamIO : public rgw::asio::ClientIO {
 using SharedMutex = ceph::async::SharedMutex<boost::asio::io_context::executor_type>;
 
 template <typename Stream>
-void handle_connection(RGWProcessEnv& env, Stream& stream,
+void handle_connection(boost::asio::io_context& context,
+                       RGWProcessEnv& env, Stream& stream,
                        boost::beast::flat_buffer& buffer, bool is_ssl,
                        SharedMutex& pause_mutex,
                        rgw::dmclock::Scheduler *scheduler,
@@ -152,7 +153,7 @@ void handle_connection(RGWProcessEnv& env, Stream& stream,
             rgw::io::add_conlen_controlling(
               &real_client))));
       RGWRestfulIO client(cct, &real_client_io);
-      auto y = optional_yield{socket.get_io_context(), yield};
+      auto y = optional_yield{context, yield};
       process_request(env.store, env.rest, &req, env.uri_prefix,
                       *env.auth_registry, &client, env.olog, y, scheduler);
     }
@@ -560,7 +561,7 @@ void AsioFrontend::accept(Listener& l, boost::system::error_code ec)
       return;
     }
     buffer.consume(bytes);
-    handle_connection(env, stream, buffer, true, pause_mutex,
+    handle_connection(context, env, stream, buffer, true, pause_mutex,
                       scheduler.get(), ec, yield);
     if (!ec) {
       // ssl shutdown (ignoring errors)
@@ -578,7 +579,7 @@ void AsioFrontend::accept(Listener& l, boost::system::error_code ec)
       auto c = connections.add(conn);
       boost::beast::flat_buffer buffer;
       boost::system::error_code ec;
-      handle_connection(env, s, buffer, false, pause_mutex,
+      handle_connection(context, env, s, buffer, false, pause_mutex,
                         scheduler.get(), ec, yield);
       s.shutdown(tcp::socket::shutdown_both, ec);
     });
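
For context, a minimal standalone sketch of the pattern this change adopts
(not the rgw code; the names here are only for illustration): since boost
1.70 removed basic_socket::get_io_context(), the owner of the io_context
passes a reference down to the connection handler instead of recovering it
from the socket.

    #include <boost/asio/io_context.hpp>
    #include <boost/asio/ip/tcp.hpp>
    #include <boost/asio/spawn.hpp>

    // The io_context is threaded through as an explicit argument because
    // boost >= 1.70 no longer exposes it via the socket.
    template <typename Stream>
    void handle_connection(boost::asio::io_context& context,
                           Stream& stream,
                           boost::asio::yield_context yield)
    {
      // pre-1.70 code could recover the context from the socket itself:
      //   auto& context = stream.get_io_context();  // removed in boost 1.70
      // with the reference passed in, anything that needs the io_context
      // (e.g. rgw's optional_yield) can take it from here.
      (void)context; (void)stream; (void)yield;
    }

    int main()
    {
      boost::asio::io_context context;
      boost::asio::ip::tcp::socket socket{context};
      // the caller that owns the io_context passes it alongside the stream
      boost::asio::spawn(context, [&] (boost::asio::yield_context yield) {
        handle_connection(context, socket, yield);
      });
      context.run();
    }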