Performance tests on high-end machines have indicated that Linux autotuning
of the receive buffer sizes can cause throughput collapse. See bug
#2100, and this email discussion:
http://marc.info/?l=ceph-devel&m=133009796706284&w=2
Initially default to 0, which leaves the kernel's default (autotuned)
behavior in place. We may adjust the default in the future.
Tested-by: Jim Schutt <jaschut@sandia.gov>
Signed-off-by: Sage Weil <sage@inktank.com>
Reviewed-by: Greg Farnum <greg@inktank.com>
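
For background, here is a minimal standalone sketch (not part of this
change) of what a nonzero ms_tcp_rcvbuf does at the socket level: per
tcp(7), an explicit SO_RCVBUF pins the receive buffer and disables the
kernel's receive autotuning, and per socket(7) the kernel doubles the
requested value for bookkeeping overhead. The 256 KiB size is an
arbitrary example value.

    // Sketch: explicitly size a TCP socket's receive buffer, then read
    // the value back to observe the kernel's doubling behavior.
    #include <cerrno>
    #include <cstdio>
    #include <cstring>
    #include <sys/socket.h>
    #include <unistd.h>

    int main() {
      int sd = ::socket(AF_INET, SOCK_STREAM, 0);
      if (sd < 0) {
        fprintf(stderr, "socket: %s\n", strerror(errno));
        return 1;
      }

      int size = 256 * 1024;  // example size; any nonzero value disables autotuning
      if (::setsockopt(sd, SOL_SOCKET, SO_RCVBUF, (void*)&size, sizeof(size)) < 0) {
        fprintf(stderr, "setsockopt(SO_RCVBUF): %s\n", strerror(errno));
        ::close(sd);
        return 1;
      }

      // The kernel doubles the requested size to account for bookkeeping
      // overhead, so getsockopt reports ~512 KiB here.
      int actual = 0;
      socklen_t len = sizeof(actual);
      ::getsockopt(sd, SOL_SOCKET, SO_RCVBUF, (void*)&actual, &len);
      printf("requested %d, kernel reports %d\n", size, actual);

      ::close(sd);
      return 0;
    }
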
OPTION(perf, OPT_BOOL, true) // enable internal perf counters
OPTION(ms_tcp_nodelay, OPT_BOOL, true)
+OPTION(ms_tcp_rcvbuf, OPT_INT, 0)  // 0 == leave the kernel default (autotuning) in place
OPTION(ms_initial_backoff, OPT_DOUBLE, .2)
OPTION(ms_max_backoff, OPT_DOUBLE, 15.0)
OPTION(ms_nocrc, OPT_BOOL, false)
ldout(msgr->cct,0) << "couldn't set TCP_NODELAY: " << cpp_strerror(r) << dendl;
}
}
+ // explicitly size the receive buffer; a nonzero value disables kernel autotuning
+ if (msgr->cct->_conf->ms_tcp_rcvbuf) {
+ int size = msgr->cct->_conf->ms_tcp_rcvbuf;
+ int r = ::setsockopt(sd, SOL_SOCKET, SO_RCVBUF, (void*)&size, sizeof(size));
+ if (r < 0) {
+ r = -errno;
+ ldout(msgr->cct,0) << "couldn't set SO_RCVBUF to " << size << ": " << cpp_strerror(r) << dendl;
+ }
+ }
}
int Pipe::connect()