rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
sock_release(con->sock);
con->sock = NULL;
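+ /* clear any SOCK_CLOSED flag left set by the socket state-change
+  * callback, since this close was deliberate */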
+ clear_bit(SOCK_CLOSED, &con->state);
return rc;
}
dout("close %p peer %u.%u.%u.%u:%u\n", con,
IPQUADPORT(con->peer_addr.ipaddr));
set_bit(CLOSED, &con->state); /* in case there's queued work */
+ clear_bit(STANDBY, &con->state); /* avoid connect_seq bump */
reset_connection(con);
queue_con(con);
}
struct ceph_connection *con)
{
int len = strlen(CEPH_BANNER);
+ unsigned global_seq = get_global_seq(con->msgr, 0);
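+ /* read the global seq once so the value logged below matches what
+  * is sent on the wire */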
- dout("prepare_write_connect %p\n", con);
+ dout("prepare_write_connect %p connect_seq=%d global_seq=%d\n", con,
+ con->connect_seq, global_seq);
con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
- con->out_connect.global_seq =
- cpu_to_le32(get_global_seq(con->msgr, 0));
+ con->out_connect.global_seq = cpu_to_le32(global_seq);
con->out_connect.flags = 0;
if (test_bit(LOSSYTX, &con->state))
con->out_connect.flags = CEPH_MSG_CONNECT_LOSSY;
/* Tell ceph about it. */
pr_info("reset on %s%d\n", ENTITY_NAME(con->peer_name));
- con->ops->peer_reset(con);
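+ /* peer_reset is optional; only notify the client if it provided one */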
+ if (con->ops->peer_reset)
+ con->ops->peer_reset(con);
break;
case CEPH_MSGR_TAG_RETRY_SESSION:
while (middle_len > 0 && (!m->middle ||
m->middle->vec.iov_len < middle_len)) {
if (m->middle == NULL) {
- BUG_ON(!con->ops->alloc_middle);
- ret = con->ops->alloc_middle(con, m);
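+ /* alloc_middle is optional; without it, fail and skip the payload */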
+ ret = -EOPNOTSUPP;
+ if (con->ops->alloc_middle)
+ ret = con->ops->alloc_middle(con, m);
if (ret < 0) {
dout("alloc_middle failed, skipping payload\n");
con->in_base_pos = -middle_len - data_len - sizeof(m->footer);
con->in_msg_pos.data_pos = 0;
/* find pages for data payload */
want = calc_pages_for(data_off & ~PAGE_MASK, data_len);
- ret = 0;
- BUG_ON(!con->ops->prepare_pages);
- ret = con->ops->prepare_pages(con, m, want);
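+ /* prepare_pages is optional; without it, fail and skip the payload */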
+ ret = -1;
+ if (con->ops->prepare_pages)
+ ret = con->ops->prepare_pages(con, m, want);
if (ret < 0) {
dout("%p prepare_pages failed, skipping payload\n", m);
con->in_base_pos = -data_len - sizeof(m->footer);