#include "ceph_debug.h"

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/net.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <net/tcp.h>

#include "super.h"
#include "messenger.h"
#include "decode.h"
#include "pagelist.h"

/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */

/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;

#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif

static void queue_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
static void ceph_fault(struct ceph_connection *con);

/*
 * nicely render a sockaddr as a string.
 */
#define MAX_ADDR_STR 20
static char addr_str[MAX_ADDR_STR][40];
static DEFINE_SPINLOCK(addr_str_lock);
static int last_addr_str;

const char *pr_addr(const struct sockaddr_storage *ss)
{
	int i;
	char *s;
	struct sockaddr_in *in4 = (void *)ss;
	unsigned char *quad = (void *)&in4->sin_addr.s_addr;
	struct sockaddr_in6 *in6 = (void *)ss;

	spin_lock(&addr_str_lock);
	i = last_addr_str++;
	if (last_addr_str == MAX_ADDR_STR)
		last_addr_str = 0;
	spin_unlock(&addr_str_lock);
	s = addr_str[i];

	switch (ss->ss_family) {
	case AF_INET:
		sprintf(s, "%u.%u.%u.%u:%u",
			(unsigned int)quad[0],
			(unsigned int)quad[1],
			(unsigned int)quad[2],
			(unsigned int)quad[3],
			(unsigned int)ntohs(in4->sin_port));
		break;

	case AF_INET6:
		sprintf(s, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%u",
			in6->sin6_addr.s6_addr16[0],
			in6->sin6_addr.s6_addr16[1],
			in6->sin6_addr.s6_addr16[2],
			in6->sin6_addr.s6_addr16[3],
			in6->sin6_addr.s6_addr16[4],
			in6->sin6_addr.s6_addr16[5],
			in6->sin6_addr.s6_addr16[6],
			in6->sin6_addr.s6_addr16[7],
			(unsigned int)ntohs(in6->sin6_port));
		break;

	default:
		sprintf(s, "(unknown sockaddr family %d)", (int)ss->ss_family);
	}

	return s;
}
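
/*
 * Illustrative note (not from the original file): because pr_addr()
 * returns one of MAX_ADDR_STR rotating static buffers, callers can use
 * it directly in printk-style calls without freeing anything:
 *
 *	dout("connecting to %s\n", pr_addr(&con->peer_addr.in_addr));
 *
 * A returned string is only valid until MAX_ADDR_STR (20) further
 * pr_addr() calls have been made, so the pointer must not be cached.
 */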

static void encode_my_addr(struct ceph_messenger *msgr)
{
	memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
	ceph_encode_addr(&msgr->my_enc_addr);
}

/*
 * work queue for all reading and writing to/from the socket.
 */
struct workqueue_struct *ceph_msgr_wq;

int __init ceph_msgr_init(void)
{
	ceph_msgr_wq = create_workqueue("ceph-msgr");
	if (IS_ERR(ceph_msgr_wq)) {
		int ret = PTR_ERR(ceph_msgr_wq);
		pr_err("msgr_init failed to create workqueue: %d\n", ret);
		ceph_msgr_wq = NULL;
		return ret;
	}
	return 0;
}

void ceph_msgr_exit(void)
{
	destroy_workqueue(ceph_msgr_wq);
}

void ceph_msgr_flush(void)
{
	flush_workqueue(ceph_msgr_wq);
}
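
/*
 * Sketch of the expected init/teardown pairing (an assumed caller,
 * e.g. module init code; not part of this file):
 *
 *	ret = ceph_msgr_init();
 *	if (ret < 0)
 *		return ret;
 *	...
 *	ceph_msgr_flush();	(wait for queued con_work to drain)
 *	ceph_msgr_exit();
 */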

/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_data_ready(struct sock *sk, int count_unused)
{
	struct ceph_connection *con =
		(struct ceph_connection *)sk->sk_user_data;

	if (sk->sk_state != TCP_CLOSE_WAIT) {
		dout("ceph_data_ready on %p state = %lu, queueing work\n",
		     con, con->state);
		queue_con(con);
	}
}

/* socket has buffer space for writing */
static void ceph_write_space(struct sock *sk)
{
	struct ceph_connection *con =
		(struct ceph_connection *)sk->sk_user_data;

	/* only queue to workqueue if there is data we want to write. */
	if (test_bit(WRITE_PENDING, &con->state)) {
		dout("ceph_write_space %p queueing write work\n", con);
		queue_con(con);
	} else {
		dout("ceph_write_space %p nothing to write\n", con);
	}

	/* since we have our own write_space, clear the SOCK_NOSPACE flag */
	clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
}

/* socket's state has changed */
static void ceph_state_change(struct sock *sk)
{
	struct ceph_connection *con =
		(struct ceph_connection *)sk->sk_user_data;

	dout("ceph_state_change %p state = %lu sk_state = %u\n",
	     con, con->state, sk->sk_state);

	if (test_bit(CLOSED, &con->state))
		return;

	switch (sk->sk_state) {
	case TCP_CLOSE:
		dout("ceph_state_change TCP_CLOSE\n");
	case TCP_CLOSE_WAIT:
		dout("ceph_state_change TCP_CLOSE_WAIT\n");
		if (test_and_set_bit(SOCK_CLOSED, &con->state) == 0) {
			if (test_bit(CONNECTING, &con->state))
				con->error_msg = "connection failed";
			else
				con->error_msg = "socket closed";
			queue_con(con);
		}
		break;
	case TCP_ESTABLISHED:
		dout("ceph_state_change TCP_ESTABLISHED\n");
		queue_con(con);
		break;
	}
}

/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
			       struct ceph_connection *con)
{
	struct sock *sk = sock->sk;
	sk->sk_user_data = (void *)con;
	sk->sk_data_ready = ceph_data_ready;
	sk->sk_write_space = ceph_write_space;
	sk->sk_state_change = ceph_state_change;
}

/*
 * initiate connection to a remote socket.
 */
static struct socket *ceph_tcp_connect(struct ceph_connection *con)
{
	struct sockaddr *paddr = (struct sockaddr *)&con->peer_addr.in_addr;
	struct socket *sock;
	int ret;

	BUG_ON(con->sock);
	ret = sock_create_kern(AF_INET, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (ret)
		return ERR_PTR(ret);
	con->sock = sock;
	sock->sk->sk_allocation = GFP_NOFS;

#ifdef CONFIG_LOCKDEP
	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif

	set_sock_callbacks(sock, con);

	dout("connect %s\n", pr_addr(&con->peer_addr.in_addr));

	ret = sock->ops->connect(sock, paddr, sizeof(*paddr), O_NONBLOCK);
	if (ret == -EINPROGRESS) {
		dout("connect %s EINPROGRESS sk_state = %u\n",
		     pr_addr(&con->peer_addr.in_addr),
		     sock->sk->sk_state);
		ret = 0;
	}
	if (ret < 0) {
		pr_err("connect %s error %d\n",
		       pr_addr(&con->peer_addr.in_addr), ret);
		sock_release(sock);
		con->sock = NULL;
		con->error_msg = "connect error";
	}

	if (ret < 0)
		return ERR_PTR(ret);
	return sock;
}

static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = {buf, len};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };

	return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
}

/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
			    size_t kvlen, size_t len, int more)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };

	if (more)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

	return kernel_sendmsg(sock, &msg, iov, kvlen, len);
}
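
/*
 * Illustrative note: callers pass con->out_more for @more, so a header
 * written via write_partial_kvec() sets MSG_MORE when data pages will
 * follow.  A sketch of the call as it appears in write_partial_kvec()
 * below (not new behavior):
 *
 *	ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
 *			       con->out_kvec_left, con->out_kvec_bytes,
 *			       con->out_more);
 *
 * MSG_MORE hints to TCP that it may batch the header with the payload
 * instead of flushing a small segment immediately.
 */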

/*
 * Shutdown/close the socket for the given connection.
 */
static int con_close_socket(struct ceph_connection *con)
{
	int rc;

	dout("con_close_socket on %p sock %p\n", con, con->sock);
	if (!con->sock)
		return 0;
	set_bit(SOCK_CLOSED, &con->state);
	rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
	sock_release(con->sock);
	con->sock = NULL;
	clear_bit(SOCK_CLOSED, &con->state);
	return rc;
}

/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
	list_del_init(&msg->list_head);
	ceph_msg_put(msg);
}
static void ceph_msg_remove_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
							list_head);
		ceph_msg_remove(msg);
	}
}

static void reset_connection(struct ceph_connection *con)
{
	/* reset connection, out_queue, msg_ and connect_seq */
	/* discard existing out_queue and msg_seq */
	ceph_msg_remove_list(&con->out_queue);
	ceph_msg_remove_list(&con->out_sent);

	if (con->in_msg) {
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
	}

	con->connect_seq = 0;
	con->out_seq = 0;
	if (con->out_msg) {
		ceph_msg_put(con->out_msg);
		con->out_msg = NULL;
	}
	con->out_keepalive_pending = false;
	con->in_seq = 0;
	con->in_seq_acked = 0;
}

/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
	dout("con_close %p peer %s\n", con, pr_addr(&con->peer_addr.in_addr));
	set_bit(CLOSED, &con->state);  /* in case there's queued work */
	clear_bit(STANDBY, &con->state);  /* avoid connect_seq bump */
	clear_bit(LOSSYTX, &con->state);  /* so we retry next connect */
	clear_bit(KEEPALIVE_PENDING, &con->state);
	clear_bit(WRITE_PENDING, &con->state);
	mutex_lock(&con->mutex);
	reset_connection(con);
	con->peer_global_seq = 0;
	cancel_delayed_work(&con->work);
	mutex_unlock(&con->mutex);
	queue_con(con);
}

/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
{
	dout("con_open %p %s\n", con, pr_addr(&addr->in_addr));
	set_bit(OPENING, &con->state);
	clear_bit(CLOSED, &con->state);
	memcpy(&con->peer_addr, addr, sizeof(*addr));
	con->delay = 0;      /* reset backoff memory */
	queue_con(con);
}

/*
 * return true if this connection ever successfully opened
 */
bool ceph_con_opened(struct ceph_connection *con)
{
	return con->connect_seq > 0;
}

struct ceph_connection *ceph_con_get(struct ceph_connection *con)
{
	dout("con_get %p nref = %d -> %d\n", con,
	     atomic_read(&con->nref), atomic_read(&con->nref) + 1);
	if (atomic_inc_not_zero(&con->nref))
		return con;
	return NULL;
}

void ceph_con_put(struct ceph_connection *con)
{
	dout("con_put %p nref = %d -> %d\n", con,
	     atomic_read(&con->nref), atomic_read(&con->nref) - 1);
	BUG_ON(atomic_read(&con->nref) == 0);
	if (atomic_dec_and_test(&con->nref)) {
		BUG_ON(con->sock);
		kfree(con);
	}
}

/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con)
{
	dout("con_init %p\n", con);
	memset(con, 0, sizeof(*con));
	atomic_set(&con->nref, 1);
	con->msgr = msgr;
	mutex_init(&con->mutex);
	INIT_LIST_HEAD(&con->out_queue);
	INIT_LIST_HEAD(&con->out_sent);
	INIT_DELAYED_WORK(&con->work, con_work);
}

/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
	u32 ret;

	spin_lock(&msgr->global_seq_lock);
	if (msgr->global_seq < gt)
		msgr->global_seq = gt;
	ret = ++msgr->global_seq;
	spin_unlock(&msgr->global_seq_lock);
	return ret;
}
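
/*
 * Worked example (illustrative): if msgr->global_seq is 5,
 * get_global_seq(msgr, 0) returns 6; get_global_seq(msgr, 10) first
 * raises global_seq to 10 and returns 11.  The result is therefore
 * both unique within this messenger and strictly greater than @gt,
 * which is how RETRY_GLOBAL replies are satisfied in process_connect().
 */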

/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid.. we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con, int v)
{
	struct ceph_msg *m = con->out_msg;

	dout("prepare_write_message_footer %p\n", con);
	con->out_kvec_is_msg = true;
	con->out_kvec[v].iov_base = &m->footer;
	con->out_kvec[v].iov_len = sizeof(m->footer);
	con->out_kvec_bytes += sizeof(m->footer);
	con->out_kvec_left++;
	con->out_more = m->more_to_follow;
	con->out_msg_done = true;
}

/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
	struct ceph_msg *m;
	int v = 0;

	con->out_kvec_bytes = 0;
	con->out_kvec_is_msg = true;
	con->out_msg_done = false;

	/* Sneak an ack in there first?  If we can get it into the same
	 * TCP packet that's a good thing. */
	if (con->in_seq > con->in_seq_acked) {
		con->in_seq_acked = con->in_seq;
		con->out_kvec[v].iov_base = &tag_ack;
		con->out_kvec[v++].iov_len = 1;
		con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
		con->out_kvec[v].iov_base = &con->out_temp_ack;
		con->out_kvec[v++].iov_len = sizeof(con->out_temp_ack);
		con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
	}

	m = list_first_entry(&con->out_queue,
			     struct ceph_msg, list_head);
	con->out_msg = m;
	if (test_bit(LOSSYTX, &con->state)) {
		list_del_init(&m->list_head);
	} else {
		/* put message on sent list */
		ceph_msg_get(m);
		list_move_tail(&m->list_head, &con->out_sent);
	}

	/*
	 * only assign outgoing seq # if we haven't sent this message
	 * yet.  if it is requeued, resend with its original seq.
	 */
	if (m->needs_out_seq) {
		m->hdr.seq = cpu_to_le64(++con->out_seq);
		m->needs_out_seq = false;
	}

	dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
	     m, con->out_seq, le16_to_cpu(m->hdr.type),
	     le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
	     le32_to_cpu(m->hdr.data_len),
	     m->nr_pages);
	BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);

	/* tag + hdr + front + middle */
	con->out_kvec[v].iov_base = &tag_msg;
	con->out_kvec[v++].iov_len = 1;
	con->out_kvec[v].iov_base = &m->hdr;
	con->out_kvec[v++].iov_len = sizeof(m->hdr);
	con->out_kvec[v++] = m->front;
	if (m->middle)
		con->out_kvec[v++] = m->middle->vec;
	con->out_kvec_left = v;
	con->out_kvec_bytes += 1 + sizeof(m->hdr) + m->front.iov_len +
		(m->middle ? m->middle->vec.iov_len : 0);
	con->out_kvec_cur = con->out_kvec;

	/* fill in crc (except data pages), footer */
	con->out_msg->hdr.crc =
		cpu_to_le32(crc32c(0, (void *)&m->hdr,
				   sizeof(m->hdr) - sizeof(m->hdr.crc)));
	con->out_msg->footer.flags = CEPH_MSG_FOOTER_COMPLETE;
	con->out_msg->footer.front_crc =
		cpu_to_le32(crc32c(0, m->front.iov_base, m->front.iov_len));
	if (m->middle)
		con->out_msg->footer.middle_crc =
			cpu_to_le32(crc32c(0, m->middle->vec.iov_base,
					   m->middle->vec.iov_len));
	else
		con->out_msg->footer.middle_crc = 0;
	con->out_msg->footer.data_crc = 0;
	dout("prepare_write_message front_crc %u middle_crc %u\n",
	     le32_to_cpu(con->out_msg->footer.front_crc),
	     le32_to_cpu(con->out_msg->footer.middle_crc));

	/* is there a data payload? */
	if (le32_to_cpu(m->hdr.data_len) > 0) {
		/* initialize page iterator */
		con->out_msg_pos.page = 0;
		con->out_msg_pos.page_pos =
			le16_to_cpu(m->hdr.data_off) & ~PAGE_MASK;
		con->out_msg_pos.data_pos = 0;
		con->out_msg_pos.did_page_crc = 0;
		con->out_more = 1;  /* data + footer will follow */
	} else {
		/* no, queue up footer too and be done */
		prepare_write_message_footer(con, v);
	}

	set_bit(WRITE_PENDING, &con->state);
}

/*
 * Prepare an ack.
 */
static void prepare_write_ack(struct ceph_connection *con)
{
	dout("prepare_write_ack %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con->out_kvec[0].iov_base = &tag_ack;
	con->out_kvec[0].iov_len = 1;
	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con->out_kvec[1].iov_base = &con->out_temp_ack;
	con->out_kvec[1].iov_len = sizeof(con->out_temp_ack);
	con->out_kvec_left = 2;
	con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
	con->out_kvec_cur = con->out_kvec;
	con->out_more = 1;  /* more will follow.. eventually.. */
	set_bit(WRITE_PENDING, &con->state);
}

/*
 * Prepare to write keepalive byte.
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
	dout("prepare_write_keepalive %p\n", con);
	con->out_kvec[0].iov_base = &tag_keepalive;
	con->out_kvec[0].iov_len = 1;
	con->out_kvec_left = 1;
	con->out_kvec_bytes = 1;
	con->out_kvec_cur = con->out_kvec;
	set_bit(WRITE_PENDING, &con->state);
}
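
/*
 * For reference (derived from the prepare_write_* helpers above, not
 * new protocol): after negotiation, each item on the wire is a single
 * tag byte followed by its payload:
 *
 *	TAG_MSG        hdr | front | [middle] | [data pages] | footer
 *	TAG_ACK        le64 seq of last message received in order
 *	TAG_KEEPALIVE  (tag byte only)
 */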

/*
 * Connection negotiation.
 */

static void prepare_connect_authorizer(struct ceph_connection *con)
{
	void *auth_buf = NULL;
	int auth_len = 0;
	int auth_protocol = 0;

	mutex_unlock(&con->mutex);
	if (con->ops->get_authorizer)
		con->ops->get_authorizer(con, &auth_buf, &auth_len,
					 &auth_protocol, &con->auth_reply_buf,
					 &con->auth_reply_buf_len,
					 con->auth_retry);
	mutex_lock(&con->mutex);

	con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
	con->out_connect.authorizer_len = cpu_to_le32(auth_len);

	con->out_kvec[con->out_kvec_left].iov_base = auth_buf;
	con->out_kvec[con->out_kvec_left].iov_len = auth_len;
	con->out_kvec_left++;
	con->out_kvec_bytes += auth_len;
}

/*
 * We connected to a peer and are saying hello.
 */
static void prepare_write_banner(struct ceph_messenger *msgr,
				 struct ceph_connection *con)
{
	int len = strlen(CEPH_BANNER);

	con->out_kvec[0].iov_base = CEPH_BANNER;
	con->out_kvec[0].iov_len = len;
	con->out_kvec[1].iov_base = &msgr->my_enc_addr;
	con->out_kvec[1].iov_len = sizeof(msgr->my_enc_addr);
	con->out_kvec_left = 2;
	con->out_kvec_bytes = len + sizeof(msgr->my_enc_addr);
	con->out_kvec_cur = con->out_kvec;
	con->out_more = 0;
	set_bit(WRITE_PENDING, &con->state);
}

static void prepare_write_connect(struct ceph_messenger *msgr,
				  struct ceph_connection *con,
				  int after_banner)
{
	unsigned global_seq = get_global_seq(con->msgr, 0);
	int proto;

	switch (con->peer_name.type) {
	case CEPH_ENTITY_TYPE_MON:
		proto = CEPH_MONC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_OSD:
		proto = CEPH_OSDC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_MDS:
		proto = CEPH_MDSC_PROTOCOL;
		break;
	default:
		BUG();
	}

	dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
	     con->connect_seq, global_seq, proto);

	con->out_connect.features = cpu_to_le64(CEPH_FEATURE_SUPPORTED_CLIENT);
	con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
	con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
	con->out_connect.global_seq = cpu_to_le32(global_seq);
	con->out_connect.protocol_version = cpu_to_le32(proto);
	con->out_connect.flags = 0;

	if (!after_banner) {
		con->out_kvec_left = 0;
		con->out_kvec_bytes = 0;
	}
	con->out_kvec[con->out_kvec_left].iov_base = &con->out_connect;
	con->out_kvec[con->out_kvec_left].iov_len = sizeof(con->out_connect);
	con->out_kvec_left++;
	con->out_kvec_bytes += sizeof(con->out_connect);
	con->out_kvec_cur = con->out_kvec;
	con->out_more = 0;
	set_bit(WRITE_PENDING, &con->state);

	prepare_connect_authorizer(con);
}

/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
	int ret;

	dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
	while (con->out_kvec_bytes > 0) {
		ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
				       con->out_kvec_left, con->out_kvec_bytes,
				       con->out_more);
		if (ret <= 0)
			goto out;
		con->out_kvec_bytes -= ret;
		if (con->out_kvec_bytes == 0)
			break;            /* done */
		while (ret > 0) {
			if (ret >= con->out_kvec_cur->iov_len) {
				ret -= con->out_kvec_cur->iov_len;
				con->out_kvec_cur++;
				con->out_kvec_left--;
			} else {
				con->out_kvec_cur->iov_len -= ret;
				con->out_kvec_cur->iov_base += ret;
				ret = 0;
			}
		}
	}
	con->out_kvec_left = 0;
	con->out_kvec_is_msg = false;
	ret = 1;
out:
	dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
	     con->out_kvec_bytes, con->out_kvec_left, ret);
	return ret;  /* done! */
}

/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_msg_pages(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;
	unsigned data_len = le32_to_cpu(msg->hdr.data_len);
	size_t len;
	int crc = !con->msgr->nocrc;
	int ret;

	dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
	     con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages,
	     con->out_msg_pos.page_pos);

	while (con->out_msg_pos.page < con->out_msg->nr_pages) {
		struct page *page = NULL;
		void *kaddr = NULL;

		/*
		 * if we are calculating the data crc (the default), we need
		 * to map the page.  if our pages[] has been revoked, use the
		 * zero page.
		 */
		if (msg->pages) {
			page = msg->pages[con->out_msg_pos.page];
			if (crc)
				kaddr = kmap(page);
		} else if (msg->pagelist) {
			page = list_first_entry(&msg->pagelist->head,
						struct page, lru);
			if (crc)
				kaddr = kmap(page);
		} else {
			page = con->msgr->zero_page;
			if (crc)
				kaddr = page_address(con->msgr->zero_page);
		}
		len = min((int)(PAGE_SIZE - con->out_msg_pos.page_pos),
			  (int)(data_len - con->out_msg_pos.data_pos));
		if (crc && !con->out_msg_pos.did_page_crc) {
			void *base = kaddr + con->out_msg_pos.page_pos;
			u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);

			BUG_ON(kaddr == NULL);
			con->out_msg->footer.data_crc =
				cpu_to_le32(crc32c(tmpcrc, base, len));
			con->out_msg_pos.did_page_crc = 1;
		}

		ret = kernel_sendpage(con->sock, page,
				      con->out_msg_pos.page_pos, len,
				      MSG_DONTWAIT | MSG_NOSIGNAL |
				      MSG_MORE);

		if (crc && (msg->pages || msg->pagelist))
			kunmap(page);

		if (ret <= 0)
			goto out;

		con->out_msg_pos.data_pos += ret;
		con->out_msg_pos.page_pos += ret;
		if (ret == len) {
			con->out_msg_pos.page_pos = 0;
			con->out_msg_pos.page++;
			con->out_msg_pos.did_page_crc = 0;
			if (msg->pagelist)
				list_move_tail(&page->lru,
					       &msg->pagelist->head);
		}
	}

	dout("write_partial_msg_pages %p msg %p done\n", con, msg);

	/* prepare and queue up footer, too */
	if (!crc)
		con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
	con->out_kvec_bytes = 0;
	con->out_kvec_left = 0;
	con->out_kvec_cur = con->out_kvec;
	prepare_write_message_footer(con, 0);
	ret = 1;
out:
	return ret;
}

/*
 * write some zeros
 */
static int write_partial_skip(struct ceph_connection *con)
{
	int ret;

	while (con->out_skip > 0) {
		struct kvec iov = {
			.iov_base = page_address(con->msgr->zero_page),
			.iov_len = min(con->out_skip, (int)PAGE_CACHE_SIZE)
		};

		ret = ceph_tcp_sendmsg(con->sock, &iov, 1, iov.iov_len, 1);
		if (ret <= 0)
			goto out;
		con->out_skip -= ret;
	}
	ret = 1;
out:
	return ret;
}

/*
 * Prepare to read connection handshake, or an ack.
 */
static void prepare_read_banner(struct ceph_connection *con)
{
	dout("prepare_read_banner %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_connect(struct ceph_connection *con)
{
	dout("prepare_read_connect %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_ack(struct ceph_connection *con)
{
	dout("prepare_read_ack %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_tag(struct ceph_connection *con)
{
	dout("prepare_read_tag %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_READY;
}

/*
 * Prepare to read a message.
 */
static int prepare_read_message(struct ceph_connection *con)
{
	dout("prepare_read_message %p\n", con);
	BUG_ON(con->in_msg != NULL);
	con->in_base_pos = 0;
	con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
	return 0;
}

static int read_partial(struct ceph_connection *con,
			int *to, int size, void *object)
{
	*to += size;
	while (con->in_base_pos < *to) {
		int left = *to - con->in_base_pos;
		int have = size - left;
		int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	return 1;
}
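
/*
 * Illustrative example of the cumulative @to cursor: read_partial()
 * adds @size to *to and consumes socket data until in_base_pos catches
 * up, so back-to-back calls resume cleanly mid-object.
 * read_partial_banner() below relies on this:
 *
 *	int ret, to = 0;
 *	ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
 *	ret = read_partial(con, &to, sizeof(con->actual_peer_addr),
 *			   &con->actual_peer_addr);
 *
 * If the socket drains partway through, a later invocation re-enters
 * with the same to/in_base_pos bookkeeping and picks up where it left
 * off.
 */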

/*
 * Read all or part of the connect-side handshake on a new connection
 */
static int read_partial_banner(struct ceph_connection *con)
{
	int ret, to = 0;

	dout("read_partial_banner %p at %d\n", con, con->in_base_pos);

	/* peer's banner */
	ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
	if (ret <= 0)
		goto out;
	ret = read_partial(con, &to, sizeof(con->actual_peer_addr),
			   &con->actual_peer_addr);
	if (ret <= 0)
		goto out;
	ret = read_partial(con, &to, sizeof(con->peer_addr_for_me),
			   &con->peer_addr_for_me);
	if (ret <= 0)
		goto out;
out:
	return ret;
}

static int read_partial_connect(struct ceph_connection *con)
{
	int ret, to = 0;

	dout("read_partial_connect %p at %d\n", con, con->in_base_pos);

	ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply);
	if (ret <= 0)
		goto out;
	ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len),
			   con->auth_reply_buf);
	if (ret <= 0)
		goto out;

	dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
	     con, (int)con->in_reply.tag,
	     le32_to_cpu(con->in_reply.connect_seq),
	     le32_to_cpu(con->in_reply.global_seq));
out:
	return ret;
}

/*
 * Verify the hello banner looks okay.
 */
static int verify_hello(struct ceph_connection *con)
{
	if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
		pr_err("connect to %s got bad banner\n",
		       pr_addr(&con->peer_addr.in_addr));
		con->error_msg = "protocol error, bad banner";
		return -1;
	}
	return 0;
}

static bool addr_is_blank(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
	case AF_INET6:
		return
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
	}
	return false;
}

static int addr_port(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)ss)->sin_port);
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
	}
	return 0;
}

static void addr_set_port(struct sockaddr_storage *ss, int p)
{
	switch (ss->ss_family) {
	case AF_INET:
		((struct sockaddr_in *)ss)->sin_port = htons(p);
		break;
	case AF_INET6:
		((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
		break;
	}
}

/*
 * Parse an ip[:port] list into an addr array.  Use the default
 * monitor port if a port isn't specified.
 */
int ceph_parse_ips(const char *c, const char *end,
		   struct ceph_entity_addr *addr,
		   int max_count, int *count)
{
	int i;
	const char *p = c;

	dout("parse_ips on '%.*s'\n", (int)(end-c), c);
	for (i = 0; i < max_count; i++) {
		const char *ipend;
		struct sockaddr_storage *ss = &addr[i].in_addr;
		struct sockaddr_in *in4 = (void *)ss;
		struct sockaddr_in6 *in6 = (void *)ss;
		int port;

		memset(ss, 0, sizeof(*ss));
		if (in4_pton(p, end - p, (u8 *)&in4->sin_addr.s_addr,
			     ',', &ipend)) {
			ss->ss_family = AF_INET;
		} else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr,
				    ',', &ipend)) {
			ss->ss_family = AF_INET6;
		} else {
			goto bad;
		}
		p = ipend;

		/* port? */
		if (p < end && *p == ':') {
			port = 0;
			p++;
			while (p < end && *p >= '0' && *p <= '9') {
				port = (port * 10) + (*p - '0');
				p++;
			}
			if (port > 65535 || port == 0)
				goto bad;
		} else {
			port = CEPH_MON_PORT;
		}

		addr_set_port(ss, port);

		dout("parse_ips got %s\n", pr_addr(ss));

		if (p == end)
			break;
		if (*p != ',')
			goto bad;
		p++;
	}
	if (p != end)
		goto bad;

	if (count)
		*count = i + 1;
	return 0;

bad:
	pr_err("parse_ips bad ip '%s'\n", c);
	return -EINVAL;
}
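
/*
 * Usage sketch (illustrative values): parsing a comma-separated
 * monitor list fills addr[] and applies CEPH_MON_PORT where no port
 * is given:
 *
 *	const char *s = "192.168.0.1:6789,192.168.0.2";
 *	struct ceph_entity_addr addr[2];
 *	int count;
 *
 *	err = ceph_parse_ips(s, s + strlen(s), addr, 2, &count);
 *
 * On success count == 2, and addr[1] carries the default monitor port.
 */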

static int process_banner(struct ceph_connection *con)
{
	dout("process_banner on %p\n", con);

	if (verify_hello(con) < 0)
		return -1;

	ceph_decode_addr(&con->actual_peer_addr);
	ceph_decode_addr(&con->peer_addr_for_me);

	/*
	 * Make sure the other end is who we wanted.  note that the other
	 * end may not yet know their ip address, so if it's 0.0.0.0, give
	 * them the benefit of the doubt.
	 */
	if (memcmp(&con->peer_addr, &con->actual_peer_addr,
		   sizeof(con->peer_addr)) != 0 &&
	    !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
	      con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
		pr_warning("wrong peer, want %s/%lld, got %s/%lld\n",
			   pr_addr(&con->peer_addr.in_addr),
			   le64_to_cpu(con->peer_addr.nonce),
			   pr_addr(&con->actual_peer_addr.in_addr),
			   le64_to_cpu(con->actual_peer_addr.nonce));
		con->error_msg = "wrong peer at address";
		return -1;
	}

	/*
	 * did we learn our address?
	 */
	if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
		int port = addr_port(&con->msgr->inst.addr.in_addr);

		memcpy(&con->msgr->inst.addr.in_addr,
		       &con->peer_addr_for_me.in_addr,
		       sizeof(con->peer_addr_for_me.in_addr));
		addr_set_port(&con->msgr->inst.addr.in_addr, port);
		encode_my_addr(con->msgr);
		dout("process_banner learned my addr is %s\n",
		     pr_addr(&con->msgr->inst.addr.in_addr));
	}

	set_bit(NEGOTIATING, &con->state);
	prepare_read_connect(con);
	return 0;
}

static void fail_protocol(struct ceph_connection *con)
{
	reset_connection(con);
	set_bit(CLOSED, &con->state);  /* in case there's queued work */

	mutex_unlock(&con->mutex);
	if (con->ops->bad_proto)
		con->ops->bad_proto(con);
	mutex_lock(&con->mutex);
}

static int process_connect(struct ceph_connection *con)
{
	u64 sup_feat = CEPH_FEATURE_SUPPORTED_CLIENT;
	u64 req_feat = CEPH_FEATURE_REQUIRED_CLIENT;
	u64 server_feat = le64_to_cpu(con->in_reply.features);

	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);

	switch (con->in_reply.tag) {
	case CEPH_MSGR_TAG_FEATURES:
		pr_err("%s%lld %s feature set mismatch,"
		       " my %llx < server's %llx, missing %llx\n",
		       ENTITY_NAME(con->peer_name),
		       pr_addr(&con->peer_addr.in_addr),
		       sup_feat, server_feat, server_feat & ~sup_feat);
		con->error_msg = "missing required protocol features";
		fail_protocol(con);
		return -1;

	case CEPH_MSGR_TAG_BADPROTOVER:
		pr_err("%s%lld %s protocol version mismatch,"
		       " my %d != server's %d\n",
		       ENTITY_NAME(con->peer_name),
		       pr_addr(&con->peer_addr.in_addr),
		       le32_to_cpu(con->out_connect.protocol_version),
		       le32_to_cpu(con->in_reply.protocol_version));
		con->error_msg = "protocol version mismatch";
		fail_protocol(con);
		return -1;

	case CEPH_MSGR_TAG_BADAUTHORIZER:
		con->auth_retry++;
		dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
		     con->auth_retry);
		if (con->auth_retry == 2) {
			con->error_msg = "connect authorization failure";
			reset_connection(con);
			set_bit(CLOSED, &con->state);
			return -1;
		}
		con->auth_retry = 1;
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RESETSESSION:
		/*
		 * If we connected with a large connect_seq but the peer
		 * has no record of a session with us (no connection, or
		 * connect_seq == 0), they will send RESETSESION to indicate
		 * that they must have reset their session, and may have
		 * dropped messages.
		 */
		dout("process_connect got RESET peer seq %u\n",
		     le32_to_cpu(con->in_connect.connect_seq));
		pr_err("%s%lld %s connection reset\n",
		       ENTITY_NAME(con->peer_name),
		       pr_addr(&con->peer_addr.in_addr));
		reset_connection(con);
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);

		/* Tell ceph about it. */
		mutex_unlock(&con->mutex);
		pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
		if (con->ops->peer_reset)
			con->ops->peer_reset(con);
		mutex_lock(&con->mutex);
		break;

	case CEPH_MSGR_TAG_RETRY_SESSION:
		/*
		 * If we sent a smaller connect_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY my seq = %u, peer_seq = %u\n",
		     le32_to_cpu(con->out_connect.connect_seq),
		     le32_to_cpu(con->in_connect.connect_seq));
		con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RETRY_GLOBAL:
		/*
		 * If we sent a smaller global_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_connect.global_seq));
		get_global_seq(con->msgr,
			       le32_to_cpu(con->in_connect.global_seq));
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_READY:
		if (req_feat & ~server_feat) {
			pr_err("%s%lld %s protocol feature mismatch,"
			       " my required %llx > server's %llx, need %llx\n",
			       ENTITY_NAME(con->peer_name),
			       pr_addr(&con->peer_addr.in_addr),
			       req_feat, server_feat, req_feat & ~server_feat);
			con->error_msg = "missing required protocol features";
			fail_protocol(con);
			return -1;
		}
		clear_bit(CONNECTING, &con->state);
		con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
		con->connect_seq++;
		con->peer_features = server_feat;
		dout("process_connect got READY gseq %d cseq %d (%d)\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_reply.connect_seq),
		     con->connect_seq);
		WARN_ON(con->connect_seq !=
			le32_to_cpu(con->in_reply.connect_seq));

		if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
			set_bit(LOSSYTX, &con->state);

		prepare_read_tag(con);
		break;

	case CEPH_MSGR_TAG_WAIT:
		/*
		 * If there is a connection race (we are opening
		 * connections to each other), one of us may just have
		 * to WAIT.  This shouldn't happen if we are the
		 * client.
		 */
		pr_err("process_connect peer connecting WAIT\n");

	default:
		pr_err("connect protocol error, will retry\n");
		con->error_msg = "protocol error, garbage tag during connect";
		return -1;
	}
	return 0;
}
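
/*
 * Summary of the client-side handshake driven by the functions above
 * (a restatement for reference, not new protocol): we send our
 * banner + encoded address + ceph_msg_connect, then read the peer's
 * banner/addresses (process_banner) and its connect reply
 * (process_connect).  Any RETRY_* or RESETSESSION tag loops us back
 * through prepare_write_connect()/prepare_read_connect() with updated
 * sequence numbers until the peer answers TAG_READY.
 */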

/*
 * read (part of) an ack
 */
static int read_partial_ack(struct ceph_connection *con)
{
	int to = 0;

	return read_partial(con, &to, sizeof(con->in_temp_ack),
			    &con->in_temp_ack);
}

/*
 * We can finally discard anything that's been acked.
 */
static void process_ack(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u64 ack = le64_to_cpu(con->in_temp_ack);
	u64 seq;

	while (!list_empty(&con->out_sent)) {
		m = list_first_entry(&con->out_sent, struct ceph_msg,
				     list_head);
		seq = le64_to_cpu(m->hdr.seq);
		if (seq > ack)
			break;
		dout("got ack for seq %llu type %d at %p\n", seq,
		     le16_to_cpu(m->hdr.type), m);
		ceph_msg_remove(m);
	}
	prepare_read_tag(con);
}

static int read_partial_message_section(struct ceph_connection *con,
					struct kvec *section,
					unsigned int sec_len, u32 *crc)
{
	int left;
	int ret;

	BUG_ON(!section);

	while (section->iov_len < sec_len) {
		BUG_ON(section->iov_base == NULL);
		left = sec_len - section->iov_len;
		ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
				       section->iov_len, left);
		if (ret <= 0)
			return ret;
		section->iov_len += ret;
		if (section->iov_len == sec_len)
			*crc = crc32c(0, section->iov_base,
				      section->iov_len);
	}

	return 1;
}

static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
				       struct ceph_msg_header *hdr,
				       int *skip);
/*
 * read (part of) a message.
 */
static int read_partial_message(struct ceph_connection *con)
{
	struct ceph_msg *m = con->in_msg;
	void *p;
	int ret;
	int to, left;
	unsigned front_len, middle_len, data_len, data_off;
	int datacrc = !con->msgr->nocrc;
	int skip;
	u64 seq;

	dout("read_partial_message con %p msg %p\n", con, m);

	/* header */
	while (con->in_base_pos < sizeof(con->in_hdr)) {
		left = sizeof(con->in_hdr) - con->in_base_pos;
		ret = ceph_tcp_recvmsg(con->sock,
				       (char *)&con->in_hdr + con->in_base_pos,
				       left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
		if (con->in_base_pos == sizeof(con->in_hdr)) {
			u32 crc = crc32c(0, (void *)&con->in_hdr,
				 sizeof(con->in_hdr) - sizeof(con->in_hdr.crc));
			if (crc != le32_to_cpu(con->in_hdr.crc)) {
				pr_err("read_partial_message bad hdr "
				       " crc %u != expected %u\n",
				       crc, con->in_hdr.crc);
				return -EBADMSG;
			}
		}
	}
	front_len = le32_to_cpu(con->in_hdr.front_len);
	if (front_len > CEPH_MSG_MAX_FRONT_LEN)
		return -EIO;
	middle_len = le32_to_cpu(con->in_hdr.middle_len);
	if (middle_len > CEPH_MSG_MAX_DATA_LEN)
		return -EIO;
	data_len = le32_to_cpu(con->in_hdr.data_len);
	if (data_len > CEPH_MSG_MAX_DATA_LEN)
		return -EIO;
	data_off = le16_to_cpu(con->in_hdr.data_off);

	/* verify seq# */
	seq = le64_to_cpu(con->in_hdr.seq);
	if ((s64)seq - (s64)con->in_seq < 1) {
		pr_info("skipping %s%lld %s seq %lld, expected %lld\n",
			ENTITY_NAME(con->peer_name),
			pr_addr(&con->peer_addr.in_addr),
			seq, con->in_seq + 1);
		con->in_base_pos = -front_len - middle_len - data_len -
			sizeof(m->footer);
		con->in_tag = CEPH_MSGR_TAG_READY;
		con->in_seq++;
		return 0;
	} else if ((s64)seq - (s64)con->in_seq > 1) {
		pr_err("read_partial_message bad seq %lld expected %lld\n",
		       seq, con->in_seq + 1);
		con->error_msg = "bad message sequence # for incoming message";
		return -EBADMSG;
	}

	/* allocate message? */
	if (!con->in_msg) {
		dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
		     con->in_hdr.front_len, con->in_hdr.data_len);
		skip = 0;
		con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip);
		if (skip) {
			/* skip this message */
			dout("alloc_msg said skip message\n");
			BUG_ON(con->in_msg);
			con->in_base_pos = -front_len - middle_len - data_len -
				sizeof(m->footer);
			con->in_tag = CEPH_MSGR_TAG_READY;
			con->in_seq++;
			return 0;
		}
		if (!con->in_msg) {
			con->error_msg =
				"error allocating memory for incoming message";
			return -ENOMEM;
		}
		m = con->in_msg;
		m->front.iov_len = 0;    /* haven't read it yet */
		if (m->middle)
			m->middle->vec.iov_len = 0;

		con->in_msg_pos.page = 0;
		con->in_msg_pos.page_pos = data_off & ~PAGE_MASK;
		con->in_msg_pos.data_pos = 0;
	}

	/* front */
	ret = read_partial_message_section(con, &m->front, front_len,
					   &con->in_front_crc);
	if (ret <= 0)
		return ret;

	/* middle */
	if (m->middle) {
		ret = read_partial_message_section(con, &m->middle->vec,
						   middle_len,
						   &con->in_middle_crc);
		if (ret <= 0)
			return ret;
	}

	/* (page) data */
	while (con->in_msg_pos.data_pos < data_len) {
		left = min((int)(data_len - con->in_msg_pos.data_pos),
			   (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
		BUG_ON(m->pages == NULL);
		p = kmap(m->pages[con->in_msg_pos.page]);
		ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
				       left);
		if (ret > 0 && datacrc)
			con->in_data_crc =
				crc32c(con->in_data_crc,
				       p + con->in_msg_pos.page_pos, ret);
		kunmap(m->pages[con->in_msg_pos.page]);
		if (ret <= 0)
			return ret;
		con->in_msg_pos.data_pos += ret;
		con->in_msg_pos.page_pos += ret;
		if (con->in_msg_pos.page_pos == PAGE_SIZE) {
			con->in_msg_pos.page_pos = 0;
			con->in_msg_pos.page++;
		}
	}

	/* footer */
	to = sizeof(m->hdr) + sizeof(m->footer);
	while (con->in_base_pos < to) {
		left = to - con->in_base_pos;
		ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer +
				       (con->in_base_pos - sizeof(m->hdr)),
				       left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
	     m, front_len, m->footer.front_crc, middle_len,
	     m->footer.middle_crc, data_len, m->footer.data_crc);

	/* crc ok? */
	if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
		pr_err("read_partial_message %p front crc %u != exp. %u\n",
		       m, con->in_front_crc, m->footer.front_crc);
		return -EBADMSG;
	}
	if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
		pr_err("read_partial_message %p middle crc %u != exp %u\n",
		       m, con->in_middle_crc, m->footer.middle_crc);
		return -EBADMSG;
	}
	if (datacrc &&
	    (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
	    con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
		pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
		       con->in_data_crc, le32_to_cpu(m->footer.data_crc));
		return -EBADMSG;
	}

	return 1; /* done! */
}

/*
 * Process message.  This happens in the worker thread.  The callback should
 * be careful not to do anything that waits on other incoming messages or it
 * may deadlock.
 */
static void process_message(struct ceph_connection *con)
{
	struct ceph_msg *msg;

	msg = con->in_msg;
	con->in_msg = NULL;

	/* if first message, set peer_name */
	if (con->peer_name.type == 0)
		con->peer_name = msg->hdr.src;

	con->in_seq++;
	mutex_unlock(&con->mutex);

	dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
	     msg, le64_to_cpu(msg->hdr.seq),
	     ENTITY_NAME(msg->hdr.src),
	     le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.data_len),
	     con->in_front_crc, con->in_middle_crc, con->in_data_crc);
	con->ops->dispatch(con, msg);

	mutex_lock(&con->mutex);
	prepare_read_tag(con);
}

/*
 * Write something to the socket.  Called in a worker thread when the
 * socket appears to be writeable and we have something ready to send.
 */
static int try_write(struct ceph_connection *con)
{
	struct ceph_messenger *msgr = con->msgr;
	int ret = 1;

	dout("try_write start %p state %lu nref %d\n", con, con->state,
	     atomic_read(&con->nref));

more:
	dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);

	/* open the socket first? */
	if (con->sock == NULL) {
		/*
		 * if we were STANDBY and are reconnecting _this_
		 * connection, bump connect_seq now.  Always bump
		 * global_seq.
		 */
		if (test_and_clear_bit(STANDBY, &con->state))
			con->connect_seq++;

		prepare_write_banner(msgr, con);
		prepare_write_connect(msgr, con, 1);
		prepare_read_banner(con);
		set_bit(CONNECTING, &con->state);
		clear_bit(NEGOTIATING, &con->state);

		BUG_ON(con->in_msg);
		con->in_tag = CEPH_MSGR_TAG_READY;
		dout("try_write initiating connect on %p new state %lu\n",
		     con, con->state);
		con->sock = ceph_tcp_connect(con);
		if (IS_ERR(con->sock)) {
			con->sock = NULL;
			con->error_msg = "connect error";
			ret = -1;
			goto out;
		}
	}

more_kvec:
	/* kvec data queued? */
	if (con->out_skip) {
		ret = write_partial_skip(con);
		if (ret == 0)
			goto done;
		if (ret < 0) {
			dout("try_write write_partial_skip err %d\n", ret);
			goto done;
		}
	}
	if (con->out_kvec_left) {
		ret = write_partial_kvec(con);
		if (ret <= 0)
			goto done;
	}

	/* msg pages? */
	if (con->out_msg) {
		if (con->out_msg_done) {
			ceph_msg_put(con->out_msg);
			con->out_msg = NULL;   /* we're done with this one */
			goto do_next;
		}

		ret = write_partial_msg_pages(con);
		if (ret == 1)
			goto more_kvec;  /* we need to send the footer, too! */
		if (ret == 0)
			goto done;
		if (ret < 0) {
			dout("try_write write_partial_msg_pages err %d\n",
			     ret);
			goto done;
		}
	}

do_next:
	if (!test_bit(CONNECTING, &con->state)) {
		/* is anything else pending? */
		if (!list_empty(&con->out_queue)) {
			prepare_write_message(con);
			goto more;
		}
		if (con->in_seq > con->in_seq_acked) {
			prepare_write_ack(con);
			goto more;
		}
		if (test_and_clear_bit(KEEPALIVE_PENDING, &con->state)) {
			prepare_write_keepalive(con);
			goto more;
		}
	}

	/* Nothing to do! */
	clear_bit(WRITE_PENDING, &con->state);
	dout("try_write nothing else to write.\n");
done:
	ret = 0;
out:
	dout("try_write done on %p\n", con);
	return ret;
}

/*
 * Read what we can from the socket.
 */
static int try_read(struct ceph_connection *con)
{
	int ret = -1;

	if (!con->sock)
		return 0;
	if (test_bit(STANDBY, &con->state))
		return 0;

	dout("try_read start on %p\n", con);

more:
	dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
	     con->in_base_pos);
	if (test_bit(CONNECTING, &con->state)) {
		if (!test_bit(NEGOTIATING, &con->state)) {
			dout("try_read connecting\n");
			ret = read_partial_banner(con);
			if (ret <= 0)
				goto done;
			if (process_banner(con) < 0) {
				ret = -1;
				goto out;
			}
		}
		ret = read_partial_connect(con);
		if (ret <= 0)
			goto done;
		if (process_connect(con) < 0) {
			ret = -1;
			goto out;
		}
		goto more;
	}

	if (con->in_base_pos < 0) {
		/*
		 * skipping + discarding content.
		 *
		 * FIXME: there must be a better way to do this!
		 */
		static char buf[1024];
		int skip = min(1024, -con->in_base_pos);
		dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
		ret = ceph_tcp_recvmsg(con->sock, buf, skip);
		if (ret <= 0)
			goto done;
		con->in_base_pos += ret;
		if (con->in_base_pos)
			goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_READY) {
		/*
		 * what's next?
		 */
		ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
		if (ret <= 0)
			goto done;
		dout("try_read got tag %d\n", (int)con->in_tag);
		switch (con->in_tag) {
		case CEPH_MSGR_TAG_MSG:
			prepare_read_message(con);
			break;
		case CEPH_MSGR_TAG_ACK:
			prepare_read_ack(con);
			break;
		case CEPH_MSGR_TAG_CLOSE:
			set_bit(CLOSED, &con->state);   /* fixme */
			goto done;
		default:
			goto bad_tag;
		}
	}
	if (con->in_tag == CEPH_MSGR_TAG_MSG) {
		ret = read_partial_message(con);
		if (ret <= 0) {
			switch (ret) {
			case -EBADMSG:
				con->error_msg = "bad crc";
				ret = -EIO;
				goto out;
			case -EIO:
				con->error_msg = "io error";
				goto out;
			default:
				goto done;
			}
		}
		if (con->in_tag == CEPH_MSGR_TAG_READY)
			goto more;
		process_message(con);
		goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_ACK) {
		ret = read_partial_ack(con);
		if (ret <= 0)
			goto done;
		process_ack(con);
		goto more;
	}

done:
	ret = 0;
out:
	dout("try_read done on %p\n", con);
	return ret;

bad_tag:
	pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
	con->error_msg = "protocol error, garbage tag";
	ret = -1;
	goto out;
}

/*
 * Atomically queue work on a connection.  Bump @con reference to
 * avoid races with connection teardown.
 *
 * There is some trickery going on with QUEUED and BUSY because we
 * only want a _single_ thread operating on each connection at any
 * point in time, but we want to use all available CPUs.
 *
 * The worker thread only proceeds if it can atomically set BUSY.  It
 * clears QUEUED and does its thing.  When it thinks it's done, it
 * clears BUSY, then rechecks QUEUED.. if it's set again, it loops
 * (tries again to set BUSY).
 *
 * To queue work, we first set QUEUED, _then_ if BUSY isn't set, we
 * try to queue work.  If that fails (work is already queued, or BUSY)
 * we give up (work also already being done or is queued) but leave QUEUED
 * set so that the worker thread will loop if necessary.
 */
static void queue_con(struct ceph_connection *con)
{
	if (test_bit(DEAD, &con->state)) {
		dout("queue_con %p ignoring: DEAD\n",
		     con);
		return;
	}

	if (!con->ops->get(con)) {
		dout("queue_con %p ref count 0\n", con);
		return;
	}

	set_bit(QUEUED, &con->state);
	if (test_bit(BUSY, &con->state)) {
		dout("queue_con %p - already BUSY\n", con);
		con->ops->put(con);
	} else if (!queue_work(ceph_msgr_wq, &con->work.work)) {
		dout("queue_con %p - already queued\n", con);
		con->ops->put(con);
	} else {
		dout("queue_con %p\n", con);
	}
}
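
/*
 * Example interleaving (illustrative): CPU A is in con_work() with
 * BUSY set when CPU B calls queue_con().  B sets QUEUED, sees BUSY,
 * and drops its ref without queueing.  When A finishes it clears BUSY,
 * rechecks QUEUED, and loops back, so B's request is not lost even
 * though no new work item was ever queued.
 */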

/*
 * Do some work on a connection.  Drop a connection ref when we're done.
 */
static void con_work(struct work_struct *work)
{
	struct ceph_connection *con = container_of(work, struct ceph_connection,
						   work.work);
	int backoff = 0;

more:
	if (test_and_set_bit(BUSY, &con->state) != 0) {
		dout("con_work %p BUSY already set\n", con);
		goto out;
	}
	dout("con_work %p start, clearing QUEUED\n", con);
	clear_bit(QUEUED, &con->state);

	mutex_lock(&con->mutex);

	if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
		dout("con_work CLOSED\n");
		con_close_socket(con);
		goto done;
	}
	if (test_and_clear_bit(OPENING, &con->state)) {
		/* reopen w/ new peer */
		dout("con_work OPENING\n");
		con_close_socket(con);
	}

	if (test_and_clear_bit(SOCK_CLOSED, &con->state) ||
	    try_read(con) < 0 ||
	    try_write(con) < 0) {
		mutex_unlock(&con->mutex);
		backoff = 1;
		ceph_fault(con);     /* error/fault path */
		goto done_unlocked;
	}

done:
	mutex_unlock(&con->mutex);

done_unlocked:
	clear_bit(BUSY, &con->state);
	dout("con->state=%lu\n", con->state);
	if (test_bit(QUEUED, &con->state)) {
		if (!backoff || test_bit(OPENING, &con->state)) {
			dout("con_work %p QUEUED reset, looping\n", con);
			goto more;
		}
		dout("con_work %p QUEUED reset, but just faulted\n", con);
		clear_bit(QUEUED, &con->state);
	}
	dout("con_work %p done\n", con);

out:
	con->ops->put(con);
}

/*
 * Generic error/fault handler.  A retry mechanism is used with
 * exponential backoff.
 */
static void ceph_fault(struct ceph_connection *con)
{
	pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
	       pr_addr(&con->peer_addr.in_addr), con->error_msg);
	dout("fault %p state %lu to peer %s\n",
	     con, con->state, pr_addr(&con->peer_addr.in_addr));

	if (test_bit(LOSSYTX, &con->state)) {
		dout("fault on LOSSYTX channel\n");
		goto out;
	}

	mutex_lock(&con->mutex);
	if (test_bit(CLOSED, &con->state))
		goto out_unlock;

	con_close_socket(con);

	if (con->in_msg) {
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
	}

	/* Requeue anything that hasn't been acked */
	list_splice_init(&con->out_sent, &con->out_queue);

	/* If there are no messages in the queue, place the connection
	 * in a STANDBY state (i.e., don't try to reconnect just yet). */
	if (list_empty(&con->out_queue) && !con->out_keepalive_pending) {
		dout("fault setting STANDBY\n");
		set_bit(STANDBY, &con->state);
	} else {
		/* retry after a delay. */
		if (con->delay == 0)
			con->delay = BASE_DELAY_INTERVAL;
		else if (con->delay < MAX_DELAY_INTERVAL)
			con->delay *= 2;
		dout("fault queueing %p delay %lu\n", con, con->delay);
		con->ops->get(con);
		if (queue_delayed_work(ceph_msgr_wq, &con->work,
				       round_jiffies_relative(con->delay)) == 0)
			con->ops->put(con);
	}

out_unlock:
	mutex_unlock(&con->mutex);
out:
	/*
	 * in case we faulted due to authentication, invalidate our
	 * current tickets so that we can get new ones.
	 */
	if (con->auth_retry && con->ops->invalidate_authorizer) {
		dout("calling invalidate_authorizer()\n");
		con->ops->invalidate_authorizer(con);
	}

	if (con->ops->fault)
		con->ops->fault(con);
}
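
/*
 * Illustrative backoff sequence: with the delay doubling from
 * BASE_DELAY_INTERVAL up to MAX_DELAY_INTERVAL (both defined in
 * messenger.h), successive faults on a busy connection are retried at
 * roughly d, 2d, 4d, ... jiffies until a connect succeeds;
 * ceph_con_open() resets the memory (con->delay = 0).
 */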

/*
 * create a new messenger instance
 */
struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr)
{
	struct ceph_messenger *msgr;

	msgr = kzalloc(sizeof(*msgr), GFP_KERNEL);
	if (msgr == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&msgr->global_seq_lock);

	/* the zero page is needed if a request is "canceled" while the message
	 * is being written over the socket */
	msgr->zero_page = __page_cache_alloc(GFP_KERNEL | __GFP_ZERO);
	if (!msgr->zero_page) {
		kfree(msgr);
		return ERR_PTR(-ENOMEM);
	}
	kmap(msgr->zero_page);

	if (myaddr)
		msgr->inst.addr = *myaddr;

	/* select a random nonce */
	msgr->inst.addr.type = 0;
	get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
	encode_my_addr(msgr);

	dout("messenger_create %p\n", msgr);
	return msgr;
}

void ceph_messenger_destroy(struct ceph_messenger *msgr)
{
	dout("destroy %p\n", msgr);
	kunmap(msgr->zero_page);
	__free_page(msgr->zero_page);
	kfree(msgr);
	dout("destroyed messenger %p\n", msgr);
}

/*
 * Queue up an outgoing message on the given connection.
 */
void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
{
	if (test_bit(CLOSED, &con->state)) {
		dout("con_send %p closed, dropping %p\n", con, msg);
		ceph_msg_put(msg);
		return;
	}

	/* set src+dst */
	msg->hdr.src = con->msgr->inst.name;

	BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));

	msg->needs_out_seq = true;

	/* queue */
	mutex_lock(&con->mutex);
	BUG_ON(!list_empty(&msg->list_head));
	list_add_tail(&msg->list_head, &con->out_queue);
	dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
	     ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.middle_len),
	     le32_to_cpu(msg->hdr.data_len));
	mutex_unlock(&con->mutex);

	/* if there wasn't anything waiting to send before, queue
	 * new work */
	if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
		queue_con(con);
}

/*
 * Revoke a message that was previously queued for send
 */
void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
{
	mutex_lock(&con->mutex);
	if (!list_empty(&msg->list_head)) {
		dout("con_revoke %p msg %p\n", con, msg);
		list_del_init(&msg->list_head);
		ceph_msg_put(msg);
		msg->hdr.seq = 0;
		if (con->out_msg == msg) {
			ceph_msg_put(con->out_msg);
			con->out_msg = NULL;
		}
		if (con->out_kvec_is_msg) {
			con->out_skip = con->out_kvec_bytes;
			con->out_kvec_is_msg = false;
		}
	} else {
		dout("con_revoke %p msg %p - not queued (sent?)\n", con, msg);
	}
	mutex_unlock(&con->mutex);
}

/*
 * Revoke a message that we may be reading data into
 */
void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
{
	mutex_lock(&con->mutex);
	if (con->in_msg && con->in_msg == msg) {
		unsigned front_len = le32_to_cpu(con->in_hdr.front_len);
		unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len);
		unsigned data_len = le32_to_cpu(con->in_hdr.data_len);

		/* skip rest of message */
		dout("con_revoke_pages %p msg %p revoked\n", con, msg);
		con->in_base_pos = con->in_base_pos -
				sizeof(struct ceph_msg_header) -
				front_len -
				middle_len -
				data_len -
				sizeof(struct ceph_msg_footer);
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->in_tag = CEPH_MSGR_TAG_READY;
		con->in_seq++;
	} else {
		dout("con_revoke_pages %p msg %p pages %p no-op\n",
		     con, con->in_msg, msg);
	}
	mutex_unlock(&con->mutex);
}

/*
 * Queue a keepalive byte to ensure the tcp connection is alive.
 */
void ceph_con_keepalive(struct ceph_connection *con)
{
	if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
	    test_and_set_bit(WRITE_PENDING, &con->state) == 0)
		queue_con(con);
}
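
/*
 * Usage sketch (assumed caller, e.g. a periodic tick in a client;
 * not part of this file):
 *
 *	ceph_con_keepalive(con);
 *
 * The double test_and_set_bit dance queues work only on the first of
 * back-to-back keepalive requests; later ones are no-ops until the
 * pending byte is written.
 */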

/*
 * construct a new message with given type, size
 * the new msg has a ref count of 1.
 */
struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
{
	struct ceph_msg *m;

	m = kmalloc(sizeof(*m), flags);
	if (m == NULL)
		goto out;
	kref_init(&m->kref);
	INIT_LIST_HEAD(&m->list_head);

	m->hdr.tid = 0;
	m->hdr.type = cpu_to_le16(type);
	m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
	m->hdr.version = 0;
	m->hdr.front_len = cpu_to_le32(front_len);
	m->hdr.middle_len = 0;
	m->hdr.data_len = 0;
	m->hdr.data_off = 0;
	m->hdr.reserved = 0;
	m->footer.front_crc = 0;
	m->footer.middle_crc = 0;
	m->footer.data_crc = 0;
	m->footer.flags = 0;
	m->front_max = front_len;
	m->front_is_vmalloc = false;
	m->more_to_follow = false;
	m->pool = NULL;

	/* front */
	if (front_len) {
		if (front_len > PAGE_CACHE_SIZE) {
			m->front.iov_base = __vmalloc(front_len, flags,
						      PAGE_KERNEL);
			m->front_is_vmalloc = true;
		} else {
			m->front.iov_base = kmalloc(front_len, flags);
		}
		if (m->front.iov_base == NULL) {
			pr_err("msg_new can't allocate %d bytes\n",
			       front_len);
			goto out2;
		}
	} else {
		m->front.iov_base = NULL;
	}
	m->front.iov_len = front_len;

	/* middle */
	m->middle = NULL;

	/* data */
	m->nr_pages = 0;
	m->pages = NULL;
	m->pagelist = NULL;

	dout("ceph_msg_new %p front %d\n", m, front_len);
	return m;

out2:
	ceph_msg_put(m);
out:
	pr_err("msg_new can't create type %d front %d\n", type, front_len);
	return NULL;
}
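
/*
 * Typical send path (an illustrative sketch; error handling elided):
 * allocate a message, fill the front payload, and hand the reference
 * to the connection:
 *
 *	struct ceph_msg *m = ceph_msg_new(type, front_len, GFP_NOFS);
 *	if (!m)
 *		return -ENOMEM;
 *	memcpy(m->front.iov_base, payload, front_len);
 *	ceph_con_send(con, m);
 *
 * ceph_con_send() queues the message and stamps hdr.seq later via
 * needs_out_seq; the ref is dropped once the peer acks the message.
 */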

/*
 * Allocate "middle" portion of a message, if it is needed and wasn't
 * allocated by alloc_msg.  This allows us to read a small fixed-size
 * per-type header in the front and then gracefully fail (i.e.,
 * propagate the error to the caller based on info in the front) when
 * the middle is too large.
 */
static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);
	int middle_len = le32_to_cpu(msg->hdr.middle_len);

	dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
	     ceph_msg_type_name(type), middle_len);
	BUG_ON(!middle_len);
	BUG_ON(msg->middle);

	msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
	if (!msg->middle)
		return -ENOMEM;
	return 0;
}

/*
 * Generic message allocator, for incoming messages.
 */
static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
				       struct ceph_msg_header *hdr,
				       int *skip)
{
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	int middle_len = le32_to_cpu(hdr->middle_len);
	struct ceph_msg *msg = NULL;
	int ret;

	if (con->ops->alloc_msg) {
		mutex_unlock(&con->mutex);
		msg = con->ops->alloc_msg(con, hdr, skip);
		mutex_lock(&con->mutex);
		if (!msg || *skip)
			return NULL;
	}
	if (!msg) {
		*skip = 0;
		msg = ceph_msg_new(type, front_len, GFP_NOFS);
		if (!msg) {
			pr_err("unable to allocate msg type %d len %d\n",
			       type, front_len);
			return NULL;
		}
	}
	memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr));

	if (middle_len && !msg->middle) {
		ret = ceph_alloc_middle(con, msg);
		if (ret < 0) {
			ceph_msg_put(msg);
			return NULL;
		}
	}

	return msg;
}
2215 void ceph_msg_kfree(struct ceph_msg *m)
2217 dout("msg_kfree %p\n", m);
2218 if (m->front_is_vmalloc)
2219 vfree(m->front.iov_base);
2221 kfree(m->front.iov_base);
2226 * Drop a msg ref. Destroy as needed.
2228 void ceph_msg_last_put(struct kref *kref)
2230 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
2232 dout("ceph_msg_put last one on %p\n", m);
2233 WARN_ON(!list_empty(&m->list_head));
2235 /* drop middle, data, if any */
2237 ceph_buffer_put(m->middle);
2244 ceph_pagelist_release(m->pagelist);
2250 ceph_msgpool_put(m->pool, m);

void ceph_msg_dump(struct ceph_msg *msg)
{
	pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg,
		 msg->front_max, msg->nr_pages);
	print_hex_dump(KERN_DEBUG, "header: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->hdr, sizeof(msg->hdr), true);
	print_hex_dump(KERN_DEBUG, " front: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       msg->front.iov_base, msg->front.iov_len, true);
	if (msg->middle)
		print_hex_dump(KERN_DEBUG, "middle: ",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       msg->middle->vec.iov_base,
			       msg->middle->vec.iov_len, true);
	print_hex_dump(KERN_DEBUG, "footer: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->footer, sizeof(msg->footer), true);
}