#include <linux/ceph/ceph_debug.h>

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/dns_resolver.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/pagelist.h>
#include <linux/export.h>
/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */
/*
 * We track the state of the socket on a given connection using
 * values defined below.  The transition to a new socket state is
 * handled by a function which verifies we aren't coming from an
 * unexpected state.
 *
 *      NEW          transient initial state
 *       |   con_sock_state_init()
 *       v
 *      CLOSED       initialized, but no socket (and no TCP connection)
 *       |   con_sock_state_connecting()
 *       v
 *      CONNECTING   socket created, TCP connect initiated
 *       |   con_sock_state_connected()
 *       v
 *      CONNECTED    TCP connection established
 *
 * From CONNECTING or CONNECTED, con_sock_state_closing() moves the
 * socket to CLOSING (socket event; await close), and
 * con_sock_state_closed() returns any of these states to CLOSED.
 */

/*
 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
 */
#define CON_SOCK_STATE_NEW		0	/* -> CLOSED */
#define CON_SOCK_STATE_CLOSED		1	/* -> CONNECTING */
#define CON_SOCK_STATE_CONNECTING	2	/* -> CONNECTED or -> CLOSING */
#define CON_SOCK_STATE_CONNECTED	3	/* -> CLOSING or -> CLOSED */
#define CON_SOCK_STATE_CLOSING		4	/* -> CLOSED */
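/*
 * For illustration only (not part of this file): the legal
 * transitions above, written as a table.  This userspace sketch
 * mirrors the checks performed by the con_sock_state_*() helpers
 * below; allowed[] and sock_state_transition_ok() are hypothetical
 * names introduced here.
 */
#if 0
#include <stdbool.h>

enum { S_NEW, S_CLOSED, S_CONNECTING, S_CONNECTED, S_CLOSING };

/* allowed[old][new]: edges from the state diagram above */
static const bool allowed[5][5] = {
        [S_NEW]        = { [S_CLOSED] = true },
        [S_CLOSED]     = { [S_CONNECTING] = true },
        [S_CONNECTING] = { [S_CONNECTED] = true, [S_CLOSING] = true,
                           [S_CLOSED] = true },
        [S_CONNECTED]  = { [S_CLOSING] = true, [S_CLOSED] = true },
        [S_CLOSING]    = { [S_CLOSED] = true, [S_CLOSING] = true },
};

static bool sock_state_transition_ok(int old, int new)
{
        return allowed[old][new];
}
#endif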
/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;

#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif

/*
 * When skipping (ignoring) a block of input we read it into a "skip
 * buffer," which is this many bytes in size.
 */
#define SKIP_BUF_SIZE	1024

static void queue_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
static void ceph_fault(struct ceph_connection *con);

/*
 * Nicely render a sockaddr as a string.  An array of formatted
 * strings is used, to approximate reentrancy.
 */
#define ADDR_STR_COUNT_LOG	5	/* log2(# address strings in array) */
#define ADDR_STR_COUNT		(1 << ADDR_STR_COUNT_LOG)
#define ADDR_STR_COUNT_MASK	(ADDR_STR_COUNT - 1)
#define MAX_ADDR_STR_LEN	64	/* 54 is enough */

static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
static atomic_t addr_str_seq = ATOMIC_INIT(0);

static struct page *zero_page;		/* used in certain error cases */
const char *ceph_pr_addr(const struct sockaddr_storage *ss)
{
        int i;
        char *s;
        struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
        struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

        i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
        s = addr_str[i];

        switch (ss->ss_family) {
        case AF_INET:
                snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
                         ntohs(in4->sin_port));
                break;
        case AF_INET6:
                snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr,
                         ntohs(in6->sin6_port));
                break;
        default:
                snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
                         ss->ss_family);
        }

        return s;
}
EXPORT_SYMBOL(ceph_pr_addr);
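/*
 * For illustration only: the rotating-buffer trick used by
 * ceph_pr_addr() in standalone userspace form.  render() is a
 * stand-in name introduced here; each call claims the next of COUNT
 * static buffers, so up to COUNT results can be alive at once (e.g.
 * two calls in a single printk()).
 */
#if 0
#include <stdatomic.h>
#include <stdio.h>

#define COUNT	4	/* the kernel code uses 1 << 5 == 32 */
#define LEN	64

static char bufs[COUNT][LEN];
static atomic_int seq;

static const char *render(int port)
{
        int i = atomic_fetch_add(&seq, 1) & (COUNT - 1);

        snprintf(bufs[i], LEN, "127.0.0.1:%d", port);
        return bufs[i];
}

int main(void)
{
        /* two calls in one statement get two distinct buffers */
        printf("%s -> %s\n", render(6789), render(6800));
        return 0;
}
#endif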
static void encode_my_addr(struct ceph_messenger *msgr)
{
        memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
        ceph_encode_addr(&msgr->my_enc_addr);
}

/*
 * work queue for all reading and writing to/from the socket.
 */
static struct workqueue_struct *ceph_msgr_wq;

void _ceph_msgr_exit(void)
{
        if (ceph_msgr_wq)
                destroy_workqueue(ceph_msgr_wq);

        BUG_ON(zero_page == NULL);
        page_cache_release(zero_page);
        zero_page = NULL;
}

int ceph_msgr_init(void)
{
        BUG_ON(zero_page != NULL);
        zero_page = ZERO_PAGE(0);
        page_cache_get(zero_page);

        ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_NON_REENTRANT, 0);
        if (ceph_msgr_wq)
                return 0;

        pr_err("msgr_init failed to create workqueue\n");
        _ceph_msgr_exit();
        return -ENOMEM;
}
EXPORT_SYMBOL(ceph_msgr_init);

void ceph_msgr_exit(void)
{
        BUG_ON(ceph_msgr_wq == NULL);

        _ceph_msgr_exit();
}
EXPORT_SYMBOL(ceph_msgr_exit);

void ceph_msgr_flush(void)
{
        flush_workqueue(ceph_msgr_wq);
}
EXPORT_SYMBOL(ceph_msgr_flush);

/* Connection socket state transition functions */
static void con_sock_state_init(struct ceph_connection *con)
{
        int old_state;

        old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
        if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
                printk("%s: unexpected old state %d\n", __func__, old_state);
}

static void con_sock_state_connecting(struct ceph_connection *con)
{
        int old_state;

        old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
        if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
                printk("%s: unexpected old state %d\n", __func__, old_state);
}

static void con_sock_state_connected(struct ceph_connection *con)
{
        int old_state;

        old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
        if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
                printk("%s: unexpected old state %d\n", __func__, old_state);
}

static void con_sock_state_closing(struct ceph_connection *con)
{
        int old_state;

        old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
        if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
                    old_state != CON_SOCK_STATE_CONNECTED &&
                    old_state != CON_SOCK_STATE_CLOSING))
                printk("%s: unexpected old state %d\n", __func__, old_state);
}

static void con_sock_state_closed(struct ceph_connection *con)
{
        int old_state;

        old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
        if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
                    old_state != CON_SOCK_STATE_CLOSING &&
                    old_state != CON_SOCK_STATE_CONNECTING))
                printk("%s: unexpected old state %d\n", __func__, old_state);
}
/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_sock_data_ready(struct sock *sk, int count_unused)
{
        struct ceph_connection *con = sk->sk_user_data;

        if (atomic_read(&con->msgr->stopping)) {
                return;
        }

        if (sk->sk_state != TCP_CLOSE_WAIT) {
                dout("%s on %p state = %lu, queueing work\n", __func__,
                     con, con->state);
                queue_con(con);
        }
}

/* socket has buffer space for writing */
static void ceph_sock_write_space(struct sock *sk)
{
        struct ceph_connection *con = sk->sk_user_data;

        /* only queue to workqueue if there is data we want to write,
         * and there is sufficient space in the socket buffer to accept
         * more data.  clear SOCK_NOSPACE so that ceph_sock_write_space()
         * doesn't get called again until try_write() fills the socket
         * buffer.  See net/ipv4/tcp_input.c:tcp_check_space()
         * and net/core/stream.c:sk_stream_write_space().
         */
        if (test_bit(WRITE_PENDING, &con->flags)) {
                if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
                        dout("%s %p queueing write work\n", __func__, con);
                        clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                        queue_con(con);
                }
        } else {
                dout("%s %p nothing to write\n", __func__, con);
        }
}

/* socket's state has changed */
static void ceph_sock_state_change(struct sock *sk)
{
        struct ceph_connection *con = sk->sk_user_data;

        dout("%s %p state = %lu sk_state = %u\n", __func__,
             con, con->state, sk->sk_state);

        if (test_bit(CLOSED, &con->state))
                return;

        switch (sk->sk_state) {
        case TCP_CLOSE:
                dout("%s TCP_CLOSE\n", __func__);
        case TCP_CLOSE_WAIT:
                dout("%s TCP_CLOSE_WAIT\n", __func__);
                con_sock_state_closing(con);
                set_bit(SOCK_CLOSED, &con->flags);
                queue_con(con);
                break;
        case TCP_ESTABLISHED:
                dout("%s TCP_ESTABLISHED\n", __func__);
                con_sock_state_connected(con);
                queue_con(con);
                break;
        default:	/* Everything else is uninteresting */
                break;
        }
}

/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
                               struct ceph_connection *con)
{
        struct sock *sk = sock->sk;

        sk->sk_user_data = con;
        sk->sk_data_ready = ceph_sock_data_ready;
        sk->sk_write_space = ceph_sock_write_space;
        sk->sk_state_change = ceph_sock_state_change;
}
/*
 * initiate connection to a remote socket.
 */
static int ceph_tcp_connect(struct ceph_connection *con)
{
        struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
        struct socket *sock;
        int ret;

        BUG_ON(con->sock);
        ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
                               IPPROTO_TCP, &sock);
        if (ret)
                return ret;
        sock->sk->sk_allocation = GFP_NOFS;

#ifdef CONFIG_LOCKDEP
        lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif

        set_sock_callbacks(sock, con);

        dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));

        con_sock_state_connecting(con);
        ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
                                 O_NONBLOCK);
        if (ret == -EINPROGRESS) {
                dout("connect %s EINPROGRESS sk_state = %u\n",
                     ceph_pr_addr(&con->peer_addr.in_addr),
                     sock->sk->sk_state);
        } else if (ret < 0) {
                pr_err("connect %s error %d\n",
                       ceph_pr_addr(&con->peer_addr.in_addr), ret);
                sock_release(sock);
                con->error_msg = "connect error";

                return ret;
        }
        con->sock = sock;
        return 0;
}

static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
        struct kvec iov = {buf, len};
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
        int r;

        r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
        if (r == -EAGAIN)
                r = 0;
        return r;
}

/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
                            size_t kvlen, size_t len, int more)
{
        struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
        int r;

        if (more)
                msg.msg_flags |= MSG_MORE;
        else
                msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

        r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
        if (r == -EAGAIN)
                r = 0;
        return r;
}

static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
                             int offset, size_t size, int more)
{
        int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
        int ret;

        ret = kernel_sendpage(sock, page, offset, size, flags);
        if (ret == -EAGAIN)
                ret = 0;
        return ret;
}
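/*
 * For illustration only: what the "more" flag buys us.  With
 * MSG_MORE the kernel may hold a small segment back expecting more
 * data to coalesce into one TCP packet; without it (or with MSG_EOR)
 * the data can go out immediately.  A userspace sketch of the same
 * pattern (send_framed() is a hypothetical helper; error handling
 * omitted):
 */
#if 0
#include <sys/socket.h>

static void send_framed(int fd, const void *hdr, size_t hlen,
                        const void *payload, size_t plen)
{
        /* header: more data follows, let TCP coalesce */
        send(fd, hdr, hlen, MSG_MORE | MSG_NOSIGNAL);
        /* payload: end of record, push it out */
        send(fd, payload, plen, MSG_NOSIGNAL);
}
#endif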
/*
 * Shutdown/close the socket for the given connection.
 */
static int con_close_socket(struct ceph_connection *con)
{
        int rc;

        dout("con_close_socket on %p sock %p\n", con, con->sock);
        if (!con->sock)
                return 0;
        rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
        sock_release(con->sock);
        con->sock = NULL;

        /*
         * Forcibly clear the SOCK_CLOSED flag.  It gets set
         * independent of the connection mutex, and we could have
         * received a socket close event before we had the chance to
         * shut the socket down.
         */
        clear_bit(SOCK_CLOSED, &con->flags);

        con_sock_state_closed(con);
        return rc;
}

/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
        list_del_init(&msg->list_head);
        BUG_ON(msg->con == NULL);
        msg->con->ops->put(msg->con);
        msg->con = NULL;

        ceph_msg_put(msg);
}

static void ceph_msg_remove_list(struct list_head *head)
{
        while (!list_empty(head)) {
                struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
                                                        list_head);
                ceph_msg_remove(msg);
        }
}

static void reset_connection(struct ceph_connection *con)
{
        /* reset connection, out_queue, msg_ and connect_seq */
        /* discard existing out_queue and msg_seq */
        ceph_msg_remove_list(&con->out_queue);
        ceph_msg_remove_list(&con->out_sent);

        if (con->in_msg) {
                BUG_ON(con->in_msg->con != con);
                con->in_msg->con = NULL;
                ceph_msg_put(con->in_msg);
                con->in_msg = NULL;
                con->ops->put(con);
        }

        con->connect_seq = 0;
        con->out_seq = 0;
        if (con->out_msg) {
                ceph_msg_put(con->out_msg);
                con->out_msg = NULL;
        }
        con->in_seq = 0;
        con->in_seq_acked = 0;
}
/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
        mutex_lock(&con->mutex);
        dout("con_close %p peer %s\n", con,
             ceph_pr_addr(&con->peer_addr.in_addr));
        clear_bit(NEGOTIATING, &con->state);
        clear_bit(CONNECTING, &con->state);
        clear_bit(CONNECTED, &con->state);
        clear_bit(STANDBY, &con->state);  /* avoid connect_seq bump */
        set_bit(CLOSED, &con->state);

        clear_bit(LOSSYTX, &con->flags);  /* so we retry next connect */
        clear_bit(KEEPALIVE_PENDING, &con->flags);
        clear_bit(WRITE_PENDING, &con->flags);

        reset_connection(con);
        con->peer_global_seq = 0;
        cancel_delayed_work(&con->work);
        mutex_unlock(&con->mutex);

        /*
         * We cannot close the socket directly from here because the
         * work threads use it without holding the mutex.  Instead, let
         * the con_work job do it.
         */
        queue_con(con);
}
EXPORT_SYMBOL(ceph_con_close);
/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con,
                   __u8 entity_type, __u64 entity_num,
                   struct ceph_entity_addr *addr)
{
        mutex_lock(&con->mutex);
        dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
        set_bit(OPENING, &con->state);
        WARN_ON(!test_and_clear_bit(CLOSED, &con->state));

        con->peer_name.type = (__u8) entity_type;
        con->peer_name.num = cpu_to_le64(entity_num);

        memcpy(&con->peer_addr, addr, sizeof(*addr));
        con->delay = 0;      /* reset backoff memory */
        mutex_unlock(&con->mutex);
        queue_con(con);
}
EXPORT_SYMBOL(ceph_con_open);

/*
 * return true if this connection ever successfully opened
 */
bool ceph_con_opened(struct ceph_connection *con)
{
        return con->connect_seq > 0;
}

/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_connection *con, void *private,
                   const struct ceph_connection_operations *ops,
                   struct ceph_messenger *msgr)
{
        dout("con_init %p\n", con);
        memset(con, 0, sizeof(*con));
        con->private = private;
        con->ops = ops;
        con->msgr = msgr;

        con_sock_state_init(con);

        mutex_init(&con->mutex);
        INIT_LIST_HEAD(&con->out_queue);
        INIT_LIST_HEAD(&con->out_sent);
        INIT_DELAYED_WORK(&con->work, con_work);

        set_bit(CLOSED, &con->state);
}
EXPORT_SYMBOL(ceph_con_init);
/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
        u32 ret;

        spin_lock(&msgr->global_seq_lock);
        if (msgr->global_seq < gt)
                msgr->global_seq = gt;
        ret = ++msgr->global_seq;
        spin_unlock(&msgr->global_seq_lock);
        return ret;
}

static void con_out_kvec_reset(struct ceph_connection *con)
{
        con->out_kvec_left = 0;
        con->out_kvec_bytes = 0;
        con->out_kvec_cur = &con->out_kvec[0];
}

static void con_out_kvec_add(struct ceph_connection *con,
                             size_t size, void *data)
{
        int index;

        index = con->out_kvec_left;
        BUG_ON(index >= ARRAY_SIZE(con->out_kvec));

        con->out_kvec[index].iov_len = size;
        con->out_kvec[index].iov_base = data;
        con->out_kvec_left++;
        con->out_kvec_bytes += size;
}

#ifdef CONFIG_BLOCK
static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg)
{
        if (!bio) {
                *iter = NULL;
                *seg = 0;
                return;
        }
        *iter = bio;
        *seg = bio->bi_idx;
}

static void iter_bio_next(struct bio **bio_iter, int *seg)
{
        if (*bio_iter == NULL)
                return;

        BUG_ON(*seg >= (*bio_iter)->bi_vcnt);

        (*seg)++;
        if (*seg == (*bio_iter)->bi_vcnt)
                init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
}
#endif
static void prepare_write_message_data(struct ceph_connection *con)
{
        struct ceph_msg *msg = con->out_msg;

        BUG_ON(!msg);
        BUG_ON(!msg->hdr.data_len);

        /* initialize page iterator */
        con->out_msg_pos.page = 0;
        if (msg->pages)
                con->out_msg_pos.page_pos = msg->page_alignment;
        else
                con->out_msg_pos.page_pos = 0;
#ifdef CONFIG_BLOCK
        if (msg->bio)
                init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg);
#endif
        con->out_msg_pos.data_pos = 0;
        con->out_msg_pos.did_page_crc = false;
        con->out_more = 1;  /* data + footer will follow */
}

/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid.. we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con)
{
        struct ceph_msg *m = con->out_msg;
        int v = con->out_kvec_left;

        m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;

        dout("prepare_write_message_footer %p\n", con);
        con->out_kvec_is_msg = true;
        con->out_kvec[v].iov_base = &m->footer;
        con->out_kvec[v].iov_len = sizeof(m->footer);
        con->out_kvec_bytes += sizeof(m->footer);
        con->out_kvec_left++;
        con->out_more = m->more_to_follow;
        con->out_msg_done = true;
}
/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
        struct ceph_msg *m;
        u32 crc;

        con_out_kvec_reset(con);
        con->out_kvec_is_msg = true;
        con->out_msg_done = false;

        /* Sneak an ack in there first?  If we can get it into the same
         * TCP packet that's a good thing. */
        if (con->in_seq > con->in_seq_acked) {
                con->in_seq_acked = con->in_seq;
                con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
                con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
                con_out_kvec_add(con, sizeof (con->out_temp_ack),
                        &con->out_temp_ack);
        }

        BUG_ON(list_empty(&con->out_queue));
        m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
        con->out_msg = m;
        BUG_ON(m->con != con);

        /* put message on sent list */
        ceph_msg_get(m);
        list_move_tail(&m->list_head, &con->out_sent);

        /*
         * only assign outgoing seq # if we haven't sent this message
         * yet.  if it is requeued, resend with its original seq.
         */
        if (m->needs_out_seq) {
                m->hdr.seq = cpu_to_le64(++con->out_seq);
                m->needs_out_seq = false;
        }

        dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
             m, con->out_seq, le16_to_cpu(m->hdr.type),
             le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
             le32_to_cpu(m->hdr.data_len),
             m->nr_pages);
        BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);

        /* tag + hdr + front + middle */
        con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
        con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
        con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);

        if (m->middle)
                con_out_kvec_add(con, m->middle->vec.iov_len,
                        m->middle->vec.iov_base);

        /* fill in crc (except data pages), footer */
        crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
        con->out_msg->hdr.crc = cpu_to_le32(crc);
        con->out_msg->footer.flags = 0;

        crc = crc32c(0, m->front.iov_base, m->front.iov_len);
        con->out_msg->footer.front_crc = cpu_to_le32(crc);
        if (m->middle) {
                crc = crc32c(0, m->middle->vec.iov_base,
                             m->middle->vec.iov_len);
                con->out_msg->footer.middle_crc = cpu_to_le32(crc);
        } else
                con->out_msg->footer.middle_crc = 0;
        dout("%s front_crc %u middle_crc %u\n", __func__,
             le32_to_cpu(con->out_msg->footer.front_crc),
             le32_to_cpu(con->out_msg->footer.middle_crc));

        /* is there a data payload? */
        con->out_msg->footer.data_crc = 0;
        if (m->hdr.data_len)
                prepare_write_message_data(con);
        else
                /* no, queue up footer too and be done */
                prepare_write_message_footer(con);

        set_bit(WRITE_PENDING, &con->flags);
}
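/*
 * For illustration only: the offsetof() trick used above.  The
 * header's own crc field is excluded from the checksum by hashing
 * only the bytes that precede it, so sender and receiver (see
 * read_partial_message()) agree without zeroing the field first.
 * demo_hdr and its helpers are hypothetical names; crc32c() is
 * assumed to be the linux/crc32c.h routine.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

struct demo_hdr {
        uint64_t seq;
        uint32_t len;
        uint32_t crc;	/* must stay the last field */
};

static void demo_hdr_sign(struct demo_hdr *hdr)
{
        hdr->crc = crc32c(0, hdr, offsetof(struct demo_hdr, crc));
}

static int demo_hdr_check(const struct demo_hdr *hdr)
{
        return hdr->crc == crc32c(0, hdr, offsetof(struct demo_hdr, crc));
}
#endif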
static void prepare_write_ack(struct ceph_connection *con)
{
        dout("prepare_write_ack %p %llu -> %llu\n", con,
             con->in_seq_acked, con->in_seq);
        con->in_seq_acked = con->in_seq;

        con_out_kvec_reset(con);

        con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);

        con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
        con_out_kvec_add(con, sizeof (con->out_temp_ack),
                &con->out_temp_ack);

        con->out_more = 1;  /* more will follow.. eventually.. */
        set_bit(WRITE_PENDING, &con->flags);
}

/*
 * Prepare to write keepalive byte.
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
        dout("prepare_write_keepalive %p\n", con);
        con_out_kvec_reset(con);
        con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
        set_bit(WRITE_PENDING, &con->flags);
}
/*
 * Connection negotiation.
 */

static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
                                                int *auth_proto)
{
        struct ceph_auth_handshake *auth;

        if (!con->ops->get_authorizer) {
                con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
                con->out_connect.authorizer_len = 0;

                return NULL;
        }

        /* Can't hold the mutex while getting authorizer */

        mutex_unlock(&con->mutex);

        auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);

        mutex_lock(&con->mutex);

        if (IS_ERR(auth))
                return auth;
        if (test_bit(CLOSED, &con->state) || test_bit(OPENING, &con->flags))
                return ERR_PTR(-EAGAIN);

        con->auth_reply_buf = auth->authorizer_reply_buf;
        con->auth_reply_buf_len = auth->authorizer_reply_buf_len;

        return auth;
}

/*
 * We connected to a peer and are saying hello.
 */
static void prepare_write_banner(struct ceph_connection *con)
{
        con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
        con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
                                        &con->msgr->my_enc_addr);

        con->out_more = 0;
        set_bit(WRITE_PENDING, &con->flags);
}
static int prepare_write_connect(struct ceph_connection *con)
{
        unsigned int global_seq = get_global_seq(con->msgr, 0);
        int proto;
        int auth_proto;
        struct ceph_auth_handshake *auth;

        switch (con->peer_name.type) {
        case CEPH_ENTITY_TYPE_MON:
                proto = CEPH_MONC_PROTOCOL;
                break;
        case CEPH_ENTITY_TYPE_OSD:
                proto = CEPH_OSDC_PROTOCOL;
                break;
        case CEPH_ENTITY_TYPE_MDS:
                proto = CEPH_MDSC_PROTOCOL;
                break;
        default:
                BUG();
        }

        dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
             con->connect_seq, global_seq, proto);

        con->out_connect.features = cpu_to_le64(con->msgr->supported_features);
        con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
        con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
        con->out_connect.global_seq = cpu_to_le32(global_seq);
        con->out_connect.protocol_version = cpu_to_le32(proto);
        con->out_connect.flags = 0;

        auth_proto = CEPH_AUTH_UNKNOWN;
        auth = get_connect_authorizer(con, &auth_proto);
        if (IS_ERR(auth))
                return PTR_ERR(auth);

        con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
        con->out_connect.authorizer_len = auth ?
                cpu_to_le32(auth->authorizer_buf_len) : 0;

        con_out_kvec_reset(con);
        con_out_kvec_add(con, sizeof (con->out_connect),
                                        &con->out_connect);
        if (auth && auth->authorizer_buf_len)
                con_out_kvec_add(con, auth->authorizer_buf_len,
                                        auth->authorizer_buf);

        con->out_more = 0;
        set_bit(WRITE_PENDING, &con->flags);

        return 0;
}
/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
        int ret;

        dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
        while (con->out_kvec_bytes > 0) {
                ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
                                       con->out_kvec_left, con->out_kvec_bytes,
                                       con->out_more);
                if (ret <= 0)
                        goto out;
                con->out_kvec_bytes -= ret;
                if (con->out_kvec_bytes == 0)
                        break;            /* done */

                /* account for full iov entries consumed */
                while (ret >= con->out_kvec_cur->iov_len) {
                        BUG_ON(!con->out_kvec_left);
                        ret -= con->out_kvec_cur->iov_len;
                        con->out_kvec_cur++;
                        con->out_kvec_left--;
                }
                /* and for a partially-consumed entry */
                if (ret) {
                        con->out_kvec_cur->iov_len -= ret;
                        con->out_kvec_cur->iov_base += ret;
                }
        }
        con->out_kvec_left = 0;
        con->out_kvec_is_msg = false;
        ret = 1;
out:
        dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
             con->out_kvec_bytes, con->out_kvec_left, ret);
        return ret;  /* done! */
}
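/*
 * For illustration only: the resume-after-partial-write accounting
 * used above, isolated.  After a short write of `ret` bytes, whole
 * iovec entries that were fully sent are retired and the first
 * surviving entry is advanced by the remainder, so the next
 * sendmsg() resumes exactly where this one stopped.  kvec_advance()
 * is a hypothetical helper; the caller must guarantee ret is less
 * than the total bytes remaining (as the loop above does by checking
 * out_kvec_bytes first).
 */
#if 0
static void kvec_advance(struct kvec **cur, int *left, size_t ret)
{
        while (ret >= (*cur)->iov_len) {	/* retire finished entries */
                ret -= (*cur)->iov_len;
                (*cur)++;
                (*left)--;
        }
        if (ret) {				/* partially-consumed entry */
                (*cur)->iov_len -= ret;
                (*cur)->iov_base += ret;
        }
}
#endif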
static void out_msg_pos_next(struct ceph_connection *con, struct page *page,
                             size_t len, size_t sent, bool in_trail)
{
        struct ceph_msg *msg = con->out_msg;

        BUG_ON(!msg);
        BUG_ON(!sent);

        con->out_msg_pos.data_pos += sent;
        con->out_msg_pos.page_pos += sent;
        if (sent < len)
                return;

        BUG_ON(sent != len);
        con->out_msg_pos.page_pos = 0;
        con->out_msg_pos.page++;
        con->out_msg_pos.did_page_crc = false;
        if (in_trail)
                list_move_tail(&page->lru,
                               &msg->trail->head);
        else if (msg->pagelist)
                list_move_tail(&page->lru,
                               &msg->pagelist->head);
#ifdef CONFIG_BLOCK
        else if (msg->bio)
                iter_bio_next(&msg->bio_iter, &msg->bio_seg);
#endif
}
/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_msg_pages(struct ceph_connection *con)
{
        struct ceph_msg *msg = con->out_msg;
        unsigned int data_len = le32_to_cpu(msg->hdr.data_len);
        size_t len;
        bool do_datacrc = !con->msgr->nocrc;
        int ret;
        int total_max_write;
        bool in_trail = false;
        const size_t trail_len = (msg->trail ? msg->trail->length : 0);
        const size_t trail_off = data_len - trail_len;

        dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
             con, msg, con->out_msg_pos.page, msg->nr_pages,
             con->out_msg_pos.page_pos);

        /*
         * Iterate through each page that contains data to be
         * written, and send as much as possible for each.
         *
         * If we are calculating the data crc (the default), we will
         * need to map the page.  If we have no pages, they have
         * been revoked, so use the zero page.
         */
        while (data_len > con->out_msg_pos.data_pos) {
                struct page *page = NULL;
                int max_write = PAGE_SIZE;
                int bio_offset = 0;

                in_trail = in_trail || con->out_msg_pos.data_pos >= trail_off;
                if (!in_trail)
                        total_max_write = trail_off - con->out_msg_pos.data_pos;

                if (in_trail) {
                        total_max_write = data_len - con->out_msg_pos.data_pos;

                        page = list_first_entry(&msg->trail->head,
                                                struct page, lru);
                } else if (msg->pages) {
                        page = msg->pages[con->out_msg_pos.page];
                } else if (msg->pagelist) {
                        page = list_first_entry(&msg->pagelist->head,
                                                struct page, lru);
#ifdef CONFIG_BLOCK
                } else if (msg->bio) {
                        struct bio_vec *bv;

                        bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg);
                        page = bv->bv_page;
                        bio_offset = bv->bv_offset;
                        max_write = bv->bv_len;
#endif
                } else {
                        page = zero_page;
                }
                len = min_t(int, max_write - con->out_msg_pos.page_pos,
                            total_max_write);

                if (do_datacrc && !con->out_msg_pos.did_page_crc) {
                        void *base;
                        u32 crc = le32_to_cpu(msg->footer.data_crc);
                        char *kaddr;

                        kaddr = kmap(page);
                        BUG_ON(kaddr == NULL);
                        base = kaddr + con->out_msg_pos.page_pos + bio_offset;
                        crc = crc32c(crc, base, len);
                        msg->footer.data_crc = cpu_to_le32(crc);
                        con->out_msg_pos.did_page_crc = true;
                }
                ret = ceph_tcp_sendpage(con->sock, page,
                                        con->out_msg_pos.page_pos + bio_offset,
                                        len, 1);
                if (do_datacrc)
                        kunmap(page);

                if (ret <= 0)
                        goto out;

                out_msg_pos_next(con, page, len, (size_t) ret, in_trail);
        }

        dout("write_partial_msg_pages %p msg %p done\n", con, msg);

        /* prepare and queue up footer, too */
        if (!do_datacrc)
                msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
        con_out_kvec_reset(con);
        prepare_write_message_footer(con);
        ret = 1;
out:
        return ret;
}
static int write_partial_skip(struct ceph_connection *con)
{
        int ret;

        while (con->out_skip > 0) {
                size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);

                ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, 1);
                if (ret <= 0)
                        goto out;
                con->out_skip -= ret;
        }
        ret = 1;
out:
        return ret;
}
/*
 * Prepare to read connection handshake, or an ack.
 */
static void prepare_read_banner(struct ceph_connection *con)
{
        dout("prepare_read_banner %p\n", con);
        con->in_base_pos = 0;
}

static void prepare_read_connect(struct ceph_connection *con)
{
        dout("prepare_read_connect %p\n", con);
        con->in_base_pos = 0;
}

static void prepare_read_ack(struct ceph_connection *con)
{
        dout("prepare_read_ack %p\n", con);
        con->in_base_pos = 0;
}

static void prepare_read_tag(struct ceph_connection *con)
{
        dout("prepare_read_tag %p\n", con);
        con->in_base_pos = 0;
        con->in_tag = CEPH_MSGR_TAG_READY;
}

/*
 * Prepare to read a message.
 */
static int prepare_read_message(struct ceph_connection *con)
{
        dout("prepare_read_message %p\n", con);
        BUG_ON(con->in_msg != NULL);
        con->in_base_pos = 0;
        con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
        return 0;
}
static int read_partial(struct ceph_connection *con,
                        int end, int size, void *object)
{
        while (con->in_base_pos < end) {
                int left = end - con->in_base_pos;
                int have = size - left;
                int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
                if (ret <= 0)
                        return ret;
                con->in_base_pos += ret;
        }
        return 1;
}
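/*
 * A note on the bookkeeping above, for illustration: @end is the
 * absolute offset (within the current handshake or message phase) at
 * which @object ends, so `have = size - (end - in_base_pos)` is how
 * many of the object's bytes were already consumed on an earlier,
 * partial call.  E.g. while reading the banner (where size == end ==
 * strlen(CEPH_BANNER)), in_base_pos == 5 means five bytes of
 * con->in_banner are filled and `left` is the remainder; a resumed
 * call picks up at object + 5.
 */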
/*
 * Read all or part of the connect-side handshake on a new connection
 */
static int read_partial_banner(struct ceph_connection *con)
{
        int size;
        int end;
        int ret;

        dout("read_partial_banner %p at %d\n", con, con->in_base_pos);

        /* peer's banner */
        size = strlen(CEPH_BANNER);
        end = size;
        ret = read_partial(con, end, size, con->in_banner);
        if (ret <= 0)
                goto out;

        size = sizeof (con->actual_peer_addr);
        end += size;
        ret = read_partial(con, end, size, &con->actual_peer_addr);
        if (ret <= 0)
                goto out;

        size = sizeof (con->peer_addr_for_me);
        end += size;
        ret = read_partial(con, end, size, &con->peer_addr_for_me);
        if (ret <= 0)
                goto out;

out:
        return ret;
}

static int read_partial_connect(struct ceph_connection *con)
{
        int size;
        int end;
        int ret;

        dout("read_partial_connect %p at %d\n", con, con->in_base_pos);

        size = sizeof (con->in_reply);
        end = size;
        ret = read_partial(con, end, size, &con->in_reply);
        if (ret <= 0)
                goto out;

        size = le32_to_cpu(con->in_reply.authorizer_len);
        end += size;
        ret = read_partial(con, end, size, con->auth_reply_buf);
        if (ret <= 0)
                goto out;

        dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
             con, (int)con->in_reply.tag,
             le32_to_cpu(con->in_reply.connect_seq),
             le32_to_cpu(con->in_reply.global_seq));
out:
        return ret;
}
/*
 * Verify the hello banner looks okay.
 */
static int verify_hello(struct ceph_connection *con)
{
        if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
                pr_err("connect to %s got bad banner\n",
                       ceph_pr_addr(&con->peer_addr.in_addr));
                con->error_msg = "protocol error, bad banner";
                return -1;
        }
        return 0;
}

static bool addr_is_blank(struct sockaddr_storage *ss)
{
        switch (ss->ss_family) {
        case AF_INET:
                return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
        case AF_INET6:
                return
                    ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
                    ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
                    ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
                    ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
        }
        return false;
}

static int addr_port(struct sockaddr_storage *ss)
{
        switch (ss->ss_family) {
        case AF_INET:
                return ntohs(((struct sockaddr_in *)ss)->sin_port);
        case AF_INET6:
                return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
        }
        return 0;
}

static void addr_set_port(struct sockaddr_storage *ss, int p)
{
        switch (ss->ss_family) {
        case AF_INET:
                ((struct sockaddr_in *)ss)->sin_port = htons(p);
                break;
        case AF_INET6:
                ((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
                break;
        }
}
/*
 * Unlike other *_pton function semantics, zero indicates success.
 */
static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
                     char delim, const char **ipend)
{
        struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
        struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;

        memset(ss, 0, sizeof(*ss));

        if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
                ss->ss_family = AF_INET;
                return 0;
        }

        if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
                ss->ss_family = AF_INET6;
                return 0;
        }

        return -EINVAL;
}
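/*
 * For illustration only: note the inverted convention the comment
 * above warns about.  in4_pton()/in6_pton() return 1 on success, but
 * ceph_pton() returns 0 on success (nonzero otherwise), matching the
 * kernel's usual error-code style.  A hypothetical caller:
 */
#if 0
        struct sockaddr_storage ss;
        const char *p;
        const char *str = "10.0.0.1:6789";

        if (ceph_pton(str, 13, &ss, ':', &p) == 0) {
                /* ss now holds AF_INET 10.0.0.1; p points at the ':',
                 * where port parsing can continue */
        }
#endif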
/*
 * Extract hostname string and resolve using kernel DNS facility.
 */
#ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
static int ceph_dns_resolve_name(const char *name, size_t namelen,
                struct sockaddr_storage *ss, char delim, const char **ipend)
{
        const char *end, *delim_p;
        char *colon_p, *ip_addr = NULL;
        int ip_len, ret;

        /*
         * The end of the hostname occurs immediately preceding the delimiter or
         * the port marker (':') where the delimiter takes precedence.
         */
        delim_p = memchr(name, delim, namelen);
        colon_p = memchr(name, ':', namelen);

        if (delim_p && colon_p)
                end = delim_p < colon_p ? delim_p : colon_p;
        else if (!delim_p && colon_p)
                end = colon_p;
        else {
                end = delim_p;
                if (!end) /* case: hostname:/ */
                        end = name + namelen;
        }

        if (end <= name)
                return -EINVAL;

        /* do dns_resolve upcall */
        ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL);
        if (ip_len > 0)
                ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL);
        else
                ret = -ESRCH;

        kfree(ip_addr);

        *ipend = end;

        pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
                ret, ret ? "failed" : ceph_pr_addr(ss));

        return ret;
}
#else
static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
                struct sockaddr_storage *ss, char delim, const char **ipend)
{
        return -EINVAL;
}
#endif
/*
 * Parse a server name (IP or hostname).  If a valid IP address is not found
 * then try to extract a hostname to resolve using userspace DNS upcall.
 */
static int ceph_parse_server_name(const char *name, size_t namelen,
                struct sockaddr_storage *ss, char delim, const char **ipend)
{
        int ret;

        ret = ceph_pton(name, namelen, ss, delim, ipend);
        if (ret)
                ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend);

        return ret;
}

/*
 * Parse an ip[:port] list into an addr array.  Use the default
 * monitor port if a port isn't specified.
 */
int ceph_parse_ips(const char *c, const char *end,
                   struct ceph_entity_addr *addr,
                   int max_count, int *count)
{
        int i, ret = -EINVAL;
        const char *p = c;

        dout("parse_ips on '%.*s'\n", (int)(end-c), c);
        for (i = 0; i < max_count; i++) {
                const char *ipend;
                struct sockaddr_storage *ss = &addr[i].in_addr;
                int port;
                char delim = ',';

                if (*p == '[') {
                        delim = ']';
                        p++;
                }

                ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend);
                if (ret)
                        goto bad;
                p = ipend;

                if (delim == ']') {
                        if (*p != ']') {
                                dout("missing matching ']'\n");
                                goto bad;
                        }
                        p++;
                }

                /* port? */
                if (p < end && *p == ':') {
                        port = 0;
                        p++;
                        while (p < end && *p >= '0' && *p <= '9') {
                                port = (port * 10) + (*p - '0');
                                p++;
                        }
                        if (port > 65535 || port == 0)
                                goto bad;
                } else {
                        port = CEPH_MON_PORT;
                }

                addr_set_port(ss, port);

                dout("parse_ips got %s\n", ceph_pr_addr(ss));

                if (p == end)
                        break;
                if (*p != ',')
                        goto bad;
                p++;
        }

        if (p != end)
                goto bad;

        if (count)
                *count = i + 1;
        return 0;

bad:
        pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
        return ret;
}
EXPORT_SYMBOL(ceph_parse_ips);
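/*
 * For illustration only: a hypothetical ceph_parse_ips() caller,
 * e.g. for a mon_host-style option string.  Entries without an
 * explicit port get CEPH_MON_PORT, and bracketed IPv6 literals are
 * accepted; 0 is returned on success in this sketch's assumption.
 */
#if 0
        struct ceph_entity_addr addrs[3];
        int naddr;
        const char *s = "192.168.0.1:6789,192.168.0.2,[::1]:6789";

        if (ceph_parse_ips(s, s + strlen(s), addrs, 3, &naddr) == 0) {
                /* naddr == 3; addrs[1] received the default
                 * CEPH_MON_PORT since no port was given */
        }
#endif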
static int process_banner(struct ceph_connection *con)
{
        dout("process_banner on %p\n", con);

        if (verify_hello(con) < 0)
                return -1;

        ceph_decode_addr(&con->actual_peer_addr);
        ceph_decode_addr(&con->peer_addr_for_me);

        /*
         * Make sure the other end is who we wanted.  Note that the other
         * end may not yet know their ip address, so if it's 0.0.0.0, give
         * them the benefit of the doubt.
         */
        if (memcmp(&con->peer_addr, &con->actual_peer_addr,
                   sizeof(con->peer_addr)) != 0 &&
            !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
              con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
                pr_warning("wrong peer, want %s/%d, got %s/%d\n",
                           ceph_pr_addr(&con->peer_addr.in_addr),
                           (int)le32_to_cpu(con->peer_addr.nonce),
                           ceph_pr_addr(&con->actual_peer_addr.in_addr),
                           (int)le32_to_cpu(con->actual_peer_addr.nonce));
                con->error_msg = "wrong peer at address";
                return -1;
        }

        /*
         * did we learn our address?
         */
        if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
                int port = addr_port(&con->msgr->inst.addr.in_addr);

                memcpy(&con->msgr->inst.addr.in_addr,
                       &con->peer_addr_for_me.in_addr,
                       sizeof(con->peer_addr_for_me.in_addr));
                addr_set_port(&con->msgr->inst.addr.in_addr, port);
                encode_my_addr(con->msgr);
                dout("process_banner learned my addr is %s\n",
                     ceph_pr_addr(&con->msgr->inst.addr.in_addr));
        }

        return 0;
}

static void fail_protocol(struct ceph_connection *con)
{
        reset_connection(con);
        set_bit(CLOSED, &con->state);  /* in case there's queued work */
}
static int process_connect(struct ceph_connection *con)
{
        u64 sup_feat = con->msgr->supported_features;
        u64 req_feat = con->msgr->required_features;
        u64 server_feat = le64_to_cpu(con->in_reply.features);
        int ret;

        dout("process_connect on %p tag %d\n", con, (int)con->in_tag);

        switch (con->in_reply.tag) {
        case CEPH_MSGR_TAG_FEATURES:
                pr_err("%s%lld %s feature set mismatch,"
                       " my %llx < server's %llx, missing %llx\n",
                       ENTITY_NAME(con->peer_name),
                       ceph_pr_addr(&con->peer_addr.in_addr),
                       sup_feat, server_feat, server_feat & ~sup_feat);
                con->error_msg = "missing required protocol features";
                fail_protocol(con);
                return -1;

        case CEPH_MSGR_TAG_BADPROTOVER:
                pr_err("%s%lld %s protocol version mismatch,"
                       " my %d != server's %d\n",
                       ENTITY_NAME(con->peer_name),
                       ceph_pr_addr(&con->peer_addr.in_addr),
                       le32_to_cpu(con->out_connect.protocol_version),
                       le32_to_cpu(con->in_reply.protocol_version));
                con->error_msg = "protocol version mismatch";
                fail_protocol(con);
                return -1;

        case CEPH_MSGR_TAG_BADAUTHORIZER:
                con->auth_retry++;
                dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
                     con->auth_retry);
                if (con->auth_retry == 2) {
                        con->error_msg = "connect authorization failure";
                        return -1;
                }
                con->auth_retry = 1;
                ret = prepare_write_connect(con);
                if (ret < 0)
                        return ret;
                prepare_read_connect(con);
                break;

        case CEPH_MSGR_TAG_RESETSESSION:
                /*
                 * If we connected with a large connect_seq but the peer
                 * has no record of a session with us (no connection, or
                 * connect_seq == 0), they will send RESETSESSION to indicate
                 * that they must have reset their session, and may have
                 * dropped messages.
                 */
                dout("process_connect got RESET peer seq %u\n",
                     le32_to_cpu(con->in_reply.connect_seq));
                pr_err("%s%lld %s connection reset\n",
                       ENTITY_NAME(con->peer_name),
                       ceph_pr_addr(&con->peer_addr.in_addr));
                reset_connection(con);
                ret = prepare_write_connect(con);
                if (ret < 0)
                        return ret;
                prepare_read_connect(con);

                /* Tell ceph about it. */
                mutex_unlock(&con->mutex);
                pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
                if (con->ops->peer_reset)
                        con->ops->peer_reset(con);
                mutex_lock(&con->mutex);
                if (test_bit(CLOSED, &con->state) ||
                    test_bit(OPENING, &con->state))
                        return -EAGAIN;
                break;

        case CEPH_MSGR_TAG_RETRY_SESSION:
                /*
                 * If we sent a smaller connect_seq than the peer has, try
                 * again with a larger value.
                 */
                dout("process_connect got RETRY_SESSION my seq %u, peer %u\n",
                     le32_to_cpu(con->out_connect.connect_seq),
                     le32_to_cpu(con->in_reply.connect_seq));
                con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
                ret = prepare_write_connect(con);
                if (ret < 0)
                        return ret;
                prepare_read_connect(con);
                break;

        case CEPH_MSGR_TAG_RETRY_GLOBAL:
                /*
                 * If we sent a smaller global_seq than the peer has, try
                 * again with a larger value.
                 */
                dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
                     con->peer_global_seq,
                     le32_to_cpu(con->in_reply.global_seq));
                get_global_seq(con->msgr,
                               le32_to_cpu(con->in_reply.global_seq));
                ret = prepare_write_connect(con);
                if (ret < 0)
                        return ret;
                prepare_read_connect(con);
                break;

        case CEPH_MSGR_TAG_READY:
                if (req_feat & ~server_feat) {
                        pr_err("%s%lld %s protocol feature mismatch,"
                               " my required %llx > server's %llx, need %llx\n",
                               ENTITY_NAME(con->peer_name),
                               ceph_pr_addr(&con->peer_addr.in_addr),
                               req_feat, server_feat, req_feat & ~server_feat);
                        con->error_msg = "missing required protocol features";
                        fail_protocol(con);
                        return -1;
                }
                clear_bit(NEGOTIATING, &con->state);
                set_bit(CONNECTED, &con->state);
                con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
                con->connect_seq++;
                con->peer_features = server_feat;
                dout("process_connect got READY gseq %d cseq %d (%d)\n",
                     con->peer_global_seq,
                     le32_to_cpu(con->in_reply.connect_seq),
                     con->connect_seq);
                WARN_ON(con->connect_seq !=
                        le32_to_cpu(con->in_reply.connect_seq));

                if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
                        set_bit(LOSSYTX, &con->flags);

                con->delay = 0;      /* reset backoff memory */

                prepare_read_tag(con);
                break;

        case CEPH_MSGR_TAG_WAIT:
                /*
                 * If there is a connection race (we are opening
                 * connections to each other), one of us may just have
                 * to WAIT.  This shouldn't happen if we are the
                 * client.
                 */
                pr_err("process_connect got WAIT as client\n");
                con->error_msg = "protocol error, got WAIT as client";
                return -1;

        default:
                pr_err("connect protocol error, will retry\n");
                con->error_msg = "protocol error, garbage tag during connect";
                return -1;
        }
        return 0;
}
/*
 * read (part of) an ack
 */
static int read_partial_ack(struct ceph_connection *con)
{
        int size = sizeof (con->in_temp_ack);
        int end = size;

        return read_partial(con, end, size, &con->in_temp_ack);
}

/*
 * We can finally discard anything that's been acked.
 */
static void process_ack(struct ceph_connection *con)
{
        struct ceph_msg *m;
        u64 ack = le64_to_cpu(con->in_temp_ack);
        u64 seq;

        while (!list_empty(&con->out_sent)) {
                m = list_first_entry(&con->out_sent, struct ceph_msg,
                                     list_head);
                seq = le64_to_cpu(m->hdr.seq);
                if (seq > ack)
                        break;
                dout("got ack for seq %llu type %d at %p\n", seq,
                     le16_to_cpu(m->hdr.type), m);
                m->ack_stamp = jiffies;
                ceph_msg_remove(m);
        }
        prepare_read_tag(con);
}

static bool ceph_con_in_msg_alloc(struct ceph_connection *con,
                                  struct ceph_msg_header *hdr);
static int read_partial_message_pages(struct ceph_connection *con,
                                      struct page **pages,
                                      unsigned int data_len, bool do_datacrc)
{
        void *p;
        int ret;
        int left;

        left = min((int)(data_len - con->in_msg_pos.data_pos),
                   (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
        /* (page) data */
        BUG_ON(pages == NULL);
        p = kmap(pages[con->in_msg_pos.page]);
        ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
                               left);
        if (ret > 0 && do_datacrc)
                con->in_data_crc =
                        crc32c(con->in_data_crc,
                               p + con->in_msg_pos.page_pos, ret);
        kunmap(pages[con->in_msg_pos.page]);
        if (ret <= 0)
                return ret;
        con->in_msg_pos.data_pos += ret;
        con->in_msg_pos.page_pos += ret;
        if (con->in_msg_pos.page_pos == PAGE_SIZE) {
                con->in_msg_pos.page_pos = 0;
                con->in_msg_pos.page++;
        }

        return ret;
}

#ifdef CONFIG_BLOCK
static int read_partial_message_bio(struct ceph_connection *con,
                                    struct bio **bio_iter, int *bio_seg,
                                    unsigned int data_len, bool do_datacrc)
{
        struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg);
        void *p;
        int ret, left;

        left = min((int)(data_len - con->in_msg_pos.data_pos),
                   (int)(bv->bv_len - con->in_msg_pos.page_pos));

        p = kmap(bv->bv_page) + bv->bv_offset;

        ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
                               left);
        if (ret > 0 && do_datacrc)
                con->in_data_crc =
                        crc32c(con->in_data_crc,
                               p + con->in_msg_pos.page_pos, ret);
        kunmap(bv->bv_page);
        if (ret <= 0)
                return ret;
        con->in_msg_pos.data_pos += ret;
        con->in_msg_pos.page_pos += ret;
        if (con->in_msg_pos.page_pos == bv->bv_len) {
                con->in_msg_pos.page_pos = 0;
                iter_bio_next(bio_iter, bio_seg);
        }

        return ret;
}
#endif
/*
 * read (part of) a message.
 */
static int read_partial_message(struct ceph_connection *con)
{
        struct ceph_msg *m = con->in_msg;
        int size;
        int end;
        int ret;
        unsigned int front_len, middle_len, data_len;
        bool do_datacrc = !con->msgr->nocrc;
        u64 seq;
        u32 crc;

        dout("read_partial_message con %p msg %p\n", con, m);

        /* header */
        size = sizeof (con->in_hdr);
        end = size;
        ret = read_partial(con, end, size, &con->in_hdr);
        if (ret <= 0)
                return ret;

        crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
        if (cpu_to_le32(crc) != con->in_hdr.crc) {
                pr_err("read_partial_message bad hdr "
                       " crc %u != expected %u\n",
                       crc, con->in_hdr.crc);
                return -EBADMSG;
        }

        front_len = le32_to_cpu(con->in_hdr.front_len);
        if (front_len > CEPH_MSG_MAX_FRONT_LEN)
                return -EIO;
        middle_len = le32_to_cpu(con->in_hdr.middle_len);
        if (middle_len > CEPH_MSG_MAX_DATA_LEN)
                return -EIO;
        data_len = le32_to_cpu(con->in_hdr.data_len);
        if (data_len > CEPH_MSG_MAX_DATA_LEN)
                return -EIO;

        /* verify seq# */
        seq = le64_to_cpu(con->in_hdr.seq);
        if ((s64)seq - (s64)con->in_seq < 1) {
                pr_info("skipping %s%lld %s seq %lld expected %lld\n",
                        ENTITY_NAME(con->peer_name),
                        ceph_pr_addr(&con->peer_addr.in_addr),
                        seq, con->in_seq + 1);
                con->in_base_pos = -front_len - middle_len - data_len -
                        sizeof(m->footer);
                con->in_tag = CEPH_MSGR_TAG_READY;
                return 0;
        } else if ((s64)seq - (s64)con->in_seq > 1) {
                pr_err("read_partial_message bad seq %lld expected %lld\n",
                       seq, con->in_seq + 1);
                con->error_msg = "bad message sequence # for incoming message";
                return -EBADMSG;
        }

        /* allocate message? */
        if (!con->in_msg) {
                dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
                     con->in_hdr.front_len, con->in_hdr.data_len);
                if (ceph_con_in_msg_alloc(con, &con->in_hdr)) {
                        /* skip this message */
                        dout("alloc_msg said skip message\n");
                        BUG_ON(con->in_msg);
                        con->in_base_pos = -front_len - middle_len - data_len -
                                sizeof(m->footer);
                        con->in_tag = CEPH_MSGR_TAG_READY;
                        con->in_seq++;
                        return 0;
                }
                if (!con->in_msg) {
                        con->error_msg =
                                "error allocating memory for incoming message";
                        return -ENOMEM;
                }

                BUG_ON(con->in_msg->con != con);
                m = con->in_msg;
                m->front.iov_len = 0;    /* haven't read it yet */
                if (m->middle)
                        m->middle->vec.iov_len = 0;

                con->in_msg_pos.page = 0;
                if (m->pages)
                        con->in_msg_pos.page_pos = m->page_alignment;
                else
                        con->in_msg_pos.page_pos = 0;
                con->in_msg_pos.data_pos = 0;

#ifdef CONFIG_BLOCK
                if (m->bio)
                        init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg);
#endif
        }

        /* front */
        ret = read_partial_message_section(con, &m->front, front_len,
                                           &con->in_front_crc);
        if (ret <= 0)
                return ret;

        /* middle */
        if (m->middle) {
                ret = read_partial_message_section(con, &m->middle->vec,
                                                   middle_len,
                                                   &con->in_middle_crc);
                if (ret <= 0)
                        return ret;
        }

        /* (page) data */
        while (con->in_msg_pos.data_pos < data_len) {
                if (m->pages) {
                        ret = read_partial_message_pages(con, m->pages,
                                                         data_len, do_datacrc);
                        if (ret <= 0)
                                return ret;
#ifdef CONFIG_BLOCK
                } else if (m->bio) {
                        BUG_ON(!m->bio_iter);
                        ret = read_partial_message_bio(con,
                                                &m->bio_iter, &m->bio_seg,
                                                data_len, do_datacrc);
                        if (ret <= 0)
                                return ret;
#endif
                } else {
                        BUG();
                }
        }

        /* footer */
        size = sizeof (m->footer);
        end += size;
        ret = read_partial(con, end, size, &m->footer);
        if (ret <= 0)
                return ret;

        dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
             m, front_len, m->footer.front_crc, middle_len,
             m->footer.middle_crc, data_len, m->footer.data_crc);

        /* crc ok? */
        if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
                pr_err("read_partial_message %p front crc %u != exp. %u\n",
                       m, con->in_front_crc, m->footer.front_crc);
                return -EBADMSG;
        }
        if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
                pr_err("read_partial_message %p middle crc %u != exp %u\n",
                       m, con->in_middle_crc, m->footer.middle_crc);
                return -EBADMSG;
        }
        if (do_datacrc &&
            (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
            con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
                pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
                       con->in_data_crc, le32_to_cpu(m->footer.data_crc));
                return -EBADMSG;
        }

        return 1; /* done! */
}
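/*
 * For illustration only: how message skipping works.  Setting
 * con->in_base_pos to a negative total arms the discard loop in
 * try_read(), which reads into a throwaway buffer and adds each
 * count back onto in_base_pos until it climbs to zero; the next tag
 * is then read normally.  With hypothetical sizes:
 */
#if 0
        /* e.g. front=4096, middle=0, data=8192, all to be discarded: */
        con->in_base_pos = -4096 - 0 - 8192 -
                sizeof(struct ceph_msg_footer);
        con->in_tag = CEPH_MSGR_TAG_READY;
        /* try_read() now recvmsg()s into a SKIP_BUF_SIZE scratch
         * buffer until in_base_pos reaches 0, then resumes parsing. */
#endif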
/*
 * Process message.  This happens in the worker thread.  The callback should
 * be careful not to do anything that waits on other incoming messages or it
 * may deadlock.
 */
static void process_message(struct ceph_connection *con)
{
        struct ceph_msg *msg;

        BUG_ON(con->in_msg->con != con);
        con->in_msg->con = NULL;
        msg = con->in_msg;
        con->in_msg = NULL;
        con->ops->put(con);

        /* if first message, set peer_name */
        if (con->peer_name.type == 0)
                con->peer_name = msg->hdr.src;

        con->in_seq++;
        mutex_unlock(&con->mutex);

        dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
             msg, le64_to_cpu(msg->hdr.seq),
             ENTITY_NAME(msg->hdr.src),
             le16_to_cpu(msg->hdr.type),
             ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
             le32_to_cpu(msg->hdr.front_len),
             le32_to_cpu(msg->hdr.data_len),
             con->in_front_crc, con->in_middle_crc, con->in_data_crc);
        con->ops->dispatch(con, msg);

        mutex_lock(&con->mutex);
        prepare_read_tag(con);
}
/*
 * Write something to the socket.  Called in a worker thread when the
 * socket appears to be writeable and we have something ready to send.
 */
static int try_write(struct ceph_connection *con)
{
        int ret = 1;

        dout("try_write start %p state %lu\n", con, con->state);

more:
        dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);

        /* open the socket first? */
        if (con->sock == NULL) {
                set_bit(CONNECTING, &con->state);

                con_out_kvec_reset(con);
                prepare_write_banner(con);
                prepare_read_banner(con);

                BUG_ON(con->in_msg);
                con->in_tag = CEPH_MSGR_TAG_READY;
                dout("try_write initiating connect on %p new state %lu\n",
                     con, con->state);
                ret = ceph_tcp_connect(con);
                if (ret < 0) {
                        con->error_msg = "connect error";
                        goto out;
                }
        }

more_kvec:
        /* kvec data queued? */
        if (con->out_skip) {
                ret = write_partial_skip(con);
                if (ret <= 0)
                        goto out;
        }
        if (con->out_kvec_left) {
                ret = write_partial_kvec(con);
                if (ret <= 0)
                        goto out;
        }

        /* msg pages? */
        if (con->out_msg) {
                if (con->out_msg_done) {
                        ceph_msg_put(con->out_msg);
                        con->out_msg = NULL;  /* we're done with this one */
                        goto do_next;
                }

                ret = write_partial_msg_pages(con);
                if (ret == 1)
                        goto more_kvec;  /* we need to send the footer, too! */
                if (ret == 0)
                        goto out;
                if (ret < 0) {
                        dout("try_write write_partial_msg_pages err %d\n",
                             ret);
                        goto out;
                }
        }

do_next:
        if (!test_bit(CONNECTING, &con->state) &&
            !test_bit(NEGOTIATING, &con->state)) {
                /* is anything else pending? */
                if (!list_empty(&con->out_queue)) {
                        prepare_write_message(con);
                        goto more;
                }
                if (con->in_seq > con->in_seq_acked) {
                        prepare_write_ack(con);
                        goto more;
                }
                if (test_and_clear_bit(KEEPALIVE_PENDING, &con->flags)) {
                        prepare_write_keepalive(con);
                        goto more;
                }
        }

        /* Nothing to do! */
        clear_bit(WRITE_PENDING, &con->flags);
        dout("try_write nothing else to write.\n");
        ret = 0;
out:
        dout("try_write done on %p ret %d\n", con, ret);
        return ret;
}
/*
 * Read what we can from the socket.
 */
static int try_read(struct ceph_connection *con)
{
        int ret = -1;

        if (!con->sock)
                return 0;

        if (test_bit(STANDBY, &con->state))
                return 0;

        dout("try_read start on %p\n", con);

more:
        dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
             con->in_base_pos);

        /*
         * process_connect and process_message drop and re-take
         * con->mutex.  make sure we handle a racing close or reopen.
         */
        if (test_bit(CLOSED, &con->state) ||
            test_bit(OPENING, &con->state)) {
                ret = -EAGAIN;
                goto out;
        }

        if (test_bit(CONNECTING, &con->state)) {
                dout("try_read connecting\n");
                ret = read_partial_banner(con);
                if (ret <= 0)
                        goto out;
                ret = process_banner(con);
                if (ret < 0)
                        goto out;

                clear_bit(CONNECTING, &con->state);
                set_bit(NEGOTIATING, &con->state);

                /* Banner is good, exchange connection info */
                ret = prepare_write_connect(con);
                if (ret < 0)
                        goto out;
                prepare_read_connect(con);

                /* Send connection info before awaiting response */
                goto out;
        }

        if (test_bit(NEGOTIATING, &con->state)) {
                dout("try_read negotiating\n");
                ret = read_partial_connect(con);
                if (ret <= 0)
                        goto out;
                ret = process_connect(con);
                if (ret < 0)
                        goto out;
                goto more;
        }

        if (con->in_base_pos < 0) {
                /*
                 * skipping + discarding content.
                 *
                 * FIXME: there must be a better way to do this!
                 */
                static char buf[SKIP_BUF_SIZE];
                int skip = min((int) sizeof (buf), -con->in_base_pos);

                dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
                ret = ceph_tcp_recvmsg(con->sock, buf, skip);
                if (ret <= 0)
                        goto out;
                con->in_base_pos += ret;
                if (con->in_base_pos)
                        goto more;
        }
        if (con->in_tag == CEPH_MSGR_TAG_READY) {
                /*
                 * what's next?
                 */
                ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
                if (ret <= 0)
                        goto out;
                dout("try_read got tag %d\n", (int)con->in_tag);
                switch (con->in_tag) {
                case CEPH_MSGR_TAG_MSG:
                        prepare_read_message(con);
                        break;
                case CEPH_MSGR_TAG_ACK:
                        prepare_read_ack(con);
                        break;
                case CEPH_MSGR_TAG_CLOSE:
                        clear_bit(CONNECTED, &con->state);
                        set_bit(CLOSED, &con->state);  /* fixme */
                        goto out;
                default:
                        goto bad_tag;
                }
        }
        if (con->in_tag == CEPH_MSGR_TAG_MSG) {
                ret = read_partial_message(con);
                if (ret <= 0) {
                        switch (ret) {
                        case -EBADMSG:
                                con->error_msg = "bad crc";
                                ret = -EIO;
                                break;
                        case -EIO:
                                con->error_msg = "io error";
                                break;
                        }
                        goto out;
                }
                if (con->in_tag == CEPH_MSGR_TAG_READY)
                        goto more;
                process_message(con);
                goto more;
        }
        if (con->in_tag == CEPH_MSGR_TAG_ACK) {
                ret = read_partial_ack(con);
                if (ret <= 0)
                        goto out;
                process_ack(con);
                goto more;
        }

out:
        dout("try_read done on %p ret %d\n", con, ret);
        return ret;

bad_tag:
        pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
        con->error_msg = "protocol error, garbage tag";
        ret = -1;
        goto out;
}
/*
 * Atomically queue work on a connection.  Bump @con reference to
 * avoid races with connection teardown.
 */
static void queue_con(struct ceph_connection *con)
{
        if (!con->ops->get(con)) {
                dout("queue_con %p ref count 0\n", con);
                return;
        }

        if (!queue_delayed_work(ceph_msgr_wq, &con->work, 0)) {
                dout("queue_con %p - already queued\n", con);
                con->ops->put(con);
        } else {
                dout("queue_con %p\n", con);
        }
}
/*
 * Do some work on a connection.  Drop a connection ref when we're done.
 */
static void con_work(struct work_struct *work)
{
        struct ceph_connection *con = container_of(work, struct ceph_connection,
                                                   work.work);
        int ret;

        mutex_lock(&con->mutex);
restart:
        if (test_and_clear_bit(SOCK_CLOSED, &con->flags)) {
                if (test_and_clear_bit(CONNECTED, &con->state))
                        con->error_msg = "socket closed";
                else if (test_and_clear_bit(NEGOTIATING, &con->state))
                        con->error_msg = "negotiation failed";
                else if (test_and_clear_bit(CONNECTING, &con->state))
                        con->error_msg = "connection failed";
                else
                        con->error_msg = "unrecognized con state";
                goto fault;
        }

        if (test_and_clear_bit(BACKOFF, &con->flags)) {
                dout("con_work %p backing off\n", con);
                if (queue_delayed_work(ceph_msgr_wq, &con->work,
                                       round_jiffies_relative(con->delay))) {
                        dout("con_work %p backoff %lu\n", con, con->delay);
                        mutex_unlock(&con->mutex);
                        return;
                } else {
                        con->ops->put(con);
                        dout("con_work %p FAILED to back off %lu\n", con,
                             con->delay);
                }
        }

        if (test_bit(STANDBY, &con->state)) {
                dout("con_work %p STANDBY\n", con);
                goto done;
        }
        if (test_bit(CLOSED, &con->state)) {  /* e.g. if we are replaced */
                dout("con_work CLOSED\n");
                con_close_socket(con);
                goto done;
        }
        if (test_and_clear_bit(OPENING, &con->state)) {
                /* reopen w/ new peer */
                dout("con_work OPENING\n");
                con_close_socket(con);
        }

        ret = try_read(con);
        if (ret == -EAGAIN)
                goto restart;
        if (ret < 0) {
                con->error_msg = "socket error on read";
                goto fault;
        }

        ret = try_write(con);
        if (ret == -EAGAIN)
                goto restart;
        if (ret < 0) {
                con->error_msg = "socket error on write";
                goto fault;
        }

done:
        mutex_unlock(&con->mutex);
done_unlocked:
        con->ops->put(con);
        return;

fault:
        mutex_unlock(&con->mutex);
        ceph_fault(con);     /* error/fault path */
        goto done_unlocked;
}
/*
 * Generic error/fault handler.  A retry mechanism is used with
 * exponential backoff
 */
static void ceph_fault(struct ceph_connection *con)
{
        mutex_lock(&con->mutex);

        pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
               ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
        dout("fault %p state %lu to peer %s\n",
             con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));

        if (test_bit(CLOSED, &con->state))
                goto out_unlock;

        con_close_socket(con);

        if (test_bit(LOSSYTX, &con->flags)) {
                dout("fault on LOSSYTX channel\n");
                goto out_unlock;
        }

        if (con->in_msg) {
                BUG_ON(con->in_msg->con != con);
                con->in_msg->con = NULL;
                ceph_msg_put(con->in_msg);
                con->in_msg = NULL;
                con->ops->put(con);
        }

        /* Requeue anything that hasn't been acked */
        list_splice_init(&con->out_sent, &con->out_queue);

        /* If there are no messages queued or keepalive pending, place
         * the connection in a STANDBY state */
        if (list_empty(&con->out_queue) &&
            !test_bit(KEEPALIVE_PENDING, &con->flags)) {
                dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
                clear_bit(WRITE_PENDING, &con->flags);
                set_bit(STANDBY, &con->state);
        } else {
                /* retry after a delay. */
                if (con->delay == 0)
                        con->delay = BASE_DELAY_INTERVAL;
                else if (con->delay < MAX_DELAY_INTERVAL)
                        con->delay *= 2;
                if (queue_delayed_work(ceph_msgr_wq, &con->work,
                                       round_jiffies_relative(con->delay))) {
                        dout("fault queued %p delay %lu\n", con, con->delay);
                        con->ops->get(con);
                } else {
                        dout("fault failed to queue %p delay %lu, backoff\n",
                             con, con->delay);
                        /*
                         * In many cases we see a socket state change
                         * while con_work is running and end up
                         * queuing (non-delayed) work, such that we
                         * can't backoff with a delay.  Set a flag so
                         * that when con_work restarts we schedule the
                         * backoff properly.
                         */
                        set_bit(BACKOFF, &con->flags);
                }
        }

out_unlock:
        mutex_unlock(&con->mutex);
        /*
         * in case we faulted due to authentication, invalidate our
         * current tickets so that we can get new ones.
         */
        if (con->auth_retry && con->ops->invalidate_authorizer) {
                dout("calling invalidate_authorizer()\n");
                con->ops->invalidate_authorizer(con);
        }

        if (con->ops->fault)
                con->ops->fault(con);
}
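/*
 * For illustration only: the retry-delay schedule used above.
 * Starting from BASE_DELAY_INTERVAL and doubling up to
 * MAX_DELAY_INTERVAL gives the usual capped exponential backoff;
 * next_delay() is a hypothetical helper, and the doubling step is
 * this sketch's reading of the fault path's "retry after a delay"
 * branch.
 */
#if 0
static unsigned long next_delay(unsigned long delay)
{
        if (delay == 0)
                return BASE_DELAY_INTERVAL;
        if (delay < MAX_DELAY_INTERVAL)
                delay *= 2;	/* 1x, 2x, 4x, ... capped at the max */
        return delay;
}
#endif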
/*
 * initialize a new messenger instance
 */
void ceph_messenger_init(struct ceph_messenger *msgr,
                         struct ceph_entity_addr *myaddr,
                         u32 supported_features,
                         u32 required_features,
                         bool nocrc)
{
        msgr->supported_features = supported_features;
        msgr->required_features = required_features;

        spin_lock_init(&msgr->global_seq_lock);

        if (myaddr)
                msgr->inst.addr = *myaddr;

        /* select a random nonce */
        msgr->inst.addr.type = 0;
        get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
        encode_my_addr(msgr);
        msgr->nocrc = nocrc;

        atomic_set(&msgr->stopping, 0);

        dout("%s %p\n", __func__, msgr);
}
EXPORT_SYMBOL(ceph_messenger_init);
static void clear_standby(struct ceph_connection *con)
{
        /* come back from STANDBY? */
        if (test_and_clear_bit(STANDBY, &con->state)) {
                mutex_lock(&con->mutex);
                dout("clear_standby %p and ++connect_seq\n", con);
                con->connect_seq++;
                WARN_ON(test_bit(WRITE_PENDING, &con->flags));
                WARN_ON(test_bit(KEEPALIVE_PENDING, &con->flags));
                mutex_unlock(&con->mutex);
        }
}
/*
 * Queue up an outgoing message on the given connection.
 */
void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
{
        if (test_bit(CLOSED, &con->state)) {
                dout("con_send %p closed, dropping %p\n", con, msg);
                ceph_msg_put(msg);
                return;
        }

        /* set src+dst */
        msg->hdr.src = con->msgr->inst.name;

        BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));

        msg->needs_out_seq = true;

        /* queue */
        mutex_lock(&con->mutex);

        BUG_ON(msg->con != NULL);
        msg->con = con->ops->get(con);
        BUG_ON(msg->con == NULL);

        BUG_ON(!list_empty(&msg->list_head));
        list_add_tail(&msg->list_head, &con->out_queue);
        dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
             ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
             ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
             le32_to_cpu(msg->hdr.front_len),
             le32_to_cpu(msg->hdr.middle_len),
             le32_to_cpu(msg->hdr.data_len));
        mutex_unlock(&con->mutex);

        /* if there wasn't anything waiting to send before, queue
         * new work */
        clear_standby(con);
        if (test_and_set_bit(WRITE_PENDING, &con->flags) == 0)
                queue_con(con);
}
EXPORT_SYMBOL(ceph_con_send);
/*
 * Revoke a message that was previously queued for send
 */
void ceph_msg_revoke(struct ceph_msg *msg)
{
        struct ceph_connection *con = msg->con;

        if (!con)
                return;		/* Message not in our possession */

        mutex_lock(&con->mutex);
        if (!list_empty(&msg->list_head)) {
                dout("%s %p msg %p - was on queue\n", __func__, con, msg);
                list_del_init(&msg->list_head);
                BUG_ON(msg->con == NULL);
                msg->con->ops->put(msg->con);
                msg->con = NULL;
                msg->hdr.seq = 0;

                ceph_msg_put(msg);
        }
        if (con->out_msg == msg) {
                dout("%s %p msg %p - was sending\n", __func__, con, msg);
                con->out_msg = NULL;
                if (con->out_kvec_is_msg) {
                        con->out_skip = con->out_kvec_bytes;
                        con->out_kvec_is_msg = false;
                }
                msg->hdr.seq = 0;

                ceph_msg_put(msg);
        }
        mutex_unlock(&con->mutex);
}
/*
 * Revoke a message that we may be reading data into
 */
void ceph_msg_revoke_incoming(struct ceph_msg *msg)
{
        struct ceph_connection *con;

        BUG_ON(msg == NULL);
        if (!msg->con) {
                dout("%s msg %p null con\n", __func__, msg);

                return;		/* Message not in our possession */
        }

        con = msg->con;
        mutex_lock(&con->mutex);
        if (con->in_msg == msg) {
                unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
                unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
                unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);

                /* skip rest of message */
                dout("%s %p msg %p revoked\n", __func__, con, msg);
                con->in_base_pos = con->in_base_pos -
                                sizeof(struct ceph_msg_header) -
                                front_len -
                                middle_len -
                                data_len -
                                sizeof(struct ceph_msg_footer);
                ceph_msg_put(con->in_msg);
                con->in_msg = NULL;
                con->in_tag = CEPH_MSGR_TAG_READY;
                con->in_seq++;
        } else {
                dout("%s %p in_msg %p msg %p no-op\n",
                     __func__, con, con->in_msg, msg);
        }
        mutex_unlock(&con->mutex);
}
/*
 * Queue a keepalive byte to ensure the tcp connection is alive.
 */
void ceph_con_keepalive(struct ceph_connection *con)
{
        dout("con_keepalive %p\n", con);
        clear_standby(con);
        if (test_and_set_bit(KEEPALIVE_PENDING, &con->flags) == 0 &&
            test_and_set_bit(WRITE_PENDING, &con->flags) == 0)
                queue_con(con);
}
EXPORT_SYMBOL(ceph_con_keepalive);
/*
 * construct a new message with given type, size
 * the new msg has a ref count of 1.
 */
struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
                              bool can_fail)
{
        struct ceph_msg *m;

        m = kmalloc(sizeof(*m), flags);
        if (m == NULL)
                goto out;
        kref_init(&m->kref);

        m->con = NULL;
        INIT_LIST_HEAD(&m->list_head);

        m->hdr.type = cpu_to_le16(type);
        m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
        m->hdr.front_len = cpu_to_le32(front_len);
        m->hdr.middle_len = 0;
        m->hdr.data_len = 0;
        m->hdr.data_off = 0;
        m->hdr.reserved = 0;
        m->footer.front_crc = 0;
        m->footer.middle_crc = 0;
        m->footer.data_crc = 0;
        m->footer.flags = 0;
        m->front_max = front_len;
        m->front_is_vmalloc = false;
        m->more_to_follow = false;

        /* data */
        m->page_alignment = 0;

        /* front */
        if (front_len) {
                if (front_len > PAGE_CACHE_SIZE) {
                        m->front.iov_base = __vmalloc(front_len, flags,
                                                      PAGE_KERNEL);
                        m->front_is_vmalloc = true;
                } else {
                        m->front.iov_base = kmalloc(front_len, flags);
                }
                if (m->front.iov_base == NULL) {
                        dout("ceph_msg_new can't allocate %d bytes\n",
                             front_len);
                        goto out2;
                }
        } else {
                m->front.iov_base = NULL;
        }
        m->front.iov_len = front_len;

        dout("ceph_msg_new %p front %d\n", m, front_len);
        return m;

out2:
        ceph_msg_put(m);
out:
        if (!can_fail) {
                pr_err("msg_new can't create type %d front %d\n", type,
                       front_len);
                WARN_ON(1);
        } else {
                dout("msg_new can't create type %d front %d\n", type,
                     front_len);
        }
        return NULL;
}
EXPORT_SYMBOL(ceph_msg_new);
/*
 * Allocate "middle" portion of a message, if it is needed and wasn't
 * allocated by alloc_msg.  This allows us to read a small fixed-size
 * per-type header in the front and then gracefully fail (i.e.,
 * propagate the error to the caller based on info in the front) when
 * the middle is too large.
 */
static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
{
        int type = le16_to_cpu(msg->hdr.type);
        int middle_len = le32_to_cpu(msg->hdr.middle_len);

        dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
             ceph_msg_type_name(type), middle_len);
        BUG_ON(!middle_len);
        BUG_ON(msg->middle);

        msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
        if (!msg->middle)
                return -ENOMEM;
        return 0;
}
/*
 * Allocate a message for receiving an incoming message on a
 * connection, and save the result in con->in_msg.  Uses the
 * connection's private alloc_msg op if available.
 *
 * Returns true if the message should be skipped, false otherwise.
 * If true is returned (skip message), con->in_msg will be NULL.
 * If false is returned, con->in_msg will contain a pointer to the
 * newly-allocated message, or NULL in case of memory exhaustion.
 */
static bool ceph_con_in_msg_alloc(struct ceph_connection *con,
                                  struct ceph_msg_header *hdr)
{
        int type = le16_to_cpu(hdr->type);
        int front_len = le32_to_cpu(hdr->front_len);
        int middle_len = le32_to_cpu(hdr->middle_len);
        int ret;

        BUG_ON(con->in_msg != NULL);

        if (con->ops->alloc_msg) {
                int skip = 0;

                mutex_unlock(&con->mutex);
                con->in_msg = con->ops->alloc_msg(con, hdr, &skip);
                mutex_lock(&con->mutex);
                if (con->in_msg) {
                        con->in_msg->con = con->ops->get(con);
                        BUG_ON(con->in_msg->con == NULL);
                }
                if (skip)
                        con->in_msg = NULL;

                if (!con->in_msg)
                        return skip != 0;
        }
        if (!con->in_msg) {
                con->in_msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
                if (!con->in_msg) {
                        pr_err("unable to allocate msg type %d len %d\n",
                               type, front_len);
                        return false;
                }
                con->in_msg->con = con->ops->get(con);
                BUG_ON(con->in_msg->con == NULL);
                con->in_msg->page_alignment = le16_to_cpu(hdr->data_off);
        }
        memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr));

        if (middle_len && !con->in_msg->middle) {
                ret = ceph_alloc_middle(con, con->in_msg);
                if (ret < 0) {
                        ceph_msg_put(con->in_msg);
                        con->in_msg = NULL;
                }
        }

        return false;
}
/*
 * Free a generically kmalloc'd message.
 */
void ceph_msg_kfree(struct ceph_msg *m)
{
        dout("msg_kfree %p\n", m);
        if (m->front_is_vmalloc)
                vfree(m->front.iov_base);
        else
                kfree(m->front.iov_base);
        kfree(m);
}

/*
 * Drop a msg ref.  Destroy as needed.
 */
void ceph_msg_last_put(struct kref *kref)
{
        struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);

        dout("ceph_msg_put last one on %p\n", m);
        WARN_ON(!list_empty(&m->list_head));

        /* drop middle, data, if any */
        if (m->middle) {
                ceph_buffer_put(m->middle);
                m->middle = NULL;
        }

        if (m->pagelist) {
                ceph_pagelist_release(m->pagelist);
                kfree(m->pagelist);
                m->pagelist = NULL;
        }

        if (m->pool)
                ceph_msgpool_put(m->pool, m);
        else
                ceph_msg_kfree(m);
}
EXPORT_SYMBOL(ceph_msg_last_put);
void ceph_msg_dump(struct ceph_msg *msg)
{
        pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg,
                 msg->front_max, msg->nr_pages);
        print_hex_dump(KERN_DEBUG, "header: ",
                       DUMP_PREFIX_OFFSET, 16, 1,
                       &msg->hdr, sizeof(msg->hdr), true);
        print_hex_dump(KERN_DEBUG, " front: ",
                       DUMP_PREFIX_OFFSET, 16, 1,
                       msg->front.iov_base, msg->front.iov_len, true);
        if (msg->middle)
                print_hex_dump(KERN_DEBUG, "middle: ",
                               DUMP_PREFIX_OFFSET, 16, 1,
                               msg->middle->vec.iov_base,
                               msg->middle->vec.iov_len, true);
        print_hex_dump(KERN_DEBUG, "footer: ",
                       DUMP_PREFIX_OFFSET, 16, 1,
                       &msg->footer, sizeof(msg->footer), true);
}
EXPORT_SYMBOL(ceph_msg_dump);