1 #include <linux/ceph/ceph_debug.h>
3 #include <linux/crc32c.h>
4 #include <linux/ctype.h>
5 #include <linux/highmem.h>
6 #include <linux/inet.h>
7 #include <linux/kthread.h>
9 #include <linux/slab.h>
10 #include <linux/socket.h>
11 #include <linux/string.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif /* CONFIG_BLOCK */
15 #include <linux/dns_resolver.h>
18 #include <linux/ceph/ceph_features.h>
19 #include <linux/ceph/libceph.h>
20 #include <linux/ceph/messenger.h>
21 #include <linux/ceph/decode.h>
22 #include <linux/ceph/pagelist.h>
23 #include <linux/export.h>
25 #define list_entry_next(pos, member) \
26 list_entry(pos->member.next, typeof(*pos), member)
/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */
/*
 * We track the state of the socket on a given connection using
 * values defined below.  The transition to a new socket state is
 * handled by a function which verifies we aren't coming from an
 * unexpected state.
 *
 *   NEW         transient initial state
 *      | con_sock_state_init()
 *      v
 *   CLOSED      initialized, but no socket (and no TCP connection)
 *      | con_sock_state_connecting()
 *      v
 *   CONNECTING  socket created, TCP connect initiated
 *      | con_sock_state_connected()
 *      v
 *   CONNECTED   TCP connection established
 *      | con_sock_state_closing()
 *      v
 *   CLOSING     socket event; await close
 *      | con_sock_state_closed()
 *      v
 *   CLOSED      (CONNECTING, CONNECTED and CLOSING may also drop
 *                straight back to CLOSED via con_sock_state_closed())
 *
 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
 */
79 #define CON_SOCK_STATE_NEW 0 /* -> CLOSED */
80 #define CON_SOCK_STATE_CLOSED 1 /* -> CONNECTING */
81 #define CON_SOCK_STATE_CONNECTING 2 /* -> CONNECTED or -> CLOSING */
82 #define CON_SOCK_STATE_CONNECTED 3 /* -> CLOSING or -> CLOSED */
83 #define CON_SOCK_STATE_CLOSING 4 /* -> CLOSED */
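/*
 * connection states
 */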
88 #define CON_STATE_CLOSED 1 /* -> PREOPEN */
89 #define CON_STATE_PREOPEN 2 /* -> CONNECTING, CLOSED */
90 #define CON_STATE_CONNECTING 3 /* -> NEGOTIATING, CLOSED */
91 #define CON_STATE_NEGOTIATING 4 /* -> OPEN, CLOSED */
92 #define CON_STATE_OPEN 5 /* -> STANDBY, CLOSED */
93 #define CON_STATE_STANDBY 6 /* -> PREOPEN, CLOSED */
/*
 * ceph_connection flag bits
 */
98 #define CON_FLAG_LOSSYTX 0 /* we can close channel or drop
99 * messages on errors */
100 #define CON_FLAG_KEEPALIVE_PENDING 1 /* we need to send a keepalive */
101 #define CON_FLAG_WRITE_PENDING 2 /* we have data ready to send */
102 #define CON_FLAG_SOCK_CLOSED 3 /* socket state changed to closed */
103 #define CON_FLAG_BACKOFF 4 /* need to retry queuing delayed work */
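/*
 * Bit helpers for the flags above; con_flag_valid() sanity-checks the
 * flag number before each bit operation.
 */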
static bool con_flag_valid(unsigned long con_flag)
{
	switch (con_flag) {
	case CON_FLAG_LOSSYTX:
	case CON_FLAG_KEEPALIVE_PENDING:
	case CON_FLAG_WRITE_PENDING:
	case CON_FLAG_SOCK_CLOSED:
	case CON_FLAG_BACKOFF:
		return true;
	default:
		return false;
	}
}
static void con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));
	clear_bit(con_flag, &con->flags);
}

static void con_flag_set(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));
	set_bit(con_flag, &con->flags);
}

static bool con_flag_test(struct ceph_connection *con, unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));
	return test_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_clear(struct ceph_connection *con,
				    unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));
	return test_and_clear_bit(con_flag, &con->flags);
}

static bool con_flag_test_and_set(struct ceph_connection *con,
				  unsigned long con_flag)
{
	BUG_ON(!con_flag_valid(con_flag));
	return test_and_set_bit(con_flag, &con->flags);
}
156 /* Slab caches for frequently-allocated structures */
158 static struct kmem_cache *ceph_msg_cache;
159 static struct kmem_cache *ceph_msg_data_cache;
161 /* static tag bytes (protocol control messages) */
162 static char tag_msg = CEPH_MSGR_TAG_MSG;
163 static char tag_ack = CEPH_MSGR_TAG_ACK;
164 static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif
/*
 * When skipping (ignoring) a block of input we read it into a "skip
 * buffer," which is this many bytes in size.
 */
#define SKIP_BUF_SIZE	1024
176 static void queue_con(struct ceph_connection *con);
177 static void cancel_con(struct ceph_connection *con);
178 static void con_work(struct work_struct *);
179 static void con_fault(struct ceph_connection *con);
/*
 * Nicely render a sockaddr as a string.  An array of formatted
 * strings is used, to approximate reentrancy.
 */
185 #define ADDR_STR_COUNT_LOG 5 /* log2(# address strings in array) */
186 #define ADDR_STR_COUNT (1 << ADDR_STR_COUNT_LOG)
187 #define ADDR_STR_COUNT_MASK (ADDR_STR_COUNT - 1)
188 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
190 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
191 static atomic_t addr_str_seq = ATOMIC_INIT(0);
193 static struct page *zero_page; /* used in certain error cases */
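/*
 * ceph_pr_addr() formats a sockaddr into one slot of the rotating
 * addr_str[] array above; the returned string is only valid until
 * ADDR_STR_COUNT further calls have been made.
 */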
195 const char *ceph_pr_addr(const struct sockaddr_storage *ss)
199 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
200 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
202 i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
205 switch (ss->ss_family) {
207 snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
208 ntohs(in4->sin_port));
212 snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr,
213 ntohs(in6->sin6_port));
217 snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
223 EXPORT_SYMBOL(ceph_pr_addr);
225 static void encode_my_addr(struct ceph_messenger *msgr)
227 memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
228 ceph_encode_addr(&msgr->my_enc_addr);
/*
 * work queue for all reading and writing to/from the socket.
 */
234 static struct workqueue_struct *ceph_msgr_wq;
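/*
 * Slab-cache setup/teardown used by ceph_msgr_init()/ceph_msgr_exit();
 * failure to create the second cache tears the first one down again.
 */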
236 static int ceph_msgr_slab_init(void)
238 BUG_ON(ceph_msg_cache);
239 ceph_msg_cache = kmem_cache_create("ceph_msg",
240 sizeof (struct ceph_msg),
241 __alignof__(struct ceph_msg), 0, NULL);
246 BUG_ON(ceph_msg_data_cache);
247 ceph_msg_data_cache = kmem_cache_create("ceph_msg_data",
248 sizeof (struct ceph_msg_data),
249 __alignof__(struct ceph_msg_data),
251 if (ceph_msg_data_cache)
254 kmem_cache_destroy(ceph_msg_cache);
255 ceph_msg_cache = NULL;
260 static void ceph_msgr_slab_exit(void)
262 BUG_ON(!ceph_msg_data_cache);
263 kmem_cache_destroy(ceph_msg_data_cache);
264 ceph_msg_data_cache = NULL;
266 BUG_ON(!ceph_msg_cache);
267 kmem_cache_destroy(ceph_msg_cache);
268 ceph_msg_cache = NULL;
271 static void _ceph_msgr_exit(void)
274 destroy_workqueue(ceph_msgr_wq);
278 ceph_msgr_slab_exit();
280 BUG_ON(zero_page == NULL);
282 page_cache_release(zero_page);
286 int ceph_msgr_init(void)
288 BUG_ON(zero_page != NULL);
289 zero_page = ZERO_PAGE(0);
290 page_cache_get(zero_page);
292 if (ceph_msgr_slab_init())
	/*
	 * The number of active work items is limited by the number of
	 * connections, so leave @max_active at default.
	 */
299 ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
303 pr_err("msgr_init failed to create workqueue\n");
308 EXPORT_SYMBOL(ceph_msgr_init);
310 void ceph_msgr_exit(void)
312 BUG_ON(ceph_msgr_wq == NULL);
316 EXPORT_SYMBOL(ceph_msgr_exit);
318 void ceph_msgr_flush(void)
320 flush_workqueue(ceph_msgr_wq);
322 EXPORT_SYMBOL(ceph_msgr_flush);
324 /* Connection socket state transition functions */
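/*
 * Each transition helper below swaps in the new sock_state atomically
 * and warns if the state it replaced was not an expected predecessor.
 */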
326 static void con_sock_state_init(struct ceph_connection *con)
330 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
331 if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
332 printk("%s: unexpected old state %d\n", __func__, old_state);
333 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
334 CON_SOCK_STATE_CLOSED);
337 static void con_sock_state_connecting(struct ceph_connection *con)
341 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
342 if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
343 printk("%s: unexpected old state %d\n", __func__, old_state);
344 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
345 CON_SOCK_STATE_CONNECTING);
348 static void con_sock_state_connected(struct ceph_connection *con)
352 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
353 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
354 printk("%s: unexpected old state %d\n", __func__, old_state);
355 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
356 CON_SOCK_STATE_CONNECTED);
359 static void con_sock_state_closing(struct ceph_connection *con)
363 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
364 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
365 old_state != CON_SOCK_STATE_CONNECTED &&
366 old_state != CON_SOCK_STATE_CLOSING))
367 printk("%s: unexpected old state %d\n", __func__, old_state);
368 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
369 CON_SOCK_STATE_CLOSING);
372 static void con_sock_state_closed(struct ceph_connection *con)
376 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
377 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
378 old_state != CON_SOCK_STATE_CLOSING &&
379 old_state != CON_SOCK_STATE_CONNECTING &&
380 old_state != CON_SOCK_STATE_CLOSED))
381 printk("%s: unexpected old state %d\n", __func__, old_state);
382 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
383 CON_SOCK_STATE_CLOSED);
/*
 * socket callback functions
 */
390 /* data available on socket, or listen socket received a connect */
391 static void ceph_sock_data_ready(struct sock *sk)
393 struct ceph_connection *con = sk->sk_user_data;
394 if (atomic_read(&con->msgr->stopping)) {
398 if (sk->sk_state != TCP_CLOSE_WAIT) {
399 dout("%s on %p state = %lu, queueing work\n", __func__,
405 /* socket has buffer space for writing */
406 static void ceph_sock_write_space(struct sock *sk)
408 struct ceph_connection *con = sk->sk_user_data;
	/* only queue to workqueue if there is data we want to write,
	 * and there is sufficient space in the socket buffer to accept
	 * more data.  clear SOCK_NOSPACE so that ceph_sock_write_space()
	 * doesn't get called again until try_write() fills the socket
	 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
	 * and net/core/stream.c:sk_stream_write_space().
	 */
417 if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) {
418 if (sk_stream_is_writeable(sk)) {
419 dout("%s %p queueing write work\n", __func__, con);
420 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
424 dout("%s %p nothing to write\n", __func__, con);
428 /* socket's state has changed */
429 static void ceph_sock_state_change(struct sock *sk)
431 struct ceph_connection *con = sk->sk_user_data;
433 dout("%s %p state = %lu sk_state = %u\n", __func__,
434 con, con->state, sk->sk_state);
436 switch (sk->sk_state) {
438 dout("%s TCP_CLOSE\n", __func__);
440 dout("%s TCP_CLOSE_WAIT\n", __func__);
441 con_sock_state_closing(con);
442 con_flag_set(con, CON_FLAG_SOCK_CLOSED);
445 case TCP_ESTABLISHED:
446 dout("%s TCP_ESTABLISHED\n", __func__);
447 con_sock_state_connected(con);
450 default: /* Everything else is uninteresting */
/*
 * set up socket callbacks
 */
458 static void set_sock_callbacks(struct socket *sock,
459 struct ceph_connection *con)
461 struct sock *sk = sock->sk;
462 sk->sk_user_data = con;
463 sk->sk_data_ready = ceph_sock_data_ready;
464 sk->sk_write_space = ceph_sock_write_space;
465 sk->sk_state_change = ceph_sock_state_change;
/*
 * initiate connection to a remote socket.
 */
476 static int ceph_tcp_connect(struct ceph_connection *con)
478 struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
483 ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
487 sock->sk->sk_allocation = GFP_NOFS | __GFP_MEMALLOC;
489 #ifdef CONFIG_LOCKDEP
	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif
493 set_sock_callbacks(sock, con);
495 dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
497 con_sock_state_connecting(con);
498 ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
500 if (ret == -EINPROGRESS) {
501 dout("connect %s EINPROGRESS sk_state = %u\n",
502 ceph_pr_addr(&con->peer_addr.in_addr),
504 } else if (ret < 0) {
505 pr_err("connect %s error %d\n",
506 ceph_pr_addr(&con->peer_addr.in_addr), ret);
508 con->error_msg = "connect error";
513 if (con->msgr->tcp_nodelay) {
516 ret = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
517 (char *)&optval, sizeof(optval));
519 pr_err("kernel_setsockopt(TCP_NODELAY) failed: %d",
523 sk_set_memalloc(sock->sk);
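/*
 * Non-blocking receive of up to @len bytes into @buf; a -EAGAIN from
 * the socket is reported as 0 so callers treat it as a short read.
 */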
529 static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
531 struct kvec iov = {buf, len};
532 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
535 r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
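/*
 * Receive into a page: map it, recvmsg() into the mapping at
 * @page_offset, then unmap.
 */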
541 static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
542 int page_offset, size_t length)
547 BUG_ON(page_offset + length > PAGE_SIZE);
551 ret = ceph_tcp_recvmsg(sock, kaddr + page_offset, length);
/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
561 static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
562 size_t kvlen, size_t len, int more)
564 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
568 msg.msg_flags |= MSG_MORE;
570 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */
572 r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
578 static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
579 int offset, size_t size, bool more)
581 int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
584 ret = kernel_sendpage(sock, page, offset, size, flags);
591 static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
592 int offset, size_t size, bool more)
597 /* sendpage cannot properly handle pages with page_count == 0,
598 * we need to fallback to sendmsg if that's the case */
599 if (page_count(page) >= 1)
600 return __ceph_tcp_sendpage(sock, page, offset, size, more);
602 iov.iov_base = kmap(page) + offset;
604 ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more);
/*
 * Shutdown/close the socket for the given connection.
 */
613 static int con_close_socket(struct ceph_connection *con)
617 dout("con_close_socket on %p sock %p\n", con, con->sock);
619 rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
620 sock_release(con->sock);
	/*
	 * Forcibly clear the SOCK_CLOSED flag.  It gets set
	 * independent of the connection mutex, and we could have
	 * received a socket close event before we had the chance to
	 * shut the socket down.
	 */
630 con_flag_clear(con, CON_FLAG_SOCK_CLOSED);
632 con_sock_state_closed(con);
/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
 */
640 static void ceph_msg_remove(struct ceph_msg *msg)
642 list_del_init(&msg->list_head);
643 BUG_ON(msg->con == NULL);
644 msg->con->ops->put(msg->con);
649 static void ceph_msg_remove_list(struct list_head *head)
651 while (!list_empty(head)) {
652 struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
654 ceph_msg_remove(msg);
658 static void reset_connection(struct ceph_connection *con)
660 /* reset connection, out_queue, msg_ and connect_seq */
661 /* discard existing out_queue and msg_seq */
662 dout("reset_connection %p\n", con);
663 ceph_msg_remove_list(&con->out_queue);
664 ceph_msg_remove_list(&con->out_sent);
667 BUG_ON(con->in_msg->con != con);
668 con->in_msg->con = NULL;
669 ceph_msg_put(con->in_msg);
674 con->connect_seq = 0;
677 ceph_msg_put(con->out_msg);
681 con->in_seq_acked = 0;
/*
 * mark a peer down.  drop any open connections.
 */
687 void ceph_con_close(struct ceph_connection *con)
689 mutex_lock(&con->mutex);
690 dout("con_close %p peer %s\n", con,
691 ceph_pr_addr(&con->peer_addr.in_addr));
692 con->state = CON_STATE_CLOSED;
694 con_flag_clear(con, CON_FLAG_LOSSYTX); /* so we retry next connect */
695 con_flag_clear(con, CON_FLAG_KEEPALIVE_PENDING);
696 con_flag_clear(con, CON_FLAG_WRITE_PENDING);
697 con_flag_clear(con, CON_FLAG_BACKOFF);
699 reset_connection(con);
700 con->peer_global_seq = 0;
702 con_close_socket(con);
703 mutex_unlock(&con->mutex);
705 EXPORT_SYMBOL(ceph_con_close);
/*
 * Reopen a closed connection, with a new peer address.
 */
710 void ceph_con_open(struct ceph_connection *con,
711 __u8 entity_type, __u64 entity_num,
712 struct ceph_entity_addr *addr)
714 mutex_lock(&con->mutex);
715 dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
717 WARN_ON(con->state != CON_STATE_CLOSED);
718 con->state = CON_STATE_PREOPEN;
720 con->peer_name.type = (__u8) entity_type;
721 con->peer_name.num = cpu_to_le64(entity_num);
723 memcpy(&con->peer_addr, addr, sizeof(*addr));
724 con->delay = 0; /* reset backoff memory */
725 mutex_unlock(&con->mutex);
728 EXPORT_SYMBOL(ceph_con_open);
/*
 * return true if this connection ever successfully opened
 */
733 bool ceph_con_opened(struct ceph_connection *con)
735 return con->connect_seq > 0;
/*
 * initialize a new connection.
 */
741 void ceph_con_init(struct ceph_connection *con, void *private,
742 const struct ceph_connection_operations *ops,
743 struct ceph_messenger *msgr)
745 dout("con_init %p\n", con);
746 memset(con, 0, sizeof(*con));
747 con->private = private;
751 con_sock_state_init(con);
753 mutex_init(&con->mutex);
754 INIT_LIST_HEAD(&con->out_queue);
755 INIT_LIST_HEAD(&con->out_sent);
756 INIT_DELAYED_WORK(&con->work, con_work);
758 con->state = CON_STATE_CLOSED;
760 EXPORT_SYMBOL(ceph_con_init);
/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
767 static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
771 spin_lock(&msgr->global_seq_lock);
772 if (msgr->global_seq < gt)
773 msgr->global_seq = gt;
774 ret = ++msgr->global_seq;
775 spin_unlock(&msgr->global_seq_lock);
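/*
 * out_kvec helpers: reset the pending kvec array, or append another
 * (size, data) entry to it and account for its bytes.
 */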
779 static void con_out_kvec_reset(struct ceph_connection *con)
781 con->out_kvec_left = 0;
782 con->out_kvec_bytes = 0;
783 con->out_kvec_cur = &con->out_kvec[0];
786 static void con_out_kvec_add(struct ceph_connection *con,
787 size_t size, void *data)
791 index = con->out_kvec_left;
792 BUG_ON(index >= ARRAY_SIZE(con->out_kvec));
794 con->out_kvec[index].iov_len = size;
795 con->out_kvec[index].iov_base = data;
796 con->out_kvec_left++;
797 con->out_kvec_bytes += size;
#ifdef CONFIG_BLOCK
/*
 * For a bio data item, a piece is whatever remains of the next
 * entry in the current bio iovec, or the first entry in the next
 * bio in the list.
 */
807 static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
810 struct ceph_msg_data *data = cursor->data;
813 BUG_ON(data->type != CEPH_MSG_DATA_BIO);
818 cursor->resid = min(length, data->bio_length);
820 cursor->bvec_iter = bio->bi_iter;
822 cursor->resid <= bio_iter_len(bio, cursor->bvec_iter);
825 static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
829 struct ceph_msg_data *data = cursor->data;
831 struct bio_vec bio_vec;
833 BUG_ON(data->type != CEPH_MSG_DATA_BIO);
838 bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);
840 *page_offset = (size_t) bio_vec.bv_offset;
841 BUG_ON(*page_offset >= PAGE_SIZE);
842 if (cursor->last_piece) /* pagelist offset is always 0 */
843 *length = cursor->resid;
845 *length = (size_t) bio_vec.bv_len;
846 BUG_ON(*length > cursor->resid);
847 BUG_ON(*page_offset + *length > PAGE_SIZE);
849 return bio_vec.bv_page;
852 static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
856 struct bio_vec bio_vec;
858 BUG_ON(cursor->data->type != CEPH_MSG_DATA_BIO);
863 bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);
865 /* Advance the cursor offset */
867 BUG_ON(cursor->resid < bytes);
868 cursor->resid -= bytes;
870 bio_advance_iter(bio, &cursor->bvec_iter, bytes);
872 if (bytes < bio_vec.bv_len)
873 return false; /* more bytes to process in this segment */
875 /* Move on to the next segment, and possibly the next bio */
877 if (!cursor->bvec_iter.bi_size) {
881 cursor->bvec_iter = bio->bi_iter;
883 memset(&cursor->bvec_iter, 0,
884 sizeof(cursor->bvec_iter));
887 if (!cursor->last_piece) {
888 BUG_ON(!cursor->resid);
890 /* A short read is OK, so use <= rather than == */
891 if (cursor->resid <= bio_iter_len(bio, cursor->bvec_iter))
892 cursor->last_piece = true;
897 #endif /* CONFIG_BLOCK */
/*
 * For a page array, a piece comes from the first page in the array
 * that has not already been fully consumed.
 */
903 static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
906 struct ceph_msg_data *data = cursor->data;
909 BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
911 BUG_ON(!data->pages);
912 BUG_ON(!data->length);
914 cursor->resid = min(length, data->length);
915 page_count = calc_pages_for(data->alignment, (u64)data->length);
916 cursor->page_offset = data->alignment & ~PAGE_MASK;
917 cursor->page_index = 0;
918 BUG_ON(page_count > (int)USHRT_MAX);
919 cursor->page_count = (unsigned short)page_count;
920 BUG_ON(length > SIZE_MAX - cursor->page_offset);
921 cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE;
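/*
 * Return the page holding the next piece of a page-array data item,
 * along with the piece's offset and length within that page.
 */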
static struct page *
ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor,
926 size_t *page_offset, size_t *length)
928 struct ceph_msg_data *data = cursor->data;
930 BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
932 BUG_ON(cursor->page_index >= cursor->page_count);
933 BUG_ON(cursor->page_offset >= PAGE_SIZE);
935 *page_offset = cursor->page_offset;
936 if (cursor->last_piece)
937 *length = cursor->resid;
939 *length = PAGE_SIZE - *page_offset;
941 return data->pages[cursor->page_index];
944 static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor,
947 BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES);
949 BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);
951 /* Advance the cursor page offset */
953 cursor->resid -= bytes;
954 cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;
955 if (!bytes || cursor->page_offset)
956 return false; /* more bytes to process in the current page */
959 return false; /* no more data */
961 /* Move on to the next page; offset is already at 0 */
963 BUG_ON(cursor->page_index >= cursor->page_count);
964 cursor->page_index++;
965 cursor->last_piece = cursor->resid <= PAGE_SIZE;
/*
 * For a pagelist, a piece is whatever remains to be consumed in the
 * first page in the list, or the front of the next page.
 */
static void
ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor,
978 struct ceph_msg_data *data = cursor->data;
979 struct ceph_pagelist *pagelist;
982 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
984 pagelist = data->pagelist;
988 return; /* pagelist can be assigned but empty */
990 BUG_ON(list_empty(&pagelist->head));
991 page = list_first_entry(&pagelist->head, struct page, lru);
993 cursor->resid = min(length, pagelist->length);
996 cursor->last_piece = cursor->resid <= PAGE_SIZE;
static struct page *
ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor,
1001 size_t *page_offset, size_t *length)
1003 struct ceph_msg_data *data = cursor->data;
1004 struct ceph_pagelist *pagelist;
1006 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
1008 pagelist = data->pagelist;
1011 BUG_ON(!cursor->page);
1012 BUG_ON(cursor->offset + cursor->resid != pagelist->length);
1014 /* offset of first page in pagelist is always 0 */
1015 *page_offset = cursor->offset & ~PAGE_MASK;
1016 if (cursor->last_piece)
1017 *length = cursor->resid;
1019 *length = PAGE_SIZE - *page_offset;
1021 return cursor->page;
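/*
 * Advance the pagelist cursor by @bytes; returns true once the cursor
 * has moved on to the next page (i.e. a new piece).
 */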
1024 static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor,
1027 struct ceph_msg_data *data = cursor->data;
1028 struct ceph_pagelist *pagelist;
1030 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
1032 pagelist = data->pagelist;
1035 BUG_ON(cursor->offset + cursor->resid != pagelist->length);
1036 BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE);
1038 /* Advance the cursor offset */
1040 cursor->resid -= bytes;
1041 cursor->offset += bytes;
1042 /* offset of first page in pagelist is always 0 */
1043 if (!bytes || cursor->offset & ~PAGE_MASK)
1044 return false; /* more bytes to process in the current page */
1047 return false; /* no more data */
1049 /* Move on to the next page */
1051 BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
1052 cursor->page = list_entry_next(cursor->page, lru);
1053 cursor->last_piece = cursor->resid <= PAGE_SIZE;
/*
 * Message data is handled (sent or received) in pieces, where each
 * piece resides on a single page.  The network layer might not
 * consume an entire piece at once.  A data item's cursor keeps
 * track of which piece is next to process and how much remains to
 * be processed in that piece.  It also tracks whether the current
 * piece is the last one in the data item.
 */
1066 static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
1068 size_t length = cursor->total_resid;
1070 switch (cursor->data->type) {
1071 case CEPH_MSG_DATA_PAGELIST:
1072 ceph_msg_data_pagelist_cursor_init(cursor, length);
1074 case CEPH_MSG_DATA_PAGES:
1075 ceph_msg_data_pages_cursor_init(cursor, length);
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
1079 ceph_msg_data_bio_cursor_init(cursor, length);
1081 #endif /* CONFIG_BLOCK */
1082 case CEPH_MSG_DATA_NONE:
1087 cursor->need_crc = true;
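/*
 * Initialize the cursor for the first data item of a message; the
 * per-item setup is delegated to __ceph_msg_data_cursor_init() above.
 */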
1090 static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length)
1092 struct ceph_msg_data_cursor *cursor = &msg->cursor;
1093 struct ceph_msg_data *data;
1096 BUG_ON(length > msg->data_length);
1097 BUG_ON(list_empty(&msg->data));
1099 cursor->data_head = &msg->data;
1100 cursor->total_resid = length;
1101 data = list_first_entry(&msg->data, struct ceph_msg_data, links);
1102 cursor->data = data;
1104 __ceph_msg_data_cursor_init(cursor);
/*
 * Return the page containing the next piece to process for a given
 * data item, and supply the page offset and length of that piece.
 * Indicate whether this is the last piece in this data item.
 */
1112 static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
1113 size_t *page_offset, size_t *length,
1118 switch (cursor->data->type) {
1119 case CEPH_MSG_DATA_PAGELIST:
1120 page = ceph_msg_data_pagelist_next(cursor, page_offset, length);
1122 case CEPH_MSG_DATA_PAGES:
1123 page = ceph_msg_data_pages_next(cursor, page_offset, length);
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
1127 page = ceph_msg_data_bio_next(cursor, page_offset, length);
1129 #endif /* CONFIG_BLOCK */
1130 case CEPH_MSG_DATA_NONE:
1136 BUG_ON(*page_offset + *length > PAGE_SIZE);
1139 *last_piece = cursor->last_piece;
/*
 * Returns true if the result moves the cursor on to the next piece
 * of the data item.
 */
1148 static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
1153 BUG_ON(bytes > cursor->resid);
1154 switch (cursor->data->type) {
1155 case CEPH_MSG_DATA_PAGELIST:
1156 new_piece = ceph_msg_data_pagelist_advance(cursor, bytes);
1158 case CEPH_MSG_DATA_PAGES:
1159 new_piece = ceph_msg_data_pages_advance(cursor, bytes);
#ifdef CONFIG_BLOCK
	case CEPH_MSG_DATA_BIO:
1163 new_piece = ceph_msg_data_bio_advance(cursor, bytes);
1165 #endif /* CONFIG_BLOCK */
1166 case CEPH_MSG_DATA_NONE:
1171 cursor->total_resid -= bytes;
1173 if (!cursor->resid && cursor->total_resid) {
1174 WARN_ON(!cursor->last_piece);
1175 BUG_ON(list_is_last(&cursor->data->links, cursor->data_head));
1176 cursor->data = list_entry_next(cursor->data, links);
1177 __ceph_msg_data_cursor_init(cursor);
1180 cursor->need_crc = new_piece;
1185 static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
1190 /* Initialize data cursor */
1192 ceph_msg_data_cursor_init(msg, (size_t)data_len);
/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid.. we just add on to the end.
 */
1199 static void prepare_write_message_footer(struct ceph_connection *con)
1201 struct ceph_msg *m = con->out_msg;
1202 int v = con->out_kvec_left;
1204 m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
1206 dout("prepare_write_message_footer %p\n", con);
1207 con->out_kvec_is_msg = true;
1208 con->out_kvec[v].iov_base = &m->footer;
	if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
		if (con->ops->sign_message)
			con->ops->sign_message(con, m);
		else
			m->footer.sig = 0;
		con->out_kvec[v].iov_len = sizeof(m->footer);
		con->out_kvec_bytes += sizeof(m->footer);
	} else {
		m->old_footer.flags = m->footer.flags;
		con->out_kvec[v].iov_len = sizeof(m->old_footer);
		con->out_kvec_bytes += sizeof(m->old_footer);
	}
1221 con->out_kvec_left++;
1222 con->out_more = m->more_to_follow;
1223 con->out_msg_done = true;
/*
 * Prepare headers for the next outgoing message.
 */
1229 static void prepare_write_message(struct ceph_connection *con)
1234 con_out_kvec_reset(con);
1235 con->out_kvec_is_msg = true;
1236 con->out_msg_done = false;
1238 /* Sneak an ack in there first? If we can get it into the same
1239 * TCP packet that's a good thing. */
1240 if (con->in_seq > con->in_seq_acked) {
1241 con->in_seq_acked = con->in_seq;
1242 con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
1243 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
1244 con_out_kvec_add(con, sizeof (con->out_temp_ack),
1245 &con->out_temp_ack);
1248 BUG_ON(list_empty(&con->out_queue));
1249 m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
1251 BUG_ON(m->con != con);
1253 /* put message on sent list */
1255 list_move_tail(&m->list_head, &con->out_sent);
1258 * only assign outgoing seq # if we haven't sent this message
1259 * yet. if it is requeued, resend with it's original seq.
1261 if (m->needs_out_seq) {
1262 m->hdr.seq = cpu_to_le64(++con->out_seq);
1263 m->needs_out_seq = false;
1265 WARN_ON(m->data_length != le32_to_cpu(m->hdr.data_len));
1267 dout("prepare_write_message %p seq %lld type %d len %d+%d+%zd\n",
1268 m, con->out_seq, le16_to_cpu(m->hdr.type),
1269 le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
1271 BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);
1273 /* tag + hdr + front + middle */
1274 con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
1275 con_out_kvec_add(con, sizeof (m->hdr), &m->hdr);
1276 con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
1279 con_out_kvec_add(con, m->middle->vec.iov_len,
1280 m->middle->vec.iov_base);
1282 /* fill in crc (except data pages), footer */
1283 crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
1284 con->out_msg->hdr.crc = cpu_to_le32(crc);
1285 con->out_msg->footer.flags = 0;
1287 crc = crc32c(0, m->front.iov_base, m->front.iov_len);
1288 con->out_msg->footer.front_crc = cpu_to_le32(crc);
1290 crc = crc32c(0, m->middle->vec.iov_base,
1291 m->middle->vec.iov_len);
1292 con->out_msg->footer.middle_crc = cpu_to_le32(crc);
1294 con->out_msg->footer.middle_crc = 0;
1295 dout("%s front_crc %u middle_crc %u\n", __func__,
1296 le32_to_cpu(con->out_msg->footer.front_crc),
1297 le32_to_cpu(con->out_msg->footer.middle_crc));
1299 /* is there a data payload? */
1300 con->out_msg->footer.data_crc = 0;
1301 if (m->data_length) {
1302 prepare_message_data(con->out_msg, m->data_length);
1303 con->out_more = 1; /* data + footer will follow */
1305 /* no, queue up footer too and be done */
1306 prepare_write_message_footer(con);
1309 con_flag_set(con, CON_FLAG_WRITE_PENDING);
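/*
 * Prepare an ack for send.
 */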
1315 static void prepare_write_ack(struct ceph_connection *con)
1317 dout("prepare_write_ack %p %llu -> %llu\n", con,
1318 con->in_seq_acked, con->in_seq);
1319 con->in_seq_acked = con->in_seq;
1321 con_out_kvec_reset(con);
1323 con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
1325 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
1326 con_out_kvec_add(con, sizeof (con->out_temp_ack),
1327 &con->out_temp_ack);
1329 con->out_more = 1; /* more will follow.. eventually.. */
1330 con_flag_set(con, CON_FLAG_WRITE_PENDING);
/*
 * Prepare to share the seq during handshake
 */
1336 static void prepare_write_seq(struct ceph_connection *con)
1338 dout("prepare_write_seq %p %llu -> %llu\n", con,
1339 con->in_seq_acked, con->in_seq);
1340 con->in_seq_acked = con->in_seq;
1342 con_out_kvec_reset(con);
1344 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
1345 con_out_kvec_add(con, sizeof (con->out_temp_ack),
1346 &con->out_temp_ack);
1348 con_flag_set(con, CON_FLAG_WRITE_PENDING);
/*
 * Prepare to write keepalive byte.
 */
1354 static void prepare_write_keepalive(struct ceph_connection *con)
1356 dout("prepare_write_keepalive %p\n", con);
1357 con_out_kvec_reset(con);
1358 con_out_kvec_add(con, sizeof (tag_keepalive), &tag_keepalive);
1359 con_flag_set(con, CON_FLAG_WRITE_PENDING);
/*
 * Connection negotiation.
 */
1366 static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
1369 struct ceph_auth_handshake *auth;
1371 if (!con->ops->get_authorizer) {
1372 con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
1373 con->out_connect.authorizer_len = 0;
1377 /* Can't hold the mutex while getting authorizer */
1378 mutex_unlock(&con->mutex);
1379 auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
1380 mutex_lock(&con->mutex);
1384 if (con->state != CON_STATE_NEGOTIATING)
1385 return ERR_PTR(-EAGAIN);
1387 con->auth_reply_buf = auth->authorizer_reply_buf;
1388 con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
/*
 * We connected to a peer and are saying hello.
 */
1395 static void prepare_write_banner(struct ceph_connection *con)
1397 con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
1398 con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
1399 &con->msgr->my_enc_addr);
1402 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1405 static int prepare_write_connect(struct ceph_connection *con)
1407 unsigned int global_seq = get_global_seq(con->msgr, 0);
1410 struct ceph_auth_handshake *auth;
1412 switch (con->peer_name.type) {
1413 case CEPH_ENTITY_TYPE_MON:
1414 proto = CEPH_MONC_PROTOCOL;
1416 case CEPH_ENTITY_TYPE_OSD:
1417 proto = CEPH_OSDC_PROTOCOL;
1419 case CEPH_ENTITY_TYPE_MDS:
1420 proto = CEPH_MDSC_PROTOCOL;
1426 dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
1427 con->connect_seq, global_seq, proto);
1429 con->out_connect.features = cpu_to_le64(con->msgr->supported_features);
1430 con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
1431 con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
1432 con->out_connect.global_seq = cpu_to_le32(global_seq);
1433 con->out_connect.protocol_version = cpu_to_le32(proto);
1434 con->out_connect.flags = 0;
1436 auth_proto = CEPH_AUTH_UNKNOWN;
1437 auth = get_connect_authorizer(con, &auth_proto);
1439 return PTR_ERR(auth);
1441 con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
1442 con->out_connect.authorizer_len = auth ?
1443 cpu_to_le32(auth->authorizer_buf_len) : 0;
1445 con_out_kvec_add(con, sizeof (con->out_connect),
1447 if (auth && auth->authorizer_buf_len)
1448 con_out_kvec_add(con, auth->authorizer_buf_len,
1449 auth->authorizer_buf);
1452 con_flag_set(con, CON_FLAG_WRITE_PENDING);
/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * -1 -> error
 */
1463 static int write_partial_kvec(struct ceph_connection *con)
1467 dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
1468 while (con->out_kvec_bytes > 0) {
1469 ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
1470 con->out_kvec_left, con->out_kvec_bytes,
1474 con->out_kvec_bytes -= ret;
1475 if (con->out_kvec_bytes == 0)
1478 /* account for full iov entries consumed */
1479 while (ret >= con->out_kvec_cur->iov_len) {
1480 BUG_ON(!con->out_kvec_left);
1481 ret -= con->out_kvec_cur->iov_len;
1482 con->out_kvec_cur++;
1483 con->out_kvec_left--;
1485 /* and for a partially-consumed entry */
1487 con->out_kvec_cur->iov_len -= ret;
1488 con->out_kvec_cur->iov_base += ret;
1491 con->out_kvec_left = 0;
1492 con->out_kvec_is_msg = false;
1495 dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
1496 con->out_kvec_bytes, con->out_kvec_left, ret);
1497 return ret; /* done! */
1500 static u32 ceph_crc32c_page(u32 crc, struct page *page,
1501 unsigned int page_offset,
1502 unsigned int length)
1507 BUG_ON(kaddr == NULL);
1508 crc = crc32c(crc, kaddr + page_offset, length);
/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * -1 -> error
 */
1520 static int write_partial_message_data(struct ceph_connection *con)
1522 struct ceph_msg *msg = con->out_msg;
1523 struct ceph_msg_data_cursor *cursor = &msg->cursor;
1524 bool do_datacrc = !con->msgr->nocrc;
1527 dout("%s %p msg %p\n", __func__, con, msg);
1529 if (list_empty(&msg->data))
	/*
	 * Iterate through each page that contains data to be
	 * written, and send as much as possible for each.
	 *
	 * If we are calculating the data crc (the default), we will
	 * need to map the page.  If we have no pages, they have
	 * been revoked, so use the zero page.
	 */
1540 crc = do_datacrc ? le32_to_cpu(msg->footer.data_crc) : 0;
1541 while (cursor->resid) {
1549 page = ceph_msg_data_next(&msg->cursor, &page_offset, &length,
1551 ret = ceph_tcp_sendpage(con->sock, page, page_offset,
1552 length, last_piece);
1555 msg->footer.data_crc = cpu_to_le32(crc);
1559 if (do_datacrc && cursor->need_crc)
1560 crc = ceph_crc32c_page(crc, page, page_offset, length);
1561 need_crc = ceph_msg_data_advance(&msg->cursor, (size_t)ret);
1564 dout("%s %p msg %p done\n", __func__, con, msg);
1566 /* prepare and queue up footer, too */
1568 msg->footer.data_crc = cpu_to_le32(crc);
1570 msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
1571 con_out_kvec_reset(con);
1572 prepare_write_message_footer(con);
1574 return 1; /* must return > 0 to indicate success */
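/*
 * Write zero-filled pages from zero_page to consume con->out_skip
 * bytes of revoked outgoing data.
 */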
1580 static int write_partial_skip(struct ceph_connection *con)
1584 while (con->out_skip > 0) {
1585 size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);
1587 ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true);
1590 con->out_skip -= ret;
/*
 * Prepare to read connection handshake, or an ack.
 */
1600 static void prepare_read_banner(struct ceph_connection *con)
1602 dout("prepare_read_banner %p\n", con);
1603 con->in_base_pos = 0;
1606 static void prepare_read_connect(struct ceph_connection *con)
1608 dout("prepare_read_connect %p\n", con);
1609 con->in_base_pos = 0;
1612 static void prepare_read_ack(struct ceph_connection *con)
1614 dout("prepare_read_ack %p\n", con);
1615 con->in_base_pos = 0;
1618 static void prepare_read_seq(struct ceph_connection *con)
1620 dout("prepare_read_seq %p\n", con);
1621 con->in_base_pos = 0;
1622 con->in_tag = CEPH_MSGR_TAG_SEQ;
1625 static void prepare_read_tag(struct ceph_connection *con)
1627 dout("prepare_read_tag %p\n", con);
1628 con->in_base_pos = 0;
1629 con->in_tag = CEPH_MSGR_TAG_READY;
/*
 * Prepare to read a message.
 */
1635 static int prepare_read_message(struct ceph_connection *con)
1637 dout("prepare_read_message %p\n", con);
1638 BUG_ON(con->in_msg != NULL);
1639 con->in_base_pos = 0;
1640 con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
1645 static int read_partial(struct ceph_connection *con,
1646 int end, int size, void *object)
1648 while (con->in_base_pos < end) {
1649 int left = end - con->in_base_pos;
1650 int have = size - left;
1651 int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
1654 con->in_base_pos += ret;
/*
 * Read all or part of the connect-side handshake on a new connection
 */
1663 static int read_partial_banner(struct ceph_connection *con)
1669 dout("read_partial_banner %p at %d\n", con, con->in_base_pos);
1672 size = strlen(CEPH_BANNER);
1674 ret = read_partial(con, end, size, con->in_banner);
1678 size = sizeof (con->actual_peer_addr);
1680 ret = read_partial(con, end, size, &con->actual_peer_addr);
1684 size = sizeof (con->peer_addr_for_me);
1686 ret = read_partial(con, end, size, &con->peer_addr_for_me);
1694 static int read_partial_connect(struct ceph_connection *con)
1700 dout("read_partial_connect %p at %d\n", con, con->in_base_pos);
1702 size = sizeof (con->in_reply);
1704 ret = read_partial(con, end, size, &con->in_reply);
1708 size = le32_to_cpu(con->in_reply.authorizer_len);
1710 ret = read_partial(con, end, size, con->auth_reply_buf);
1714 dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
1715 con, (int)con->in_reply.tag,
1716 le32_to_cpu(con->in_reply.connect_seq),
1717 le32_to_cpu(con->in_reply.global_seq));
/*
 * Verify the hello banner looks okay.
 */
1726 static int verify_hello(struct ceph_connection *con)
1728 if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
1729 pr_err("connect to %s got bad banner\n",
1730 ceph_pr_addr(&con->peer_addr.in_addr));
1731 con->error_msg = "protocol error, bad banner";
1737 static bool addr_is_blank(struct sockaddr_storage *ss)
1739 switch (ss->ss_family) {
1741 return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
1744 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
1745 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
1746 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
1747 ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
1752 static int addr_port(struct sockaddr_storage *ss)
1754 switch (ss->ss_family) {
1756 return ntohs(((struct sockaddr_in *)ss)->sin_port);
1758 return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
1763 static void addr_set_port(struct sockaddr_storage *ss, int p)
1765 switch (ss->ss_family) {
1767 ((struct sockaddr_in *)ss)->sin_port = htons(p);
1770 ((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
/*
 * Unlike other *_pton function semantics, zero indicates success.
 */
1778 static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
1779 char delim, const char **ipend)
1781 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
1782 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
1784 memset(ss, 0, sizeof(*ss));
1786 if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
1787 ss->ss_family = AF_INET;
1791 if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
1792 ss->ss_family = AF_INET6;
/*
 * Extract hostname string and resolve using kernel DNS facility.
 */
1802 #ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
1803 static int ceph_dns_resolve_name(const char *name, size_t namelen,
1804 struct sockaddr_storage *ss, char delim, const char **ipend)
1806 const char *end, *delim_p;
1807 char *colon_p, *ip_addr = NULL;
	/*
	 * The end of the hostname occurs immediately preceding the delimiter or
	 * the port marker (':') where the delimiter takes precedence.
	 */
1814 delim_p = memchr(name, delim, namelen);
1815 colon_p = memchr(name, ':', namelen);
1817 if (delim_p && colon_p)
1818 end = delim_p < colon_p ? delim_p : colon_p;
1819 else if (!delim_p && colon_p)
1823 if (!end) /* case: hostname:/ */
1824 end = name + namelen;
1830 /* do dns_resolve upcall */
1831 ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL);
1833 ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL);
1841 pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
1842 ret, ret ? "failed" : ceph_pr_addr(ss));
1847 static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
1848 struct sockaddr_storage *ss, char delim, const char **ipend)
/*
 * Parse a server name (IP or hostname). If a valid IP address is not found
 * then try to extract a hostname to resolve using userspace DNS upcall.
 */
1858 static int ceph_parse_server_name(const char *name, size_t namelen,
1859 struct sockaddr_storage *ss, char delim, const char **ipend)
1863 ret = ceph_pton(name, namelen, ss, delim, ipend);
1865 ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend);
/*
 * Parse an ip[:port] list into an addr array.  Use the default
 * monitor port if a port isn't specified.
 */
1874 int ceph_parse_ips(const char *c, const char *end,
1875 struct ceph_entity_addr *addr,
1876 int max_count, int *count)
1878 int i, ret = -EINVAL;
1881 dout("parse_ips on '%.*s'\n", (int)(end-c), c);
1882 for (i = 0; i < max_count; i++) {
1884 struct sockaddr_storage *ss = &addr[i].in_addr;
1893 ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend);
1902 dout("missing matching ']'\n");
1909 if (p < end && *p == ':') {
1912 while (p < end && *p >= '0' && *p <= '9') {
1913 port = (port * 10) + (*p - '0');
1917 port = CEPH_MON_PORT;
1918 else if (port > 65535)
1921 port = CEPH_MON_PORT;
1924 addr_set_port(ss, port);
1926 dout("parse_ips got %s\n", ceph_pr_addr(ss));
1943 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
1946 EXPORT_SYMBOL(ceph_parse_ips);
1948 static int process_banner(struct ceph_connection *con)
1950 dout("process_banner on %p\n", con);
1952 if (verify_hello(con) < 0)
1955 ceph_decode_addr(&con->actual_peer_addr);
1956 ceph_decode_addr(&con->peer_addr_for_me);
	/*
	 * Make sure the other end is who we wanted.  note that the other
	 * end may not yet know their ip address, so if it's 0.0.0.0, give
	 * them the benefit of the doubt.
	 */
1963 if (memcmp(&con->peer_addr, &con->actual_peer_addr,
1964 sizeof(con->peer_addr)) != 0 &&
1965 !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
1966 con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
1967 pr_warn("wrong peer, want %s/%d, got %s/%d\n",
1968 ceph_pr_addr(&con->peer_addr.in_addr),
1969 (int)le32_to_cpu(con->peer_addr.nonce),
1970 ceph_pr_addr(&con->actual_peer_addr.in_addr),
1971 (int)le32_to_cpu(con->actual_peer_addr.nonce));
1972 con->error_msg = "wrong peer at address";
	/*
	 * did we learn our address?
	 */
1979 if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
1980 int port = addr_port(&con->msgr->inst.addr.in_addr);
1982 memcpy(&con->msgr->inst.addr.in_addr,
1983 &con->peer_addr_for_me.in_addr,
1984 sizeof(con->peer_addr_for_me.in_addr));
1985 addr_set_port(&con->msgr->inst.addr.in_addr, port);
1986 encode_my_addr(con->msgr);
1987 dout("process_banner learned my addr is %s\n",
1988 ceph_pr_addr(&con->msgr->inst.addr.in_addr));
1994 static int process_connect(struct ceph_connection *con)
1996 u64 sup_feat = con->msgr->supported_features;
1997 u64 req_feat = con->msgr->required_features;
1998 u64 server_feat = ceph_sanitize_features(
1999 le64_to_cpu(con->in_reply.features));
2002 dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
2004 switch (con->in_reply.tag) {
2005 case CEPH_MSGR_TAG_FEATURES:
2006 pr_err("%s%lld %s feature set mismatch,"
2007 " my %llx < server's %llx, missing %llx\n",
2008 ENTITY_NAME(con->peer_name),
2009 ceph_pr_addr(&con->peer_addr.in_addr),
2010 sup_feat, server_feat, server_feat & ~sup_feat);
2011 con->error_msg = "missing required protocol features";
2012 reset_connection(con);
2015 case CEPH_MSGR_TAG_BADPROTOVER:
2016 pr_err("%s%lld %s protocol version mismatch,"
2017 " my %d != server's %d\n",
2018 ENTITY_NAME(con->peer_name),
2019 ceph_pr_addr(&con->peer_addr.in_addr),
2020 le32_to_cpu(con->out_connect.protocol_version),
2021 le32_to_cpu(con->in_reply.protocol_version));
2022 con->error_msg = "protocol version mismatch";
2023 reset_connection(con);
2026 case CEPH_MSGR_TAG_BADAUTHORIZER:
2028 dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
2030 if (con->auth_retry == 2) {
2031 con->error_msg = "connect authorization failure";
2034 con_out_kvec_reset(con);
2035 ret = prepare_write_connect(con);
2038 prepare_read_connect(con);
2041 case CEPH_MSGR_TAG_RESETSESSION:
		/*
		 * If we connected with a large connect_seq but the peer
		 * has no record of a session with us (no connection, or
		 * connect_seq == 0), they will send RESETSESSION to indicate
		 * that they must have reset their session, and may have
		 * dropped messages.
		 */
2049 dout("process_connect got RESET peer seq %u\n",
2050 le32_to_cpu(con->in_reply.connect_seq));
2051 pr_err("%s%lld %s connection reset\n",
2052 ENTITY_NAME(con->peer_name),
2053 ceph_pr_addr(&con->peer_addr.in_addr));
2054 reset_connection(con);
2055 con_out_kvec_reset(con);
2056 ret = prepare_write_connect(con);
2059 prepare_read_connect(con);
2061 /* Tell ceph about it. */
2062 mutex_unlock(&con->mutex);
2063 pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
2064 if (con->ops->peer_reset)
2065 con->ops->peer_reset(con);
2066 mutex_lock(&con->mutex);
2067 if (con->state != CON_STATE_NEGOTIATING)
2071 case CEPH_MSGR_TAG_RETRY_SESSION:
		/*
		 * If we sent a smaller connect_seq than the peer has, try
		 * again with a larger value.
		 */
2076 dout("process_connect got RETRY_SESSION my seq %u, peer %u\n",
2077 le32_to_cpu(con->out_connect.connect_seq),
2078 le32_to_cpu(con->in_reply.connect_seq));
2079 con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
2080 con_out_kvec_reset(con);
2081 ret = prepare_write_connect(con);
2084 prepare_read_connect(con);
2087 case CEPH_MSGR_TAG_RETRY_GLOBAL:
		/*
		 * If we sent a smaller global_seq than the peer has, try
		 * again with a larger value.
		 */
2092 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
2093 con->peer_global_seq,
2094 le32_to_cpu(con->in_reply.global_seq));
2095 get_global_seq(con->msgr,
2096 le32_to_cpu(con->in_reply.global_seq));
2097 con_out_kvec_reset(con);
2098 ret = prepare_write_connect(con);
2101 prepare_read_connect(con);
2104 case CEPH_MSGR_TAG_SEQ:
2105 case CEPH_MSGR_TAG_READY:
2106 if (req_feat & ~server_feat) {
2107 pr_err("%s%lld %s protocol feature mismatch,"
2108 " my required %llx > server's %llx, need %llx\n",
2109 ENTITY_NAME(con->peer_name),
2110 ceph_pr_addr(&con->peer_addr.in_addr),
2111 req_feat, server_feat, req_feat & ~server_feat);
2112 con->error_msg = "missing required protocol features";
2113 reset_connection(con);
2117 WARN_ON(con->state != CON_STATE_NEGOTIATING);
2118 con->state = CON_STATE_OPEN;
2119 con->auth_retry = 0; /* we authenticated; clear flag */
2120 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
2122 con->peer_features = server_feat;
2123 dout("process_connect got READY gseq %d cseq %d (%d)\n",
2124 con->peer_global_seq,
2125 le32_to_cpu(con->in_reply.connect_seq),
2127 WARN_ON(con->connect_seq !=
2128 le32_to_cpu(con->in_reply.connect_seq));
2130 if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
2131 con_flag_set(con, CON_FLAG_LOSSYTX);
2133 con->delay = 0; /* reset backoff memory */
2135 if (con->in_reply.tag == CEPH_MSGR_TAG_SEQ) {
2136 prepare_write_seq(con);
2137 prepare_read_seq(con);
2139 prepare_read_tag(con);
2143 case CEPH_MSGR_TAG_WAIT:
		/*
		 * If there is a connection race (we are opening
		 * connections to each other), one of us may just have
		 * to WAIT.  This shouldn't happen if we are the
		 * client.
		 */
2150 pr_err("process_connect got WAIT as client\n");
2151 con->error_msg = "protocol error, got WAIT as client";
2155 pr_err("connect protocol error, will retry\n");
2156 con->error_msg = "protocol error, garbage tag during connect";
/*
 * read (part of) an ack
 */
2166 static int read_partial_ack(struct ceph_connection *con)
2168 int size = sizeof (con->in_temp_ack);
2171 return read_partial(con, end, size, &con->in_temp_ack);
/*
 * We can finally discard anything that's been acked.
 */
2177 static void process_ack(struct ceph_connection *con)
2180 u64 ack = le64_to_cpu(con->in_temp_ack);
2183 while (!list_empty(&con->out_sent)) {
2184 m = list_first_entry(&con->out_sent, struct ceph_msg,
2186 seq = le64_to_cpu(m->hdr.seq);
2189 dout("got ack for seq %llu type %d at %p\n", seq,
2190 le16_to_cpu(m->hdr.type), m);
2191 m->ack_stamp = jiffies;
2194 prepare_read_tag(con);
2198 static int read_partial_message_section(struct ceph_connection *con,
2199 struct kvec *section,
2200 unsigned int sec_len, u32 *crc)
2206 while (section->iov_len < sec_len) {
2207 BUG_ON(section->iov_base == NULL);
2208 left = sec_len - section->iov_len;
2209 ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
2210 section->iov_len, left);
2213 section->iov_len += ret;
2215 if (section->iov_len == sec_len)
2216 *crc = crc32c(0, section->iov_base, section->iov_len);
2221 static int read_partial_msg_data(struct ceph_connection *con)
2223 struct ceph_msg *msg = con->in_msg;
2224 struct ceph_msg_data_cursor *cursor = &msg->cursor;
2225 const bool do_datacrc = !con->msgr->nocrc;
2233 if (list_empty(&msg->data))
2237 crc = con->in_data_crc;
2238 while (cursor->resid) {
2239 page = ceph_msg_data_next(&msg->cursor, &page_offset, &length,
2241 ret = ceph_tcp_recvpage(con->sock, page, page_offset, length);
2244 con->in_data_crc = crc;
2250 crc = ceph_crc32c_page(crc, page, page_offset, ret);
2251 (void) ceph_msg_data_advance(&msg->cursor, (size_t)ret);
2254 con->in_data_crc = crc;
2256 return 1; /* must return > 0 to indicate success */
/*
 * read (part of) a message.
 */
2262 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip);
2264 static int read_partial_message(struct ceph_connection *con)
2266 struct ceph_msg *m = con->in_msg;
2270 unsigned int front_len, middle_len, data_len;
2271 bool do_datacrc = !con->msgr->nocrc;
2272 bool need_sign = (con->peer_features & CEPH_FEATURE_MSG_AUTH);
2276 dout("read_partial_message con %p msg %p\n", con, m);
2279 size = sizeof (con->in_hdr);
2281 ret = read_partial(con, end, size, &con->in_hdr);
2285 crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
2286 if (cpu_to_le32(crc) != con->in_hdr.crc) {
2287 pr_err("read_partial_message bad hdr "
2288 " crc %u != expected %u\n",
2289 crc, con->in_hdr.crc);
2293 front_len = le32_to_cpu(con->in_hdr.front_len);
2294 if (front_len > CEPH_MSG_MAX_FRONT_LEN)
2296 middle_len = le32_to_cpu(con->in_hdr.middle_len);
2297 if (middle_len > CEPH_MSG_MAX_MIDDLE_LEN)
2299 data_len = le32_to_cpu(con->in_hdr.data_len);
2300 if (data_len > CEPH_MSG_MAX_DATA_LEN)
2304 seq = le64_to_cpu(con->in_hdr.seq);
2305 if ((s64)seq - (s64)con->in_seq < 1) {
2306 pr_info("skipping %s%lld %s seq %lld expected %lld\n",
2307 ENTITY_NAME(con->peer_name),
2308 ceph_pr_addr(&con->peer_addr.in_addr),
2309 seq, con->in_seq + 1);
2310 con->in_base_pos = -front_len - middle_len - data_len -
2312 con->in_tag = CEPH_MSGR_TAG_READY;
2314 } else if ((s64)seq - (s64)con->in_seq > 1) {
2315 pr_err("read_partial_message bad seq %lld expected %lld\n",
2316 seq, con->in_seq + 1);
2317 con->error_msg = "bad message sequence # for incoming message";
2321 /* allocate message? */
2325 dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
2326 front_len, data_len);
2327 ret = ceph_con_in_msg_alloc(con, &skip);
2331 BUG_ON(!con->in_msg ^ skip);
2332 if (con->in_msg && data_len > con->in_msg->data_length) {
2333 pr_warn("%s skipping long message (%u > %zd)\n",
2334 __func__, data_len, con->in_msg->data_length);
2335 ceph_msg_put(con->in_msg);
2340 /* skip this message */
2341 dout("alloc_msg said skip message\n");
2342 con->in_base_pos = -front_len - middle_len - data_len -
2344 con->in_tag = CEPH_MSGR_TAG_READY;
2349 BUG_ON(!con->in_msg);
2350 BUG_ON(con->in_msg->con != con);
2352 m->front.iov_len = 0; /* haven't read it yet */
2354 m->middle->vec.iov_len = 0;
2356 /* prepare for data payload, if any */
2359 prepare_message_data(con->in_msg, data_len);
2363 ret = read_partial_message_section(con, &m->front, front_len,
2364 &con->in_front_crc);
2370 ret = read_partial_message_section(con, &m->middle->vec,
2372 &con->in_middle_crc);
2379 ret = read_partial_msg_data(con);
2386 size = sizeof(m->footer);
2388 size = sizeof(m->old_footer);
2391 ret = read_partial(con, end, size, &m->footer);
2396 m->footer.flags = m->old_footer.flags;
2400 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
2401 m, front_len, m->footer.front_crc, middle_len,
2402 m->footer.middle_crc, data_len, m->footer.data_crc);
2405 if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
2406 pr_err("read_partial_message %p front crc %u != exp. %u\n",
2407 m, con->in_front_crc, m->footer.front_crc);
2410 if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
2411 pr_err("read_partial_message %p middle crc %u != exp %u\n",
2412 m, con->in_middle_crc, m->footer.middle_crc);
2416 (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
2417 con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
2418 pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
2419 con->in_data_crc, le32_to_cpu(m->footer.data_crc));
2423 if (need_sign && con->ops->check_message_signature &&
2424 con->ops->check_message_signature(con, m)) {
2425 pr_err("read_partial_message %p signature check failed\n", m);
2429 return 1; /* done! */
/*
 * Process message.  This happens in the worker thread.  The callback should
 * be careful not to do anything that waits on other incoming messages or it
 * may deadlock.
 */
2437 static void process_message(struct ceph_connection *con)
2439 struct ceph_msg *msg;
2441 BUG_ON(con->in_msg->con != con);
2442 con->in_msg->con = NULL;
2447 /* if first message, set peer_name */
2448 if (con->peer_name.type == 0)
2449 con->peer_name = msg->hdr.src;
2452 mutex_unlock(&con->mutex);
2454 dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
2455 msg, le64_to_cpu(msg->hdr.seq),
2456 ENTITY_NAME(msg->hdr.src),
2457 le16_to_cpu(msg->hdr.type),
2458 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
2459 le32_to_cpu(msg->hdr.front_len),
2460 le32_to_cpu(msg->hdr.data_len),
2461 con->in_front_crc, con->in_middle_crc, con->in_data_crc);
2462 con->ops->dispatch(con, msg);
2464 mutex_lock(&con->mutex);
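/*
 * Note that con->mutex is dropped around the con->ops->dispatch() call
 * and re-acquired afterwards; this is what lets a dispatch callback send
 * replies on the same connection without deadlocking, and it is also why
 * the comment above warns against waiting on other incoming messages.
 */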
2469 * Write something to the socket. Called in a worker thread when the
2470 * socket appears to be writeable and we have something ready to send.
2472 static int try_write(struct ceph_connection *con)
2476 dout("try_write start %p state %lu\n", con, con->state);
2479 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
2481 /* open the socket first? */
2482 if (con->state == CON_STATE_PREOPEN) {
2484 con->state = CON_STATE_CONNECTING;
2486 con_out_kvec_reset(con);
2487 prepare_write_banner(con);
2488 prepare_read_banner(con);
2490 BUG_ON(con->in_msg);
2491 con->in_tag = CEPH_MSGR_TAG_READY;
2492 dout("try_write initiating connect on %p new state %lu\n",
2494 ret = ceph_tcp_connect(con);
2496 con->error_msg = "connect error";
2502 /* kvec data queued? */
2503 if (con->out_skip) {
2504 ret = write_partial_skip(con);
2508 if (con->out_kvec_left) {
2509 ret = write_partial_kvec(con);
2516 if (con->out_msg_done) {
2517 ceph_msg_put(con->out_msg);
2518 con->out_msg = NULL; /* we're done with this one */
2522 ret = write_partial_message_data(con);
2524 goto more_kvec; /* we need to send the footer, too! */
2528 dout("try_write write_partial_message_data err %d\n",
2535 if (con->state == CON_STATE_OPEN) {
2536 /* is anything else pending? */
2537 if (!list_empty(&con->out_queue)) {
2538 prepare_write_message(con);
2541 if (con->in_seq > con->in_seq_acked) {
2542 prepare_write_ack(con);
2545 if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) {
2546 prepare_write_keepalive(con);
2551 /* Nothing to do! */
2552 con_flag_clear(con, CON_FLAG_WRITE_PENDING);
2553 dout("try_write nothing else to write.\n");
2556 dout("try_write done on %p ret %d\n", con, ret);
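/*
 * Rough shape of try_write(), as a reading aid: a PREOPEN connection
 * first gets its socket created and its banner queued; after that each
 * pass flushes any pending out_skip and out_kvec bytes, then outstanding
 * message data, and finally, when the connection is OPEN and idle, queues
 * the next message, ack or keepalive before clearing WRITE_PENDING.
 */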
2563 * Read what we can from the socket.
2565 static int try_read(struct ceph_connection *con)
2570 dout("try_read start on %p state %lu\n", con, con->state);
2571 if (con->state != CON_STATE_CONNECTING &&
2572 con->state != CON_STATE_NEGOTIATING &&
2573 con->state != CON_STATE_OPEN)
2578 dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
2581 if (con->state == CON_STATE_CONNECTING) {
2582 dout("try_read connecting\n");
2583 ret = read_partial_banner(con);
2586 ret = process_banner(con);
2590 con->state = CON_STATE_NEGOTIATING;
2593 * Received banner is good, exchange connection info.
2594 * Do not reset out_kvec, as sending our banner raced
2595 * with receiving peer banner after connect completed.
2597 ret = prepare_write_connect(con);
2600 prepare_read_connect(con);
2602 /* Send connection info before awaiting response */
2606 if (con->state == CON_STATE_NEGOTIATING) {
2607 dout("try_read negotiating\n");
2608 ret = read_partial_connect(con);
2611 ret = process_connect(con);
2617 WARN_ON(con->state != CON_STATE_OPEN);
2619 if (con->in_base_pos < 0) {
2621 * skipping + discarding content.
2623 * FIXME: there must be a better way to do this!
2625 static char buf[SKIP_BUF_SIZE];
2626 int skip = min((int) sizeof (buf), -con->in_base_pos);
2628 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
2629 ret = ceph_tcp_recvmsg(con->sock, buf, skip);
2632 con->in_base_pos += ret;
2633 if (con->in_base_pos)
2636 if (con->in_tag == CEPH_MSGR_TAG_READY) {
2640 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
2643 dout("try_read got tag %d\n", (int)con->in_tag);
2644 switch (con->in_tag) {
2645 case CEPH_MSGR_TAG_MSG:
2646 prepare_read_message(con);
2648 case CEPH_MSGR_TAG_ACK:
2649 prepare_read_ack(con);
2651 case CEPH_MSGR_TAG_CLOSE:
2652 con_close_socket(con);
2653 con->state = CON_STATE_CLOSED;
2659 if (con->in_tag == CEPH_MSGR_TAG_MSG) {
2660 ret = read_partial_message(con);
2664 con->error_msg = "bad crc";
2668 con->error_msg = "io error";
2673 if (con->in_tag == CEPH_MSGR_TAG_READY)
2675 process_message(con);
2676 if (con->state == CON_STATE_OPEN)
2677 prepare_read_tag(con);
2680 if (con->in_tag == CEPH_MSGR_TAG_ACK ||
2681 con->in_tag == CEPH_MSGR_TAG_SEQ) {
2683 * the final handshake seq exchange is semantically
2684 * equivalent to an ACK
2686 ret = read_partial_ack(con);
2694 dout("try_read done on %p ret %d\n", con, ret);
2698 pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
2699 con->error_msg = "protocol error, garbage tag";
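/*
 * Once the connection is OPEN, try_read() is driven by single tag bytes:
 * MSG, ACK/SEQ and CLOSE each select a parser, READY means "read the next
 * tag", a negative in_base_pos means that many bytes are still to be read
 * and discarded (used when a message is skipped or revoked), and any
 * other tag is treated as a protocol error.
 */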
2706 * Atomically queue work on a connection after the specified delay.
2707 * Bump @con reference to avoid races with connection teardown.
2708 * Returns 0 if work was queued, or an error code otherwise.
2710 static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
2712 if (!con->ops->get(con)) {
2713 dout("%s %p ref count 0\n", __func__, con);
2717 if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
2718 dout("%s %p - already queued\n", __func__, con);
2723 dout("%s %p %lu\n", __func__, con, delay);
2727 static void queue_con(struct ceph_connection *con)
2729 (void) queue_con_delay(con, 0);
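/*
 * Reference counting for queued work: queue_con_delay() takes a
 * connection reference via con->ops->get() before queueing, and
 * con_work() (below) drops it when the work item completes.  If the work
 * turns out to be queued already, the just-taken reference is presumably
 * released again in the elided error path.
 */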
2732 static void cancel_con(struct ceph_connection *con)
2734 if (cancel_delayed_work(&con->work)) {
2735 dout("%s %p\n", __func__, con);
2740 static bool con_sock_closed(struct ceph_connection *con)
2742 if (!con_flag_test_and_clear(con, CON_FLAG_SOCK_CLOSED))
2746 case CON_STATE_ ## x: \
2747 con->error_msg = "socket closed (con state " #x ")"; \
2750 switch (con->state) {
2758 pr_warn("%s con %p unrecognized state %lu\n",
2759 __func__, con, con->state);
2760 con->error_msg = "unrecognized con state";
2769 static bool con_backoff(struct ceph_connection *con)
2773 if (!con_flag_test_and_clear(con, CON_FLAG_BACKOFF))
2776 ret = queue_con_delay(con, round_jiffies_relative(con->delay));
2778 dout("%s: con %p FAILED to back off %lu\n", __func__,
2780 BUG_ON(ret == -ENOENT);
2781 con_flag_set(con, CON_FLAG_BACKOFF);
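/*
 * con_backoff() consumes the BACKOFF flag and re-queues the work item
 * after roughly con->delay jiffies (rounded by round_jiffies_relative()).
 * If the requeue fails, the flag is set again so a later pass can retry;
 * -ENOENT would mean the connection reference was already gone, which is
 * treated as a bug here.
 */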
2787 /* Finish fault handling; con->mutex must *not* be held here */
2789 static void con_fault_finish(struct ceph_connection *con)
2792 * in case we faulted due to authentication, invalidate our
2793 * current tickets so that we can get new ones.
2795 if (con->auth_retry && con->ops->invalidate_authorizer) {
2796 dout("calling invalidate_authorizer()\n");
2797 con->ops->invalidate_authorizer(con);
2800 if (con->ops->fault)
2801 con->ops->fault(con);
2805 * Do some work on a connection. Drop a connection ref when we're done.
2807 static void con_work(struct work_struct *work)
2809 struct ceph_connection *con = container_of(work, struct ceph_connection,
2811 unsigned long pflags = current->flags;
2814 current->flags |= PF_MEMALLOC;
2816 mutex_lock(&con->mutex);
2820 if ((fault = con_sock_closed(con))) {
2821 dout("%s: con %p SOCK_CLOSED\n", __func__, con);
2824 if (con_backoff(con)) {
2825 dout("%s: con %p BACKOFF\n", __func__, con);
2828 if (con->state == CON_STATE_STANDBY) {
2829 dout("%s: con %p STANDBY\n", __func__, con);
2832 if (con->state == CON_STATE_CLOSED) {
2833 dout("%s: con %p CLOSED\n", __func__, con);
2837 if (con->state == CON_STATE_PREOPEN) {
2838 dout("%s: con %p PREOPEN\n", __func__, con);
2842 ret = try_read(con);
2846 con->error_msg = "socket error on read";
2851 ret = try_write(con);
2855 con->error_msg = "socket error on write";
2859 break; /* If we make it to here, we're done */
2863 mutex_unlock(&con->mutex);
2866 con_fault_finish(con);
2870 tsk_restore_flags(current, pflags, PF_MEMALLOC);
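/*
 * PF_MEMALLOC is set for the duration of con_work(), presumably so that
 * socket traffic generated while the system is reclaiming memory (e.g.
 * writeback over the network) can dip into the memory reserves instead of
 * deadlocking; the task's original flags are restored before returning.
 */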
2874 * Generic error/fault handler. A retry mechanism is used with
2875 * exponential backoff
2877 static void con_fault(struct ceph_connection *con)
2879 pr_warn("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
2880 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
2881 dout("fault %p state %lu to peer %s\n",
2882 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
2884 WARN_ON(con->state != CON_STATE_CONNECTING &&
2885 con->state != CON_STATE_NEGOTIATING &&
2886 con->state != CON_STATE_OPEN);
2888 con_close_socket(con);
2890 if (con_flag_test(con, CON_FLAG_LOSSYTX)) {
2891 dout("fault on LOSSYTX channel, marking CLOSED\n");
2892 con->state = CON_STATE_CLOSED;
2897 BUG_ON(con->in_msg->con != con);
2898 con->in_msg->con = NULL;
2899 ceph_msg_put(con->in_msg);
2904 /* Requeue anything that hasn't been acked */
2905 list_splice_init(&con->out_sent, &con->out_queue);
2907 /* If there are no messages queued or keepalive pending, place
2908 * the connection in a STANDBY state */
2909 if (list_empty(&con->out_queue) &&
2910 !con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)) {
2911 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
2912 con_flag_clear(con, CON_FLAG_WRITE_PENDING);
2913 con->state = CON_STATE_STANDBY;
2915 /* retry after a delay. */
2916 con->state = CON_STATE_PREOPEN;
2917 if (con->delay == 0)
2918 con->delay = BASE_DELAY_INTERVAL;
2919 else if (con->delay < MAX_DELAY_INTERVAL)
2920 con->delay *= 2;
2921 con_flag_set(con, CON_FLAG_BACKOFF);
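/*
 * The resulting retry schedule is exponential: BASE_DELAY_INTERVAL after
 * the first fault, then doubling on each further fault while the delay is
 * still below MAX_DELAY_INTERVAL, with con_backoff() (above) re-queueing
 * the work after that delay.
 */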
2929 * initialize a new messenger instance
2931 void ceph_messenger_init(struct ceph_messenger *msgr,
2932 struct ceph_entity_addr *myaddr,
2933 u64 supported_features,
2934 u64 required_features,
2938 msgr->supported_features = supported_features;
2939 msgr->required_features = required_features;
2941 spin_lock_init(&msgr->global_seq_lock);
2944 msgr->inst.addr = *myaddr;
2946 /* select a random nonce */
2947 msgr->inst.addr.type = 0;
2948 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
2949 encode_my_addr(msgr);
2950 msgr->nocrc = nocrc;
2951 msgr->tcp_nodelay = tcp_nodelay;
2953 atomic_set(&msgr->stopping, 0);
2955 dout("%s %p\n", __func__, msgr);
2957 EXPORT_SYMBOL(ceph_messenger_init);
2959 static void clear_standby(struct ceph_connection *con)
2961 /* come back from STANDBY? */
2962 if (con->state == CON_STATE_STANDBY) {
2963 dout("clear_standby %p and ++connect_seq\n", con);
2964 con->state = CON_STATE_PREOPEN;
2966 WARN_ON(con_flag_test(con, CON_FLAG_WRITE_PENDING));
2967 WARN_ON(con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING));
2972 * Queue up an outgoing message on the given connection.
2974 void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
2977 msg->hdr.src = con->msgr->inst.name;
2978 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
2979 msg->needs_out_seq = true;
2981 mutex_lock(&con->mutex);
2983 if (con->state == CON_STATE_CLOSED) {
2984 dout("con_send %p closed, dropping %p\n", con, msg);
2986 mutex_unlock(&con->mutex);
2990 BUG_ON(msg->con != NULL);
2991 msg->con = con->ops->get(con);
2992 BUG_ON(msg->con == NULL);
2994 BUG_ON(!list_empty(&msg->list_head));
2995 list_add_tail(&msg->list_head, &con->out_queue);
2996 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
2997 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
2998 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
2999 le32_to_cpu(msg->hdr.front_len),
3000 le32_to_cpu(msg->hdr.middle_len),
3001 le32_to_cpu(msg->hdr.data_len));
3004 mutex_unlock(&con->mutex);
3006 /* if there wasn't anything waiting to send before, queue
3007 * new work */
3008 if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
3011 EXPORT_SYMBOL(ceph_con_send);
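/*
 * Minimal usage sketch (illustrative only; MY_MSG_TYPE, struct my_payload
 * and the final ceph_msg_new() argument, which is not visible above, are
 * placeholders):
 *
 *	struct ceph_msg *msg;
 *
 *	msg = ceph_msg_new(MY_MSG_TYPE, sizeof(struct my_payload),
 *			   GFP_NOFS, false);
 *	if (!msg)
 *		return -ENOMEM;
 *	memcpy(msg->front.iov_base, &payload, sizeof(payload));
 *	ceph_con_send(con, msg);
 *
 * The caller's message reference is handed over: the connection either
 * queues the message (taking a connection ref for msg->con) or, when it
 * is CLOSED, presumably drops and puts the message itself, so the caller
 * must not touch msg after this call.
 */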
3014 * Revoke a message that was previously queued for send
3016 void ceph_msg_revoke(struct ceph_msg *msg)
3018 struct ceph_connection *con = msg->con;
3021 return; /* Message not in our possession */
3023 mutex_lock(&con->mutex);
3024 if (!list_empty(&msg->list_head)) {
3025 dout("%s %p msg %p - was on queue\n", __func__, con, msg);
3026 list_del_init(&msg->list_head);
3027 BUG_ON(msg->con == NULL);
3028 msg->con->ops->put(msg->con);
3034 if (con->out_msg == msg) {
3035 dout("%s %p msg %p - was sending\n", __func__, con, msg);
3036 con->out_msg = NULL;
3037 if (con->out_kvec_is_msg) {
3038 con->out_skip = con->out_kvec_bytes;
3039 con->out_kvec_is_msg = false;
3045 mutex_unlock(&con->mutex);
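/*
 * Two cases are handled above: a revoked message still sitting on
 * out_queue is simply unlinked (dropping the connection reference it
 * held), while a message that is currently being written is detached from
 * con->out_msg and its already-staged kvec bytes are converted into
 * out_skip, which write_partial_skip() (via try_write()) later consumes.
 */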
3049 * Revoke a message that we may be reading data into
3051 void ceph_msg_revoke_incoming(struct ceph_msg *msg)
3053 struct ceph_connection *con;
3055 BUG_ON(msg == NULL);
3057 dout("%s msg %p null con\n", __func__, msg);
3059 return; /* Message not in our possession */
3063 mutex_lock(&con->mutex);
3064 if (con->in_msg == msg) {
3065 unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
3066 unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
3067 unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);
3069 /* skip rest of message */
3070 dout("%s %p msg %p revoked\n", __func__, con, msg);
3071 con->in_base_pos = con->in_base_pos -
3072 sizeof(struct ceph_msg_header) -
3076 sizeof(struct ceph_msg_footer);
3077 ceph_msg_put(con->in_msg);
3079 con->in_tag = CEPH_MSGR_TAG_READY;
3082 dout("%s %p in_msg %p msg %p no-op\n",
3083 __func__, con, con->in_msg, msg);
3085 mutex_unlock(&con->mutex);
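/*
 * Revoking an incoming message relies on the negative in_base_pos
 * convention: the count of message bytes still expected on the wire is
 * negated, and the in_base_pos < 0 branch of try_read() then reads and
 * discards exactly that many bytes before looking for the next tag.
 */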
3089 * Queue a keepalive byte to ensure the tcp connection is alive.
3091 void ceph_con_keepalive(struct ceph_connection *con)
3093 dout("con_keepalive %p\n", con);
3094 mutex_lock(&con->mutex);
3096 mutex_unlock(&con->mutex);
3097 if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
3098 con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
3101 EXPORT_SYMBOL(ceph_con_keepalive);
3103 static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type)
3105 struct ceph_msg_data *data;
3107 if (WARN_ON(!ceph_msg_data_type_valid(type)))
3110 data = kmem_cache_zalloc(ceph_msg_data_cache, GFP_NOFS);
3113 INIT_LIST_HEAD(&data->links);
3118 static void ceph_msg_data_destroy(struct ceph_msg_data *data)
3123 WARN_ON(!list_empty(&data->links));
3124 if (data->type == CEPH_MSG_DATA_PAGELIST)
3125 ceph_pagelist_release(data->pagelist);
3126 kmem_cache_free(ceph_msg_data_cache, data);
3129 void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
3130 size_t length, size_t alignment)
3132 struct ceph_msg_data *data;
3137 data = ceph_msg_data_create(CEPH_MSG_DATA_PAGES);
3139 data->pages = pages;
3140 data->length = length;
3141 data->alignment = alignment & ~PAGE_MASK;
3143 list_add_tail(&data->links, &msg->data);
3144 msg->data_length += length;
3146 EXPORT_SYMBOL(ceph_msg_data_add_pages);
3148 void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
3149 struct ceph_pagelist *pagelist)
3151 struct ceph_msg_data *data;
3154 BUG_ON(!pagelist->length);
3156 data = ceph_msg_data_create(CEPH_MSG_DATA_PAGELIST);
3158 data->pagelist = pagelist;
3160 list_add_tail(&data->links, &msg->data);
3161 msg->data_length += pagelist->length;
3163 EXPORT_SYMBOL(ceph_msg_data_add_pagelist);
3166 void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio,
3169 struct ceph_msg_data *data;
3173 data = ceph_msg_data_create(CEPH_MSG_DATA_BIO);
3176 data->bio_length = length;
3178 list_add_tail(&data->links, &msg->data);
3179 msg->data_length += length;
3181 EXPORT_SYMBOL(ceph_msg_data_add_bio);
3182 #endif /* CONFIG_BLOCK */
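/*
 * Illustrative call (page_vec, bytes and page_off are hypothetical
 * caller-side values):
 *
 *	ceph_msg_data_add_pages(msg, page_vec, bytes, page_off);
 *
 * Each of the add helpers appends a ceph_msg_data descriptor to msg->data
 * and bumps msg->data_length; the payload itself is referenced, not
 * copied.  A pagelist is released when the message is destroyed (see
 * ceph_msg_data_destroy() above), whereas plain pages and bios remain the
 * caller's responsibility.
 */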
3185 * construct a new message with given type, size
3186 * the new msg has a ref count of 1.
3188 struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
3193 m = kmem_cache_zalloc(ceph_msg_cache, flags);
3197 m->hdr.type = cpu_to_le16(type);
3198 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
3199 m->hdr.front_len = cpu_to_le32(front_len);
3201 INIT_LIST_HEAD(&m->list_head);
3202 kref_init(&m->kref);
3203 INIT_LIST_HEAD(&m->data);
3207 m->front.iov_base = ceph_kvmalloc(front_len, flags);
3208 if (m->front.iov_base == NULL) {
3209 dout("ceph_msg_new can't allocate %d bytes\n",
3214 m->front.iov_base = NULL;
3216 m->front_alloc_len = m->front.iov_len = front_len;
3218 dout("ceph_msg_new %p front %d\n", m, front_len);
3225 pr_err("msg_new can't create type %d front %d\n", type,
3229 dout("msg_new can't create type %d front %d\n", type,
3234 EXPORT_SYMBOL(ceph_msg_new);
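/*
 * A zero front_len appears to leave m->front.iov_base NULL rather than
 * failing, and the behaviour on allocation failure seems to depend on the
 * elided final parameter (one path merely dout()s while the other
 * pr_err()s), so callers should always check for a NULL return.
 */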
3237 * Allocate "middle" portion of a message, if it is needed and wasn't
3238 * allocated by alloc_msg. This allows us to read a small fixed-size
3239 * per-type header in the front and then gracefully fail (i.e.,
3240 * propagate the error to the caller based on info in the front) when
3241 * the middle is too large.
3243 static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
3245 int type = le16_to_cpu(msg->hdr.type);
3246 int middle_len = le32_to_cpu(msg->hdr.middle_len);
3248 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
3249 ceph_msg_type_name(type), middle_len);
3250 BUG_ON(!middle_len);
3251 BUG_ON(msg->middle);
3253 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
3260 * Allocate a message for receiving an incoming message on a
3261 * connection, and save the result in con->in_msg. Uses the
3262 * connection's private alloc_msg op if available.
3264 * Returns 0 on success, or a negative error code.
3266 * On success, if we set *skip = 1:
3267 * - the next message should be skipped and ignored.
3268 * - con->in_msg == NULL
3269 * or if we set *skip = 0:
3270 * - con->in_msg is non-null.
3271 * On error (ENOMEM, EAGAIN, ...),
3272 * - con->in_msg == NULL
3274 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
3276 struct ceph_msg_header *hdr = &con->in_hdr;
3277 int middle_len = le32_to_cpu(hdr->middle_len);
3278 struct ceph_msg *msg;
3281 BUG_ON(con->in_msg != NULL);
3282 BUG_ON(!con->ops->alloc_msg);
3284 mutex_unlock(&con->mutex);
3285 msg = con->ops->alloc_msg(con, hdr, skip);
3286 mutex_lock(&con->mutex);
3287 if (con->state != CON_STATE_OPEN) {
3295 con->in_msg->con = con->ops->get(con);
3296 BUG_ON(con->in_msg->con == NULL);
3299 * Null message pointer means either we should skip
3300 * this message or we couldn't allocate memory. The
3301 * former is not an error.
3305 con->error_msg = "error allocating memory for incoming message";
3309 memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
3311 if (middle_len && !con->in_msg->middle) {
3312 ret = ceph_alloc_middle(con, con->in_msg);
3314 ceph_msg_put(con->in_msg);
3324 * Free a generically kmalloc'd message.
3326 static void ceph_msg_free(struct ceph_msg *m)
3328 dout("%s %p\n", __func__, m);
3329 kvfree(m->front.iov_base);
3330 kmem_cache_free(ceph_msg_cache, m);
3333 static void ceph_msg_release(struct kref *kref)
3335 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
3337 struct list_head *links;
3338 struct list_head *next;
3340 dout("%s %p\n", __func__, m);
3341 WARN_ON(!list_empty(&m->list_head));
3343 /* drop middle, data, if any */
3345 ceph_buffer_put(m->middle);
3349 list_splice_init(&m->data, &data);
3350 list_for_each_safe(links, next, &data) {
3351 struct ceph_msg_data *data;
3353 data = list_entry(links, struct ceph_msg_data, links);
3354 list_del_init(links);
3355 ceph_msg_data_destroy(data);
3360 ceph_msgpool_put(m->pool, m);
3365 struct ceph_msg *ceph_msg_get(struct ceph_msg *msg)
3367 dout("%s %p (was %d)\n", __func__, msg,
3368 atomic_read(&msg->kref.refcount));
3369 kref_get(&msg->kref);
3372 EXPORT_SYMBOL(ceph_msg_get);
3374 void ceph_msg_put(struct ceph_msg *msg)
3376 dout("%s %p (was %d)\n", __func__, msg,
3377 atomic_read(&msg->kref.refcount));
3378 kref_put(&msg->kref, ceph_msg_release);
3380 EXPORT_SYMBOL(ceph_msg_put);
3382 void ceph_msg_dump(struct ceph_msg *msg)
3384 pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg,
3385 msg->front_alloc_len, msg->data_length);
3386 print_hex_dump(KERN_DEBUG, "header: ",
3387 DUMP_PREFIX_OFFSET, 16, 1,
3388 &msg->hdr, sizeof(msg->hdr), true);
3389 print_hex_dump(KERN_DEBUG, " front: ",
3390 DUMP_PREFIX_OFFSET, 16, 1,
3391 msg->front.iov_base, msg->front.iov_len, true);
3393 print_hex_dump(KERN_DEBUG, "middle: ",
3394 DUMP_PREFIX_OFFSET, 16, 1,
3395 msg->middle->vec.iov_base,
3396 msg->middle->vec.iov_len, true);
3397 print_hex_dump(KERN_DEBUG, "footer: ",
3398 DUMP_PREFIX_OFFSET, 16, 1,
3399 &msg->footer, sizeof(msg->footer), true);
3401 EXPORT_SYMBOL(ceph_msg_dump);