1 #include <linux/ceph/ceph_debug.h>
3 #include <linux/crc32c.h>
4 #include <linux/ctype.h>
5 #include <linux/highmem.h>
6 #include <linux/inet.h>
7 #include <linux/kthread.h>
9 #include <linux/nsproxy.h>
10 #include <linux/sched.h>
11 #include <linux/slab.h>
12 #include <linux/socket.h>
13 #include <linux/string.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif /* CONFIG_BLOCK */
17 #include <linux/dns_resolver.h>
20 #include <linux/ceph/ceph_features.h>
21 #include <linux/ceph/libceph.h>
22 #include <linux/ceph/messenger.h>
23 #include <linux/ceph/decode.h>
24 #include <linux/ceph/pagelist.h>
25 #include <linux/export.h>
/*
 * Return the entry that follows "pos" on a list linked through "member".
 */
#define list_entry_next(pos, member)					\
	list_entry(pos->member.next, typeof(*pos), member)
/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */
/*
 * We track the state of the socket on a given connection using
 * values defined below.  The transition to a new socket state is
 * handled by a function which verifies we aren't coming from an
 * unexpected state.
 *
 *   NEW*        transient initial state
 *                 con_sock_state_init()        -> CLOSED
 *   CLOSED      initialized, but no socket (and no TCP connection)
 *                 con_sock_state_connecting()  -> CONNECTING
 *   CONNECTING  socket created, TCP connect initiated
 *                 con_sock_state_connected()   -> CONNECTED
 *                 con_sock_state_closing()     -> CLOSING
 *   CONNECTED   TCP connection established
 *                 con_sock_state_closing()     -> CLOSING
 *                 con_sock_state_closed()      -> CLOSED
 *   CLOSING     socket event; awaiting close
 *                 con_sock_state_closed()      -> CLOSED
 *
 * State values for ceph_connection->sock_state; NEW is assumed to be 0.
 */
81 #define CON_SOCK_STATE_NEW 0 /* -> CLOSED */
82 #define CON_SOCK_STATE_CLOSED 1 /* -> CONNECTING */
83 #define CON_SOCK_STATE_CONNECTING 2 /* -> CONNECTED or -> CLOSING */
84 #define CON_SOCK_STATE_CONNECTED 3 /* -> CLOSING or -> CLOSED */
85 #define CON_SOCK_STATE_CLOSING 4 /* -> CLOSED */
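/*
 * connection states
 */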
90 #define CON_STATE_CLOSED 1 /* -> PREOPEN */
91 #define CON_STATE_PREOPEN 2 /* -> CONNECTING, CLOSED */
92 #define CON_STATE_CONNECTING 3 /* -> NEGOTIATING, CLOSED */
93 #define CON_STATE_NEGOTIATING 4 /* -> OPEN, CLOSED */
94 #define CON_STATE_OPEN 5 /* -> STANDBY, CLOSED */
95 #define CON_STATE_STANDBY 6 /* -> PREOPEN, CLOSED */
/*
 * ceph_connection flag bits
 */
100 #define CON_FLAG_LOSSYTX 0 /* we can close channel or drop
101 * messages on errors */
102 #define CON_FLAG_KEEPALIVE_PENDING 1 /* we need to send a keepalive */
103 #define CON_FLAG_WRITE_PENDING 2 /* we have data ready to send */
104 #define CON_FLAG_SOCK_CLOSED 3 /* socket state changed to closed */
105 #define CON_FLAG_BACKOFF 4 /* need to retry queuing delayed work */
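/*
 * The values above are bit numbers in con->flags.  They are only
 * manipulated through the con_flag_*() helpers below, which validate
 * the bit number and then use atomic bitops.
 */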
107 static bool con_flag_valid(unsigned long con_flag)
110 case CON_FLAG_LOSSYTX:
111 case CON_FLAG_KEEPALIVE_PENDING:
112 case CON_FLAG_WRITE_PENDING:
113 case CON_FLAG_SOCK_CLOSED:
114 case CON_FLAG_BACKOFF:
121 static void con_flag_clear(struct ceph_connection *con, unsigned long con_flag)
123 BUG_ON(!con_flag_valid(con_flag));
125 clear_bit(con_flag, &con->flags);
128 static void con_flag_set(struct ceph_connection *con, unsigned long con_flag)
130 BUG_ON(!con_flag_valid(con_flag));
132 set_bit(con_flag, &con->flags);
135 static bool con_flag_test(struct ceph_connection *con, unsigned long con_flag)
137 BUG_ON(!con_flag_valid(con_flag));
139 return test_bit(con_flag, &con->flags);
142 static bool con_flag_test_and_clear(struct ceph_connection *con,
143 unsigned long con_flag)
145 BUG_ON(!con_flag_valid(con_flag));
147 return test_and_clear_bit(con_flag, &con->flags);
150 static bool con_flag_test_and_set(struct ceph_connection *con,
151 unsigned long con_flag)
153 BUG_ON(!con_flag_valid(con_flag));
155 return test_and_set_bit(con_flag, &con->flags);
158 /* Slab caches for frequently-allocated structures */
160 static struct kmem_cache *ceph_msg_cache;
161 static struct kmem_cache *ceph_msg_data_cache;
163 /* static tag bytes (protocol control messages) */
164 static char tag_msg = CEPH_MSGR_TAG_MSG;
165 static char tag_ack = CEPH_MSGR_TAG_ACK;
166 static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;
167 static char tag_keepalive2 = CEPH_MSGR_TAG_KEEPALIVE2;
#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif
/*
 * When skipping (ignoring) a block of input we read it into a "skip
 * buffer," which is this many bytes in size.
 */
#define SKIP_BUF_SIZE	1024
179 static void queue_con(struct ceph_connection *con);
180 static void cancel_con(struct ceph_connection *con);
181 static void ceph_con_workfn(struct work_struct *);
182 static void con_fault(struct ceph_connection *con);
185 * Nicely render a sockaddr as a string. An array of formatted
186 * strings is used, to approximate reentrancy.
188 #define ADDR_STR_COUNT_LOG 5 /* log2(# address strings in array) */
189 #define ADDR_STR_COUNT (1 << ADDR_STR_COUNT_LOG)
190 #define ADDR_STR_COUNT_MASK (ADDR_STR_COUNT - 1)
191 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
193 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
194 static atomic_t addr_str_seq = ATOMIC_INIT(0);
196 static struct page *zero_page; /* used in certain error cases */
198 const char *ceph_pr_addr(const struct sockaddr_storage *ss)
202 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
203 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
205 i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
208 switch (ss->ss_family) {
210 snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%hu", &in4->sin_addr,
211 ntohs(in4->sin_port));
215 snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%hu", &in6->sin6_addr,
216 ntohs(in6->sin6_port));
220 snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %hu)",
226 EXPORT_SYMBOL(ceph_pr_addr);
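/*
 * The returned string lives in a small rotating array, so callers are
 * expected to consume it immediately, typically inside the same
 * dout()/pr_*() statement, e.g.:
 *
 *	dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
 */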
228 static void encode_my_addr(struct ceph_messenger *msgr)
230 memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
231 ceph_encode_addr(&msgr->my_enc_addr);
235 * work queue for all reading and writing to/from the socket.
237 static struct workqueue_struct *ceph_msgr_wq;
239 static int ceph_msgr_slab_init(void)
241 BUG_ON(ceph_msg_cache);
242 ceph_msg_cache = kmem_cache_create("ceph_msg",
243 sizeof (struct ceph_msg),
244 __alignof__(struct ceph_msg), 0, NULL);
249 BUG_ON(ceph_msg_data_cache);
250 ceph_msg_data_cache = kmem_cache_create("ceph_msg_data",
251 sizeof (struct ceph_msg_data),
					   __alignof__(struct ceph_msg_data),
					   0, NULL);
	if (ceph_msg_data_cache)
		return 0;
257 kmem_cache_destroy(ceph_msg_cache);
258 ceph_msg_cache = NULL;
263 static void ceph_msgr_slab_exit(void)
265 BUG_ON(!ceph_msg_data_cache);
266 kmem_cache_destroy(ceph_msg_data_cache);
267 ceph_msg_data_cache = NULL;
269 BUG_ON(!ceph_msg_cache);
270 kmem_cache_destroy(ceph_msg_cache);
271 ceph_msg_cache = NULL;
274 static void _ceph_msgr_exit(void)
277 destroy_workqueue(ceph_msgr_wq);
281 BUG_ON(zero_page == NULL);
282 page_cache_release(zero_page);
285 ceph_msgr_slab_exit();
288 int ceph_msgr_init(void)
	if (ceph_msgr_slab_init())
		return -ENOMEM;
293 BUG_ON(zero_page != NULL);
294 zero_page = ZERO_PAGE(0);
295 page_cache_get(zero_page);
298 * The number of active work items is limited by the number of
299 * connections, so leave @max_active at default.
	ceph_msgr_wq = alloc_workqueue("ceph-msgr", WQ_MEM_RECLAIM, 0);
	if (ceph_msgr_wq)
		return 0;

	pr_err("msgr_init failed to create workqueue\n");
310 EXPORT_SYMBOL(ceph_msgr_init);
312 void ceph_msgr_exit(void)
314 BUG_ON(ceph_msgr_wq == NULL);
318 EXPORT_SYMBOL(ceph_msgr_exit);
320 void ceph_msgr_flush(void)
322 flush_workqueue(ceph_msgr_wq);
324 EXPORT_SYMBOL(ceph_msgr_flush);
326 /* Connection socket state transition functions */
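/*
 * Each helper atomically swaps in the new sock_state and warns if the
 * old state was not an expected predecessor (see the summary above).
 */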
328 static void con_sock_state_init(struct ceph_connection *con)
332 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
333 if (WARN_ON(old_state != CON_SOCK_STATE_NEW))
334 printk("%s: unexpected old state %d\n", __func__, old_state);
335 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
336 CON_SOCK_STATE_CLOSED);
339 static void con_sock_state_connecting(struct ceph_connection *con)
343 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTING);
344 if (WARN_ON(old_state != CON_SOCK_STATE_CLOSED))
345 printk("%s: unexpected old state %d\n", __func__, old_state);
346 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
347 CON_SOCK_STATE_CONNECTING);
350 static void con_sock_state_connected(struct ceph_connection *con)
354 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CONNECTED);
355 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING))
356 printk("%s: unexpected old state %d\n", __func__, old_state);
357 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
358 CON_SOCK_STATE_CONNECTED);
361 static void con_sock_state_closing(struct ceph_connection *con)
365 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSING);
366 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTING &&
367 old_state != CON_SOCK_STATE_CONNECTED &&
368 old_state != CON_SOCK_STATE_CLOSING))
369 printk("%s: unexpected old state %d\n", __func__, old_state);
370 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
371 CON_SOCK_STATE_CLOSING);
374 static void con_sock_state_closed(struct ceph_connection *con)
378 old_state = atomic_xchg(&con->sock_state, CON_SOCK_STATE_CLOSED);
379 if (WARN_ON(old_state != CON_SOCK_STATE_CONNECTED &&
380 old_state != CON_SOCK_STATE_CLOSING &&
381 old_state != CON_SOCK_STATE_CONNECTING &&
382 old_state != CON_SOCK_STATE_CLOSED))
383 printk("%s: unexpected old state %d\n", __func__, old_state);
384 dout("%s con %p sock %d -> %d\n", __func__, con, old_state,
385 CON_SOCK_STATE_CLOSED);
389 * socket callback functions
392 /* data available on socket, or listen socket received a connect */
393 static void ceph_sock_data_ready(struct sock *sk)
395 struct ceph_connection *con = sk->sk_user_data;
396 if (atomic_read(&con->msgr->stopping)) {
400 if (sk->sk_state != TCP_CLOSE_WAIT) {
401 dout("%s on %p state = %lu, queueing work\n", __func__,
407 /* socket has buffer space for writing */
408 static void ceph_sock_write_space(struct sock *sk)
410 struct ceph_connection *con = sk->sk_user_data;
412 /* only queue to workqueue if there is data we want to write,
413 * and there is sufficient space in the socket buffer to accept
414 * more data. clear SOCK_NOSPACE so that ceph_sock_write_space()
415 * doesn't get called again until try_write() fills the socket
416 * buffer. See net/ipv4/tcp_input.c:tcp_check_space()
417 * and net/core/stream.c:sk_stream_write_space().
419 if (con_flag_test(con, CON_FLAG_WRITE_PENDING)) {
420 if (sk_stream_is_writeable(sk)) {
421 dout("%s %p queueing write work\n", __func__, con);
422 clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
426 dout("%s %p nothing to write\n", __func__, con);
430 /* socket's state has changed */
431 static void ceph_sock_state_change(struct sock *sk)
433 struct ceph_connection *con = sk->sk_user_data;
435 dout("%s %p state = %lu sk_state = %u\n", __func__,
436 con, con->state, sk->sk_state);
438 switch (sk->sk_state) {
440 dout("%s TCP_CLOSE\n", __func__);
442 dout("%s TCP_CLOSE_WAIT\n", __func__);
443 con_sock_state_closing(con);
444 con_flag_set(con, CON_FLAG_SOCK_CLOSED);
447 case TCP_ESTABLISHED:
448 dout("%s TCP_ESTABLISHED\n", __func__);
449 con_sock_state_connected(con);
452 default: /* Everything else is uninteresting */
458 * set up socket callbacks
460 static void set_sock_callbacks(struct socket *sock,
461 struct ceph_connection *con)
463 struct sock *sk = sock->sk;
464 sk->sk_user_data = con;
465 sk->sk_data_ready = ceph_sock_data_ready;
466 sk->sk_write_space = ceph_sock_write_space;
467 sk->sk_state_change = ceph_sock_state_change;
476 * initiate connection to a remote socket.
478 static int ceph_tcp_connect(struct ceph_connection *con)
480 struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
482 unsigned int noio_flag;
487 /* sock_create_kern() allocates with GFP_KERNEL */
488 noio_flag = memalloc_noio_save();
489 ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
490 SOCK_STREAM, IPPROTO_TCP, &sock);
491 memalloc_noio_restore(noio_flag);
494 sock->sk->sk_allocation = GFP_NOFS;
#ifdef CONFIG_LOCKDEP
	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif
500 set_sock_callbacks(sock, con);
502 dout("connect %s\n", ceph_pr_addr(&con->peer_addr.in_addr));
504 con_sock_state_connecting(con);
505 ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
507 if (ret == -EINPROGRESS) {
508 dout("connect %s EINPROGRESS sk_state = %u\n",
509 ceph_pr_addr(&con->peer_addr.in_addr),
511 } else if (ret < 0) {
512 pr_err("connect %s error %d\n",
513 ceph_pr_addr(&con->peer_addr.in_addr), ret);
518 if (ceph_test_opt(from_msgr(con->msgr), TCP_NODELAY)) {
521 ret = kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
522 (char *)&optval, sizeof(optval));
524 pr_err("kernel_setsockopt(TCP_NODELAY) failed: %d",
532 static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
534 struct kvec iov = {buf, len};
535 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
538 r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
544 static int ceph_tcp_recvpage(struct socket *sock, struct page *page,
545 int page_offset, size_t length)
550 BUG_ON(page_offset + length > PAGE_SIZE);
554 ret = ceph_tcp_recvmsg(sock, kaddr + page_offset, length);
/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
564 static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
565 size_t kvlen, size_t len, int more)
567 struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };
571 msg.msg_flags |= MSG_MORE;
573 msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */
575 r = kernel_sendmsg(sock, &msg, iov, kvlen, len);
581 static int __ceph_tcp_sendpage(struct socket *sock, struct page *page,
582 int offset, size_t size, bool more)
584 int flags = MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : MSG_EOR);
587 ret = kernel_sendpage(sock, page, offset, size, flags);
594 static int ceph_tcp_sendpage(struct socket *sock, struct page *page,
595 int offset, size_t size, bool more)
	/* sendpage cannot properly handle pages with page_count == 0,
	 * we need to fall back to sendmsg if that's the case */
602 if (page_count(page) >= 1)
603 return __ceph_tcp_sendpage(sock, page, offset, size, more);
605 iov.iov_base = kmap(page) + offset;
607 ret = ceph_tcp_sendmsg(sock, &iov, 1, size, more);
614 * Shutdown/close the socket for the given connection.
616 static int con_close_socket(struct ceph_connection *con)
620 dout("con_close_socket on %p sock %p\n", con, con->sock);
622 rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
623 sock_release(con->sock);
	/*
	 * Forcibly clear the SOCK_CLOSED flag.  It gets set
	 * independently of the connection mutex, and we could have
	 * received a socket close event before we had the chance to
	 * shut the socket down.
	 */
633 con_flag_clear(con, CON_FLAG_SOCK_CLOSED);
635 con_sock_state_closed(con);
640 * Reset a connection. Discard all incoming and outgoing messages
641 * and clear *_seq state.
643 static void ceph_msg_remove(struct ceph_msg *msg)
645 list_del_init(&msg->list_head);
649 static void ceph_msg_remove_list(struct list_head *head)
651 while (!list_empty(head)) {
652 struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
654 ceph_msg_remove(msg);
658 static void reset_connection(struct ceph_connection *con)
660 /* reset connection, out_queue, msg_ and connect_seq */
661 /* discard existing out_queue and msg_seq */
662 dout("reset_connection %p\n", con);
663 ceph_msg_remove_list(&con->out_queue);
664 ceph_msg_remove_list(&con->out_sent);
667 BUG_ON(con->in_msg->con != con);
668 ceph_msg_put(con->in_msg);
672 con->connect_seq = 0;
675 BUG_ON(con->out_msg->con != con);
676 ceph_msg_put(con->out_msg);
680 con->in_seq_acked = 0;
686 * mark a peer down. drop any open connections.
688 void ceph_con_close(struct ceph_connection *con)
690 mutex_lock(&con->mutex);
691 dout("con_close %p peer %s\n", con,
692 ceph_pr_addr(&con->peer_addr.in_addr));
693 con->state = CON_STATE_CLOSED;
695 con_flag_clear(con, CON_FLAG_LOSSYTX); /* so we retry next connect */
696 con_flag_clear(con, CON_FLAG_KEEPALIVE_PENDING);
697 con_flag_clear(con, CON_FLAG_WRITE_PENDING);
698 con_flag_clear(con, CON_FLAG_BACKOFF);
700 reset_connection(con);
701 con->peer_global_seq = 0;
703 con_close_socket(con);
704 mutex_unlock(&con->mutex);
706 EXPORT_SYMBOL(ceph_con_close);
709 * Reopen a closed connection, with a new peer address.
711 void ceph_con_open(struct ceph_connection *con,
712 __u8 entity_type, __u64 entity_num,
713 struct ceph_entity_addr *addr)
715 mutex_lock(&con->mutex);
716 dout("con_open %p %s\n", con, ceph_pr_addr(&addr->in_addr));
718 WARN_ON(con->state != CON_STATE_CLOSED);
719 con->state = CON_STATE_PREOPEN;
721 con->peer_name.type = (__u8) entity_type;
722 con->peer_name.num = cpu_to_le64(entity_num);
724 memcpy(&con->peer_addr, addr, sizeof(*addr));
725 con->delay = 0; /* reset backoff memory */
726 mutex_unlock(&con->mutex);
729 EXPORT_SYMBOL(ceph_con_open);
732 * return true if this connection ever successfully opened
734 bool ceph_con_opened(struct ceph_connection *con)
736 return con->connect_seq > 0;
740 * initialize a new connection.
742 void ceph_con_init(struct ceph_connection *con, void *private,
743 const struct ceph_connection_operations *ops,
744 struct ceph_messenger *msgr)
746 dout("con_init %p\n", con);
747 memset(con, 0, sizeof(*con));
748 con->private = private;
752 con_sock_state_init(con);
754 mutex_init(&con->mutex);
755 INIT_LIST_HEAD(&con->out_queue);
756 INIT_LIST_HEAD(&con->out_sent);
757 INIT_DELAYED_WORK(&con->work, ceph_con_workfn);
759 con->state = CON_STATE_CLOSED;
761 EXPORT_SYMBOL(ceph_con_init);
765 * We maintain a global counter to order connection attempts. Get
766 * a unique seq greater than @gt.
768 static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
772 spin_lock(&msgr->global_seq_lock);
773 if (msgr->global_seq < gt)
774 msgr->global_seq = gt;
775 ret = ++msgr->global_seq;
776 spin_unlock(&msgr->global_seq_lock);
780 static void con_out_kvec_reset(struct ceph_connection *con)
782 BUG_ON(con->out_skip);
784 con->out_kvec_left = 0;
785 con->out_kvec_bytes = 0;
786 con->out_kvec_cur = &con->out_kvec[0];
789 static void con_out_kvec_add(struct ceph_connection *con,
790 size_t size, void *data)
792 int index = con->out_kvec_left;
794 BUG_ON(con->out_skip);
795 BUG_ON(index >= ARRAY_SIZE(con->out_kvec));
797 con->out_kvec[index].iov_len = size;
798 con->out_kvec[index].iov_base = data;
799 con->out_kvec_left++;
800 con->out_kvec_bytes += size;
804 * Chop off a kvec from the end. Return residual number of bytes for
805 * that kvec, i.e. how many bytes would have been written if the kvec
808 static int con_out_kvec_skip(struct ceph_connection *con)
810 int off = con->out_kvec_cur - con->out_kvec;
813 if (con->out_kvec_bytes > 0) {
814 skip = con->out_kvec[off + con->out_kvec_left - 1].iov_len;
815 BUG_ON(con->out_kvec_bytes < skip);
816 BUG_ON(!con->out_kvec_left);
817 con->out_kvec_bytes -= skip;
818 con->out_kvec_left--;
#ifdef CONFIG_BLOCK

/*
 * For a bio data item, a piece is whatever remains of the next
 * entry in the current bio iovec, or the first entry in the next
 * bio in the list.
 */
static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
					size_t length)
{
834 struct ceph_msg_data *data = cursor->data;
837 BUG_ON(data->type != CEPH_MSG_DATA_BIO);
842 cursor->resid = min(length, data->bio_length);
844 cursor->bvec_iter = bio->bi_iter;
846 cursor->resid <= bio_iter_len(bio, cursor->bvec_iter);
849 static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
853 struct ceph_msg_data *data = cursor->data;
855 struct bio_vec bio_vec;
857 BUG_ON(data->type != CEPH_MSG_DATA_BIO);
862 bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);
864 *page_offset = (size_t) bio_vec.bv_offset;
865 BUG_ON(*page_offset >= PAGE_SIZE);
866 if (cursor->last_piece) /* pagelist offset is always 0 */
867 *length = cursor->resid;
869 *length = (size_t) bio_vec.bv_len;
870 BUG_ON(*length > cursor->resid);
871 BUG_ON(*page_offset + *length > PAGE_SIZE);
873 return bio_vec.bv_page;
876 static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
880 struct bio_vec bio_vec;
882 BUG_ON(cursor->data->type != CEPH_MSG_DATA_BIO);
887 bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);
889 /* Advance the cursor offset */
891 BUG_ON(cursor->resid < bytes);
892 cursor->resid -= bytes;
894 bio_advance_iter(bio, &cursor->bvec_iter, bytes);
896 if (bytes < bio_vec.bv_len)
897 return false; /* more bytes to process in this segment */
899 /* Move on to the next segment, and possibly the next bio */
901 if (!cursor->bvec_iter.bi_size) {
905 cursor->bvec_iter = bio->bi_iter;
907 memset(&cursor->bvec_iter, 0,
908 sizeof(cursor->bvec_iter));
911 if (!cursor->last_piece) {
912 BUG_ON(!cursor->resid);
914 /* A short read is OK, so use <= rather than == */
915 if (cursor->resid <= bio_iter_len(bio, cursor->bvec_iter))
916 cursor->last_piece = true;
921 #endif /* CONFIG_BLOCK */
924 * For a page array, a piece comes from the first page in the array
925 * that has not already been fully consumed.
927 static void ceph_msg_data_pages_cursor_init(struct ceph_msg_data_cursor *cursor,
930 struct ceph_msg_data *data = cursor->data;
933 BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
935 BUG_ON(!data->pages);
936 BUG_ON(!data->length);
938 cursor->resid = min(length, data->length);
939 page_count = calc_pages_for(data->alignment, (u64)data->length);
940 cursor->page_offset = data->alignment & ~PAGE_MASK;
941 cursor->page_index = 0;
942 BUG_ON(page_count > (int)USHRT_MAX);
943 cursor->page_count = (unsigned short)page_count;
944 BUG_ON(length > SIZE_MAX - cursor->page_offset);
945 cursor->last_piece = cursor->page_offset + cursor->resid <= PAGE_SIZE;
949 ceph_msg_data_pages_next(struct ceph_msg_data_cursor *cursor,
950 size_t *page_offset, size_t *length)
952 struct ceph_msg_data *data = cursor->data;
954 BUG_ON(data->type != CEPH_MSG_DATA_PAGES);
956 BUG_ON(cursor->page_index >= cursor->page_count);
957 BUG_ON(cursor->page_offset >= PAGE_SIZE);
959 *page_offset = cursor->page_offset;
960 if (cursor->last_piece)
961 *length = cursor->resid;
963 *length = PAGE_SIZE - *page_offset;
965 return data->pages[cursor->page_index];
968 static bool ceph_msg_data_pages_advance(struct ceph_msg_data_cursor *cursor,
971 BUG_ON(cursor->data->type != CEPH_MSG_DATA_PAGES);
973 BUG_ON(cursor->page_offset + bytes > PAGE_SIZE);
975 /* Advance the cursor page offset */
977 cursor->resid -= bytes;
978 cursor->page_offset = (cursor->page_offset + bytes) & ~PAGE_MASK;
979 if (!bytes || cursor->page_offset)
980 return false; /* more bytes to process in the current page */
983 return false; /* no more data */
985 /* Move on to the next page; offset is already at 0 */
987 BUG_ON(cursor->page_index >= cursor->page_count);
988 cursor->page_index++;
989 cursor->last_piece = cursor->resid <= PAGE_SIZE;
995 * For a pagelist, a piece is whatever remains to be consumed in the
996 * first page in the list, or the front of the next page.
999 ceph_msg_data_pagelist_cursor_init(struct ceph_msg_data_cursor *cursor,
1002 struct ceph_msg_data *data = cursor->data;
1003 struct ceph_pagelist *pagelist;
1006 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
1008 pagelist = data->pagelist;
1012 return; /* pagelist can be assigned but empty */
1014 BUG_ON(list_empty(&pagelist->head));
1015 page = list_first_entry(&pagelist->head, struct page, lru);
1017 cursor->resid = min(length, pagelist->length);
1018 cursor->page = page;
1020 cursor->last_piece = cursor->resid <= PAGE_SIZE;
1023 static struct page *
1024 ceph_msg_data_pagelist_next(struct ceph_msg_data_cursor *cursor,
1025 size_t *page_offset, size_t *length)
1027 struct ceph_msg_data *data = cursor->data;
1028 struct ceph_pagelist *pagelist;
1030 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
1032 pagelist = data->pagelist;
1035 BUG_ON(!cursor->page);
1036 BUG_ON(cursor->offset + cursor->resid != pagelist->length);
1038 /* offset of first page in pagelist is always 0 */
1039 *page_offset = cursor->offset & ~PAGE_MASK;
1040 if (cursor->last_piece)
1041 *length = cursor->resid;
1043 *length = PAGE_SIZE - *page_offset;
1045 return cursor->page;
1048 static bool ceph_msg_data_pagelist_advance(struct ceph_msg_data_cursor *cursor,
1051 struct ceph_msg_data *data = cursor->data;
1052 struct ceph_pagelist *pagelist;
1054 BUG_ON(data->type != CEPH_MSG_DATA_PAGELIST);
1056 pagelist = data->pagelist;
1059 BUG_ON(cursor->offset + cursor->resid != pagelist->length);
1060 BUG_ON((cursor->offset & ~PAGE_MASK) + bytes > PAGE_SIZE);
1062 /* Advance the cursor offset */
1064 cursor->resid -= bytes;
1065 cursor->offset += bytes;
1066 /* offset of first page in pagelist is always 0 */
1067 if (!bytes || cursor->offset & ~PAGE_MASK)
1068 return false; /* more bytes to process in the current page */
1071 return false; /* no more data */
1073 /* Move on to the next page */
1075 BUG_ON(list_is_last(&cursor->page->lru, &pagelist->head));
1076 cursor->page = list_entry_next(cursor->page, lru);
1077 cursor->last_piece = cursor->resid <= PAGE_SIZE;
1083 * Message data is handled (sent or received) in pieces, where each
1084 * piece resides on a single page. The network layer might not
1085 * consume an entire piece at once. A data item's cursor keeps
1086 * track of which piece is next to process and how much remains to
1087 * be processed in that piece. It also tracks whether the current
1088 * piece is the last one in the data item.
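 *
 * Roughly, a data item is consumed like this (the cursor is set up in
 * prepare_message_data(), the loop lives in write_partial_message_data()
 * and read_partial_msg_data() below):
 *
 *	ceph_msg_data_cursor_init(msg, data_len);
 *	while (cursor->resid) {
 *		page = ceph_msg_data_next(cursor, &off, &len, &last);
 *		ret = ...transfer up to "len" bytes at page + "off"...;
 *		ceph_msg_data_advance(cursor, ret);
 *	}
 */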
1090 static void __ceph_msg_data_cursor_init(struct ceph_msg_data_cursor *cursor)
1092 size_t length = cursor->total_resid;
1094 switch (cursor->data->type) {
1095 case CEPH_MSG_DATA_PAGELIST:
1096 ceph_msg_data_pagelist_cursor_init(cursor, length);
1098 case CEPH_MSG_DATA_PAGES:
1099 ceph_msg_data_pages_cursor_init(cursor, length);
1102 case CEPH_MSG_DATA_BIO:
1103 ceph_msg_data_bio_cursor_init(cursor, length);
1105 #endif /* CONFIG_BLOCK */
1106 case CEPH_MSG_DATA_NONE:
1111 cursor->need_crc = true;
1114 static void ceph_msg_data_cursor_init(struct ceph_msg *msg, size_t length)
1116 struct ceph_msg_data_cursor *cursor = &msg->cursor;
1117 struct ceph_msg_data *data;
1120 BUG_ON(length > msg->data_length);
1121 BUG_ON(list_empty(&msg->data));
1123 cursor->data_head = &msg->data;
1124 cursor->total_resid = length;
1125 data = list_first_entry(&msg->data, struct ceph_msg_data, links);
1126 cursor->data = data;
1128 __ceph_msg_data_cursor_init(cursor);
1132 * Return the page containing the next piece to process for a given
1133 * data item, and supply the page offset and length of that piece.
1134 * Indicate whether this is the last piece in this data item.
1136 static struct page *ceph_msg_data_next(struct ceph_msg_data_cursor *cursor,
1137 size_t *page_offset, size_t *length,
1142 switch (cursor->data->type) {
1143 case CEPH_MSG_DATA_PAGELIST:
1144 page = ceph_msg_data_pagelist_next(cursor, page_offset, length);
1146 case CEPH_MSG_DATA_PAGES:
1147 page = ceph_msg_data_pages_next(cursor, page_offset, length);
1150 case CEPH_MSG_DATA_BIO:
1151 page = ceph_msg_data_bio_next(cursor, page_offset, length);
1153 #endif /* CONFIG_BLOCK */
1154 case CEPH_MSG_DATA_NONE:
1160 BUG_ON(*page_offset + *length > PAGE_SIZE);
1163 *last_piece = cursor->last_piece;
/*
 * Returns true if the result moves the cursor on to the next piece
 * of the data item.
 */
1172 static bool ceph_msg_data_advance(struct ceph_msg_data_cursor *cursor,
1177 BUG_ON(bytes > cursor->resid);
1178 switch (cursor->data->type) {
1179 case CEPH_MSG_DATA_PAGELIST:
1180 new_piece = ceph_msg_data_pagelist_advance(cursor, bytes);
1182 case CEPH_MSG_DATA_PAGES:
1183 new_piece = ceph_msg_data_pages_advance(cursor, bytes);
1186 case CEPH_MSG_DATA_BIO:
1187 new_piece = ceph_msg_data_bio_advance(cursor, bytes);
1189 #endif /* CONFIG_BLOCK */
1190 case CEPH_MSG_DATA_NONE:
1195 cursor->total_resid -= bytes;
1197 if (!cursor->resid && cursor->total_resid) {
1198 WARN_ON(!cursor->last_piece);
1199 BUG_ON(list_is_last(&cursor->data->links, cursor->data_head));
1200 cursor->data = list_entry_next(cursor->data, links);
1201 __ceph_msg_data_cursor_init(cursor);
1204 cursor->need_crc = new_piece;
1209 static size_t sizeof_footer(struct ceph_connection *con)
1211 return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
1212 sizeof(struct ceph_msg_footer) :
1213 sizeof(struct ceph_msg_footer_old);
1216 static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
1221 /* Initialize data cursor */
1223 ceph_msg_data_cursor_init(msg, (size_t)data_len);
/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid; we just add on to the end.
 */
1230 static void prepare_write_message_footer(struct ceph_connection *con)
1232 struct ceph_msg *m = con->out_msg;
1233 int v = con->out_kvec_left;
1235 m->footer.flags |= CEPH_MSG_FOOTER_COMPLETE;
1237 dout("prepare_write_message_footer %p\n", con);
1238 con->out_kvec[v].iov_base = &m->footer;
1239 if (con->peer_features & CEPH_FEATURE_MSG_AUTH) {
1240 if (con->ops->sign_message)
1241 con->ops->sign_message(m);
1244 con->out_kvec[v].iov_len = sizeof(m->footer);
1245 con->out_kvec_bytes += sizeof(m->footer);
1247 m->old_footer.flags = m->footer.flags;
1248 con->out_kvec[v].iov_len = sizeof(m->old_footer);
1249 con->out_kvec_bytes += sizeof(m->old_footer);
1251 con->out_kvec_left++;
1252 con->out_more = m->more_to_follow;
1253 con->out_msg_done = true;
1257 * Prepare headers for the next outgoing message.
1259 static void prepare_write_message(struct ceph_connection *con)
1264 con_out_kvec_reset(con);
1265 con->out_msg_done = false;
1267 /* Sneak an ack in there first? If we can get it into the same
1268 * TCP packet that's a good thing. */
1269 if (con->in_seq > con->in_seq_acked) {
1270 con->in_seq_acked = con->in_seq;
1271 con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
1272 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
1273 con_out_kvec_add(con, sizeof (con->out_temp_ack),
1274 &con->out_temp_ack);
1277 BUG_ON(list_empty(&con->out_queue));
1278 m = list_first_entry(&con->out_queue, struct ceph_msg, list_head);
1280 BUG_ON(m->con != con);
1282 /* put message on sent list */
1284 list_move_tail(&m->list_head, &con->out_sent);
1287 * only assign outgoing seq # if we haven't sent this message
	 * yet.  if it is requeued, resend with its original seq.
	 */
1290 if (m->needs_out_seq) {
1291 m->hdr.seq = cpu_to_le64(++con->out_seq);
1292 m->needs_out_seq = false;
1294 WARN_ON(m->data_length != le32_to_cpu(m->hdr.data_len));
1296 dout("prepare_write_message %p seq %lld type %d len %d+%d+%zd\n",
1297 m, con->out_seq, le16_to_cpu(m->hdr.type),
1298 le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
1300 BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);
1302 /* tag + hdr + front + middle */
1303 con_out_kvec_add(con, sizeof (tag_msg), &tag_msg);
1304 con_out_kvec_add(con, sizeof(con->out_hdr), &con->out_hdr);
1305 con_out_kvec_add(con, m->front.iov_len, m->front.iov_base);
1308 con_out_kvec_add(con, m->middle->vec.iov_len,
1309 m->middle->vec.iov_base);
1311 /* fill in hdr crc and finalize hdr */
1312 crc = crc32c(0, &m->hdr, offsetof(struct ceph_msg_header, crc));
1313 con->out_msg->hdr.crc = cpu_to_le32(crc);
1314 memcpy(&con->out_hdr, &con->out_msg->hdr, sizeof(con->out_hdr));
1316 /* fill in front and middle crc, footer */
1317 crc = crc32c(0, m->front.iov_base, m->front.iov_len);
1318 con->out_msg->footer.front_crc = cpu_to_le32(crc);
1320 crc = crc32c(0, m->middle->vec.iov_base,
1321 m->middle->vec.iov_len);
1322 con->out_msg->footer.middle_crc = cpu_to_le32(crc);
1324 con->out_msg->footer.middle_crc = 0;
1325 dout("%s front_crc %u middle_crc %u\n", __func__,
1326 le32_to_cpu(con->out_msg->footer.front_crc),
1327 le32_to_cpu(con->out_msg->footer.middle_crc));
1328 con->out_msg->footer.flags = 0;
1330 /* is there a data payload? */
1331 con->out_msg->footer.data_crc = 0;
1332 if (m->data_length) {
1333 prepare_message_data(con->out_msg, m->data_length);
1334 con->out_more = 1; /* data + footer will follow */
1336 /* no, queue up footer too and be done */
1337 prepare_write_message_footer(con);
1340 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1346 static void prepare_write_ack(struct ceph_connection *con)
1348 dout("prepare_write_ack %p %llu -> %llu\n", con,
1349 con->in_seq_acked, con->in_seq);
1350 con->in_seq_acked = con->in_seq;
1352 con_out_kvec_reset(con);
1354 con_out_kvec_add(con, sizeof (tag_ack), &tag_ack);
1356 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
1357 con_out_kvec_add(con, sizeof (con->out_temp_ack),
1358 &con->out_temp_ack);
1360 con->out_more = 1; /* more will follow.. eventually.. */
1361 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1365 * Prepare to share the seq during handshake
1367 static void prepare_write_seq(struct ceph_connection *con)
1369 dout("prepare_write_seq %p %llu -> %llu\n", con,
1370 con->in_seq_acked, con->in_seq);
1371 con->in_seq_acked = con->in_seq;
1373 con_out_kvec_reset(con);
1375 con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
1376 con_out_kvec_add(con, sizeof (con->out_temp_ack),
1377 &con->out_temp_ack);
1379 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1383 * Prepare to write keepalive byte.
1385 static void prepare_write_keepalive(struct ceph_connection *con)
1387 dout("prepare_write_keepalive %p\n", con);
1388 con_out_kvec_reset(con);
1389 if (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2) {
1390 struct timespec now = CURRENT_TIME;
1392 con_out_kvec_add(con, sizeof(tag_keepalive2), &tag_keepalive2);
1393 ceph_encode_timespec(&con->out_temp_keepalive2, &now);
1394 con_out_kvec_add(con, sizeof(con->out_temp_keepalive2),
1395 &con->out_temp_keepalive2);
1397 con_out_kvec_add(con, sizeof(tag_keepalive), &tag_keepalive);
1399 con_flag_set(con, CON_FLAG_WRITE_PENDING);
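/*
 * With CEPH_FEATURE_MSGR_KEEPALIVE2 the keepalive carries a timestamp
 * that the peer echoes back; read_keepalive_ack() stores the echo in
 * con->last_keepalive_ack.
 */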
1403 * Connection negotiation.
1406 static struct ceph_auth_handshake *get_connect_authorizer(struct ceph_connection *con,
1409 struct ceph_auth_handshake *auth;
1411 if (!con->ops->get_authorizer) {
1412 con->out_connect.authorizer_protocol = CEPH_AUTH_UNKNOWN;
1413 con->out_connect.authorizer_len = 0;
1417 /* Can't hold the mutex while getting authorizer */
1418 mutex_unlock(&con->mutex);
1419 auth = con->ops->get_authorizer(con, auth_proto, con->auth_retry);
1420 mutex_lock(&con->mutex);
1424 if (con->state != CON_STATE_NEGOTIATING)
1425 return ERR_PTR(-EAGAIN);
1427 con->auth_reply_buf = auth->authorizer_reply_buf;
1428 con->auth_reply_buf_len = auth->authorizer_reply_buf_len;
1433 * We connected to a peer and are saying hello.
1435 static void prepare_write_banner(struct ceph_connection *con)
1437 con_out_kvec_add(con, strlen(CEPH_BANNER), CEPH_BANNER);
1438 con_out_kvec_add(con, sizeof (con->msgr->my_enc_addr),
1439 &con->msgr->my_enc_addr);
1442 con_flag_set(con, CON_FLAG_WRITE_PENDING);
1445 static int prepare_write_connect(struct ceph_connection *con)
1447 unsigned int global_seq = get_global_seq(con->msgr, 0);
1450 struct ceph_auth_handshake *auth;
1452 switch (con->peer_name.type) {
1453 case CEPH_ENTITY_TYPE_MON:
1454 proto = CEPH_MONC_PROTOCOL;
1456 case CEPH_ENTITY_TYPE_OSD:
1457 proto = CEPH_OSDC_PROTOCOL;
1459 case CEPH_ENTITY_TYPE_MDS:
1460 proto = CEPH_MDSC_PROTOCOL;
1466 dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
1467 con->connect_seq, global_seq, proto);
1469 con->out_connect.features =
1470 cpu_to_le64(from_msgr(con->msgr)->supported_features);
1471 con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
1472 con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
1473 con->out_connect.global_seq = cpu_to_le32(global_seq);
1474 con->out_connect.protocol_version = cpu_to_le32(proto);
1475 con->out_connect.flags = 0;
1477 auth_proto = CEPH_AUTH_UNKNOWN;
1478 auth = get_connect_authorizer(con, &auth_proto);
1480 return PTR_ERR(auth);
1482 con->out_connect.authorizer_protocol = cpu_to_le32(auth_proto);
1483 con->out_connect.authorizer_len = auth ?
1484 cpu_to_le32(auth->authorizer_buf_len) : 0;
1486 con_out_kvec_add(con, sizeof (con->out_connect),
1488 if (auth && auth->authorizer_buf_len)
1489 con_out_kvec_add(con, auth->authorizer_buf_len,
1490 auth->authorizer_buf);
1493 con_flag_set(con, CON_FLAG_WRITE_PENDING);
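/*
 * After the banner exchange the client sends the ceph_msg_connect
 * built above (plus any authorizer payload); the server's reply is
 * read by read_partial_connect() and handled in process_connect().
 */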
/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
1504 static int write_partial_kvec(struct ceph_connection *con)
1508 dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
1509 while (con->out_kvec_bytes > 0) {
1510 ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
1511 con->out_kvec_left, con->out_kvec_bytes,
1515 con->out_kvec_bytes -= ret;
1516 if (con->out_kvec_bytes == 0)
1519 /* account for full iov entries consumed */
1520 while (ret >= con->out_kvec_cur->iov_len) {
1521 BUG_ON(!con->out_kvec_left);
1522 ret -= con->out_kvec_cur->iov_len;
1523 con->out_kvec_cur++;
1524 con->out_kvec_left--;
1526 /* and for a partially-consumed entry */
1528 con->out_kvec_cur->iov_len -= ret;
1529 con->out_kvec_cur->iov_base += ret;
1532 con->out_kvec_left = 0;
1535 dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
1536 con->out_kvec_bytes, con->out_kvec_left, ret);
1537 return ret; /* done! */
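/*
 * Fold "length" bytes of "page" (starting at "page_offset") into the
 * running crc32c value "crc", mapping the page with kmap() for the
 * duration.
 */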
1540 static u32 ceph_crc32c_page(u32 crc, struct page *page,
1541 unsigned int page_offset,
1542 unsigned int length)
1547 BUG_ON(kaddr == NULL);
1548 crc = crc32c(crc, kaddr + page_offset, length);
/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
1560 static int write_partial_message_data(struct ceph_connection *con)
1562 struct ceph_msg *msg = con->out_msg;
1563 struct ceph_msg_data_cursor *cursor = &msg->cursor;
1564 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
1567 dout("%s %p msg %p\n", __func__, con, msg);
1569 if (list_empty(&msg->data))
1573 * Iterate through each page that contains data to be
1574 * written, and send as much as possible for each.
1576 * If we are calculating the data crc (the default), we will
1577 * need to map the page. If we have no pages, they have
1578 * been revoked, so use the zero page.
1580 crc = do_datacrc ? le32_to_cpu(msg->footer.data_crc) : 0;
1581 while (cursor->resid) {
1589 page = ceph_msg_data_next(cursor, &page_offset, &length,
1591 ret = ceph_tcp_sendpage(con->sock, page, page_offset,
1592 length, !last_piece);
1595 msg->footer.data_crc = cpu_to_le32(crc);
1599 if (do_datacrc && cursor->need_crc)
1600 crc = ceph_crc32c_page(crc, page, page_offset, length);
1601 need_crc = ceph_msg_data_advance(cursor, (size_t)ret);
1604 dout("%s %p msg %p done\n", __func__, con, msg);
1606 /* prepare and queue up footer, too */
1608 msg->footer.data_crc = cpu_to_le32(crc);
1610 msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
1611 con_out_kvec_reset(con);
1612 prepare_write_message_footer(con);
1614 return 1; /* must return > 0 to indicate success */
1620 static int write_partial_skip(struct ceph_connection *con)
1624 dout("%s %p %d left\n", __func__, con, con->out_skip);
1625 while (con->out_skip > 0) {
1626 size_t size = min(con->out_skip, (int) PAGE_CACHE_SIZE);
1628 ret = ceph_tcp_sendpage(con->sock, zero_page, 0, size, true);
1631 con->out_skip -= ret;
1639 * Prepare to read connection handshake, or an ack.
1641 static void prepare_read_banner(struct ceph_connection *con)
1643 dout("prepare_read_banner %p\n", con);
1644 con->in_base_pos = 0;
1647 static void prepare_read_connect(struct ceph_connection *con)
1649 dout("prepare_read_connect %p\n", con);
1650 con->in_base_pos = 0;
1653 static void prepare_read_ack(struct ceph_connection *con)
1655 dout("prepare_read_ack %p\n", con);
1656 con->in_base_pos = 0;
1659 static void prepare_read_seq(struct ceph_connection *con)
1661 dout("prepare_read_seq %p\n", con);
1662 con->in_base_pos = 0;
1663 con->in_tag = CEPH_MSGR_TAG_SEQ;
1666 static void prepare_read_tag(struct ceph_connection *con)
1668 dout("prepare_read_tag %p\n", con);
1669 con->in_base_pos = 0;
1670 con->in_tag = CEPH_MSGR_TAG_READY;
1673 static void prepare_read_keepalive_ack(struct ceph_connection *con)
1675 dout("prepare_read_keepalive_ack %p\n", con);
1676 con->in_base_pos = 0;
1680 * Prepare to read a message.
1682 static int prepare_read_message(struct ceph_connection *con)
1684 dout("prepare_read_message %p\n", con);
1685 BUG_ON(con->in_msg != NULL);
1686 con->in_base_pos = 0;
1687 con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
/*
 * Read whatever is still missing of "object" (which is "size" bytes
 * long and ends at input offset "end"), resuming at con->in_base_pos.
 * Return 1 once the object is complete, 0 if no more data is available
 * right now, or a negative error code.
 */
static int read_partial(struct ceph_connection *con,
			int end, int size, void *object)
{
1695 while (con->in_base_pos < end) {
1696 int left = end - con->in_base_pos;
1697 int have = size - left;
1698 int ret = ceph_tcp_recvmsg(con->sock, object + have, left);
1701 con->in_base_pos += ret;
1708 * Read all or part of the connect-side handshake on a new connection
1710 static int read_partial_banner(struct ceph_connection *con)
1716 dout("read_partial_banner %p at %d\n", con, con->in_base_pos);
1719 size = strlen(CEPH_BANNER);
1721 ret = read_partial(con, end, size, con->in_banner);
1725 size = sizeof (con->actual_peer_addr);
1727 ret = read_partial(con, end, size, &con->actual_peer_addr);
1731 size = sizeof (con->peer_addr_for_me);
1733 ret = read_partial(con, end, size, &con->peer_addr_for_me);
1741 static int read_partial_connect(struct ceph_connection *con)
1747 dout("read_partial_connect %p at %d\n", con, con->in_base_pos);
1749 size = sizeof (con->in_reply);
1751 ret = read_partial(con, end, size, &con->in_reply);
1755 size = le32_to_cpu(con->in_reply.authorizer_len);
1757 ret = read_partial(con, end, size, con->auth_reply_buf);
1761 dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
1762 con, (int)con->in_reply.tag,
1763 le32_to_cpu(con->in_reply.connect_seq),
1764 le32_to_cpu(con->in_reply.global_seq));
1771 * Verify the hello banner looks okay.
1773 static int verify_hello(struct ceph_connection *con)
1775 if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
1776 pr_err("connect to %s got bad banner\n",
1777 ceph_pr_addr(&con->peer_addr.in_addr));
1778 con->error_msg = "protocol error, bad banner";
/*
 * Return true if @ss holds the wildcard ("any") address for its family.
 */
static bool addr_is_blank(struct sockaddr_storage *ss)
{
1786 struct in_addr *addr = &((struct sockaddr_in *)ss)->sin_addr;
1787 struct in6_addr *addr6 = &((struct sockaddr_in6 *)ss)->sin6_addr;
1789 switch (ss->ss_family) {
1791 return addr->s_addr == htonl(INADDR_ANY);
1793 return ipv6_addr_any(addr6);
1799 static int addr_port(struct sockaddr_storage *ss)
1801 switch (ss->ss_family) {
1803 return ntohs(((struct sockaddr_in *)ss)->sin_port);
1805 return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
1810 static void addr_set_port(struct sockaddr_storage *ss, int p)
1812 switch (ss->ss_family) {
1814 ((struct sockaddr_in *)ss)->sin_port = htons(p);
1817 ((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
1823 * Unlike other *_pton function semantics, zero indicates success.
1825 static int ceph_pton(const char *str, size_t len, struct sockaddr_storage *ss,
1826 char delim, const char **ipend)
1828 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
1829 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
1831 memset(ss, 0, sizeof(*ss));
1833 if (in4_pton(str, len, (u8 *)&in4->sin_addr.s_addr, delim, ipend)) {
1834 ss->ss_family = AF_INET;
1838 if (in6_pton(str, len, (u8 *)&in6->sin6_addr.s6_addr, delim, ipend)) {
1839 ss->ss_family = AF_INET6;
1847 * Extract hostname string and resolve using kernel DNS facility.
1849 #ifdef CONFIG_CEPH_LIB_USE_DNS_RESOLVER
1850 static int ceph_dns_resolve_name(const char *name, size_t namelen,
1851 struct sockaddr_storage *ss, char delim, const char **ipend)
1853 const char *end, *delim_p;
1854 char *colon_p, *ip_addr = NULL;
1858 * The end of the hostname occurs immediately preceding the delimiter or
1859 * the port marker (':') where the delimiter takes precedence.
1861 delim_p = memchr(name, delim, namelen);
1862 colon_p = memchr(name, ':', namelen);
1864 if (delim_p && colon_p)
1865 end = delim_p < colon_p ? delim_p : colon_p;
1866 else if (!delim_p && colon_p)
1870 if (!end) /* case: hostname:/ */
1871 end = name + namelen;
1877 /* do dns_resolve upcall */
1878 ip_len = dns_query(NULL, name, end - name, NULL, &ip_addr, NULL);
1880 ret = ceph_pton(ip_addr, ip_len, ss, -1, NULL);
1888 pr_info("resolve '%.*s' (ret=%d): %s\n", (int)(end - name), name,
1889 ret, ret ? "failed" : ceph_pr_addr(ss));
1894 static inline int ceph_dns_resolve_name(const char *name, size_t namelen,
1895 struct sockaddr_storage *ss, char delim, const char **ipend)
1902 * Parse a server name (IP or hostname). If a valid IP address is not found
1903 * then try to extract a hostname to resolve using userspace DNS upcall.
1905 static int ceph_parse_server_name(const char *name, size_t namelen,
1906 struct sockaddr_storage *ss, char delim, const char **ipend)
1910 ret = ceph_pton(name, namelen, ss, delim, ipend);
1912 ret = ceph_dns_resolve_name(name, namelen, ss, delim, ipend);
1918 * Parse an ip[:port] list into an addr array. Use the default
1919 * monitor port if a port isn't specified.
1921 int ceph_parse_ips(const char *c, const char *end,
1922 struct ceph_entity_addr *addr,
1923 int max_count, int *count)
1925 int i, ret = -EINVAL;
1928 dout("parse_ips on '%.*s'\n", (int)(end-c), c);
1929 for (i = 0; i < max_count; i++) {
1931 struct sockaddr_storage *ss = &addr[i].in_addr;
1940 ret = ceph_parse_server_name(p, end - p, ss, delim, &ipend);
1949 dout("missing matching ']'\n");
1956 if (p < end && *p == ':') {
1959 while (p < end && *p >= '0' && *p <= '9') {
1960 port = (port * 10) + (*p - '0');
1964 port = CEPH_MON_PORT;
1965 else if (port > 65535)
1968 port = CEPH_MON_PORT;
1971 addr_set_port(ss, port);
1973 dout("parse_ips got %s\n", ceph_pr_addr(ss));
1990 pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
1993 EXPORT_SYMBOL(ceph_parse_ips);
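/*
 * For example, a monitor list like "1.2.3.4:6789,[::1]" is parsed into
 * two entries here; an address given without an explicit port falls
 * back to CEPH_MON_PORT.
 */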
1995 static int process_banner(struct ceph_connection *con)
1997 dout("process_banner on %p\n", con);
1999 if (verify_hello(con) < 0)
2002 ceph_decode_addr(&con->actual_peer_addr);
2003 ceph_decode_addr(&con->peer_addr_for_me);
2006 * Make sure the other end is who we wanted. note that the other
2007 * end may not yet know their ip address, so if it's 0.0.0.0, give
2008 * them the benefit of the doubt.
2010 if (memcmp(&con->peer_addr, &con->actual_peer_addr,
2011 sizeof(con->peer_addr)) != 0 &&
2012 !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
2013 con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
2014 pr_warn("wrong peer, want %s/%d, got %s/%d\n",
2015 ceph_pr_addr(&con->peer_addr.in_addr),
2016 (int)le32_to_cpu(con->peer_addr.nonce),
2017 ceph_pr_addr(&con->actual_peer_addr.in_addr),
2018 (int)le32_to_cpu(con->actual_peer_addr.nonce));
2019 con->error_msg = "wrong peer at address";
2024 * did we learn our address?
2026 if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
2027 int port = addr_port(&con->msgr->inst.addr.in_addr);
2029 memcpy(&con->msgr->inst.addr.in_addr,
2030 &con->peer_addr_for_me.in_addr,
2031 sizeof(con->peer_addr_for_me.in_addr));
2032 addr_set_port(&con->msgr->inst.addr.in_addr, port);
2033 encode_my_addr(con->msgr);
2034 dout("process_banner learned my addr is %s\n",
2035 ceph_pr_addr(&con->msgr->inst.addr.in_addr));
2041 static int process_connect(struct ceph_connection *con)
2043 u64 sup_feat = from_msgr(con->msgr)->supported_features;
2044 u64 req_feat = from_msgr(con->msgr)->required_features;
2045 u64 server_feat = ceph_sanitize_features(
2046 le64_to_cpu(con->in_reply.features));
2049 dout("process_connect on %p tag %d\n", con, (int)con->in_tag);
2051 if (con->auth_reply_buf) {
2053 * Any connection that defines ->get_authorizer()
2054 * should also define ->verify_authorizer_reply().
2055 * See get_connect_authorizer().
2057 ret = con->ops->verify_authorizer_reply(con, 0);
2059 con->error_msg = "bad authorize reply";
2064 switch (con->in_reply.tag) {
2065 case CEPH_MSGR_TAG_FEATURES:
2066 pr_err("%s%lld %s feature set mismatch,"
2067 " my %llx < server's %llx, missing %llx\n",
2068 ENTITY_NAME(con->peer_name),
2069 ceph_pr_addr(&con->peer_addr.in_addr),
2070 sup_feat, server_feat, server_feat & ~sup_feat);
2071 con->error_msg = "missing required protocol features";
2072 reset_connection(con);
2075 case CEPH_MSGR_TAG_BADPROTOVER:
2076 pr_err("%s%lld %s protocol version mismatch,"
2077 " my %d != server's %d\n",
2078 ENTITY_NAME(con->peer_name),
2079 ceph_pr_addr(&con->peer_addr.in_addr),
2080 le32_to_cpu(con->out_connect.protocol_version),
2081 le32_to_cpu(con->in_reply.protocol_version));
2082 con->error_msg = "protocol version mismatch";
2083 reset_connection(con);
2086 case CEPH_MSGR_TAG_BADAUTHORIZER:
2088 dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
2090 if (con->auth_retry == 2) {
2091 con->error_msg = "connect authorization failure";
2094 con_out_kvec_reset(con);
2095 ret = prepare_write_connect(con);
2098 prepare_read_connect(con);
2101 case CEPH_MSGR_TAG_RESETSESSION:
2103 * If we connected with a large connect_seq but the peer
2104 * has no record of a session with us (no connection, or
	 * connect_seq == 0), they will send RESETSESSION to indicate
2106 * that they must have reset their session, and may have
2109 dout("process_connect got RESET peer seq %u\n",
2110 le32_to_cpu(con->in_reply.connect_seq));
2111 pr_err("%s%lld %s connection reset\n",
2112 ENTITY_NAME(con->peer_name),
2113 ceph_pr_addr(&con->peer_addr.in_addr));
2114 reset_connection(con);
2115 con_out_kvec_reset(con);
2116 ret = prepare_write_connect(con);
2119 prepare_read_connect(con);
2121 /* Tell ceph about it. */
2122 mutex_unlock(&con->mutex);
2123 pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
2124 if (con->ops->peer_reset)
2125 con->ops->peer_reset(con);
2126 mutex_lock(&con->mutex);
2127 if (con->state != CON_STATE_NEGOTIATING)
2131 case CEPH_MSGR_TAG_RETRY_SESSION:
2133 * If we sent a smaller connect_seq than the peer has, try
2134 * again with a larger value.
2136 dout("process_connect got RETRY_SESSION my seq %u, peer %u\n",
2137 le32_to_cpu(con->out_connect.connect_seq),
2138 le32_to_cpu(con->in_reply.connect_seq));
2139 con->connect_seq = le32_to_cpu(con->in_reply.connect_seq);
2140 con_out_kvec_reset(con);
2141 ret = prepare_write_connect(con);
2144 prepare_read_connect(con);
2147 case CEPH_MSGR_TAG_RETRY_GLOBAL:
2149 * If we sent a smaller global_seq than the peer has, try
2150 * again with a larger value.
2152 dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
2153 con->peer_global_seq,
2154 le32_to_cpu(con->in_reply.global_seq));
2155 get_global_seq(con->msgr,
2156 le32_to_cpu(con->in_reply.global_seq));
2157 con_out_kvec_reset(con);
2158 ret = prepare_write_connect(con);
2161 prepare_read_connect(con);
2164 case CEPH_MSGR_TAG_SEQ:
2165 case CEPH_MSGR_TAG_READY:
2166 if (req_feat & ~server_feat) {
2167 pr_err("%s%lld %s protocol feature mismatch,"
2168 " my required %llx > server's %llx, need %llx\n",
2169 ENTITY_NAME(con->peer_name),
2170 ceph_pr_addr(&con->peer_addr.in_addr),
2171 req_feat, server_feat, req_feat & ~server_feat);
2172 con->error_msg = "missing required protocol features";
2173 reset_connection(con);
2177 WARN_ON(con->state != CON_STATE_NEGOTIATING);
2178 con->state = CON_STATE_OPEN;
2179 con->auth_retry = 0; /* we authenticated; clear flag */
2180 con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
2182 con->peer_features = server_feat;
2183 dout("process_connect got READY gseq %d cseq %d (%d)\n",
2184 con->peer_global_seq,
2185 le32_to_cpu(con->in_reply.connect_seq),
2187 WARN_ON(con->connect_seq !=
2188 le32_to_cpu(con->in_reply.connect_seq));
2190 if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
2191 con_flag_set(con, CON_FLAG_LOSSYTX);
2193 con->delay = 0; /* reset backoff memory */
2195 if (con->in_reply.tag == CEPH_MSGR_TAG_SEQ) {
2196 prepare_write_seq(con);
2197 prepare_read_seq(con);
2199 prepare_read_tag(con);
2203 case CEPH_MSGR_TAG_WAIT:
2205 * If there is a connection race (we are opening
2206 * connections to each other), one of us may just have
	 * to WAIT.  This shouldn't happen if we are the
	 * client.
	 */
2210 con->error_msg = "protocol error, got WAIT as client";
2214 con->error_msg = "protocol error, garbage tag during connect";
2222 * read (part of) an ack
2224 static int read_partial_ack(struct ceph_connection *con)
2226 int size = sizeof (con->in_temp_ack);
2229 return read_partial(con, end, size, &con->in_temp_ack);
2233 * We can finally discard anything that's been acked.
2235 static void process_ack(struct ceph_connection *con)
2238 u64 ack = le64_to_cpu(con->in_temp_ack);
2241 while (!list_empty(&con->out_sent)) {
2242 m = list_first_entry(&con->out_sent, struct ceph_msg,
2244 seq = le64_to_cpu(m->hdr.seq);
2247 dout("got ack for seq %llu type %d at %p\n", seq,
2248 le16_to_cpu(m->hdr.type), m);
2249 m->ack_stamp = jiffies;
2252 prepare_read_tag(con);
/*
 * Read the remainder of a message section (front or middle) into
 * @section; once all @sec_len bytes have arrived, compute *crc over it.
 */
static int read_partial_message_section(struct ceph_connection *con,
					struct kvec *section,
					unsigned int sec_len, u32 *crc)
{
	int ret, left;
2264 while (section->iov_len < sec_len) {
2265 BUG_ON(section->iov_base == NULL);
2266 left = sec_len - section->iov_len;
2267 ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
2268 section->iov_len, left);
2271 section->iov_len += ret;
2273 if (section->iov_len == sec_len)
2274 *crc = crc32c(0, section->iov_base, section->iov_len);
2279 static int read_partial_msg_data(struct ceph_connection *con)
2281 struct ceph_msg *msg = con->in_msg;
2282 struct ceph_msg_data_cursor *cursor = &msg->cursor;
2283 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
2291 if (list_empty(&msg->data))
2295 crc = con->in_data_crc;
2296 while (cursor->resid) {
2297 page = ceph_msg_data_next(cursor, &page_offset, &length, NULL);
2298 ret = ceph_tcp_recvpage(con->sock, page, page_offset, length);
2301 con->in_data_crc = crc;
2307 crc = ceph_crc32c_page(crc, page, page_offset, ret);
2308 (void) ceph_msg_data_advance(cursor, (size_t)ret);
2311 con->in_data_crc = crc;
2313 return 1; /* must return > 0 to indicate success */
2317 * read (part of) a message.
2319 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip);
2321 static int read_partial_message(struct ceph_connection *con)
2323 struct ceph_msg *m = con->in_msg;
2327 unsigned int front_len, middle_len, data_len;
2328 bool do_datacrc = !ceph_test_opt(from_msgr(con->msgr), NOCRC);
2329 bool need_sign = (con->peer_features & CEPH_FEATURE_MSG_AUTH);
2333 dout("read_partial_message con %p msg %p\n", con, m);
2336 size = sizeof (con->in_hdr);
2338 ret = read_partial(con, end, size, &con->in_hdr);
2342 crc = crc32c(0, &con->in_hdr, offsetof(struct ceph_msg_header, crc));
2343 if (cpu_to_le32(crc) != con->in_hdr.crc) {
2344 pr_err("read_partial_message bad hdr crc %u != expected %u\n",
2345 crc, con->in_hdr.crc);
2349 front_len = le32_to_cpu(con->in_hdr.front_len);
2350 if (front_len > CEPH_MSG_MAX_FRONT_LEN)
2352 middle_len = le32_to_cpu(con->in_hdr.middle_len);
2353 if (middle_len > CEPH_MSG_MAX_MIDDLE_LEN)
2355 data_len = le32_to_cpu(con->in_hdr.data_len);
2356 if (data_len > CEPH_MSG_MAX_DATA_LEN)
2360 seq = le64_to_cpu(con->in_hdr.seq);
2361 if ((s64)seq - (s64)con->in_seq < 1) {
2362 pr_info("skipping %s%lld %s seq %lld expected %lld\n",
2363 ENTITY_NAME(con->peer_name),
2364 ceph_pr_addr(&con->peer_addr.in_addr),
2365 seq, con->in_seq + 1);
2366 con->in_base_pos = -front_len - middle_len - data_len -
2368 con->in_tag = CEPH_MSGR_TAG_READY;
2370 } else if ((s64)seq - (s64)con->in_seq > 1) {
2371 pr_err("read_partial_message bad seq %lld expected %lld\n",
2372 seq, con->in_seq + 1);
2373 con->error_msg = "bad message sequence # for incoming message";
2377 /* allocate message? */
2381 dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
2382 front_len, data_len);
2383 ret = ceph_con_in_msg_alloc(con, &skip);
2387 BUG_ON(!con->in_msg ^ skip);
2389 /* skip this message */
2390 dout("alloc_msg said skip message\n");
2391 con->in_base_pos = -front_len - middle_len - data_len -
2393 con->in_tag = CEPH_MSGR_TAG_READY;
2398 BUG_ON(!con->in_msg);
2399 BUG_ON(con->in_msg->con != con);
2401 m->front.iov_len = 0; /* haven't read it yet */
2403 m->middle->vec.iov_len = 0;
2405 /* prepare for data payload, if any */
2408 prepare_message_data(con->in_msg, data_len);
2412 ret = read_partial_message_section(con, &m->front, front_len,
2413 &con->in_front_crc);
2419 ret = read_partial_message_section(con, &m->middle->vec,
2421 &con->in_middle_crc);
2428 ret = read_partial_msg_data(con);
	if (need_sign)
		size = sizeof(m->footer);
	else
		size = sizeof(m->old_footer);
	ret = read_partial(con, end, size, &m->footer);
	if (!need_sign)
		m->footer.flags = m->old_footer.flags;
2449 dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
2450 m, front_len, m->footer.front_crc, middle_len,
2451 m->footer.middle_crc, data_len, m->footer.data_crc);
2454 if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
2455 pr_err("read_partial_message %p front crc %u != exp. %u\n",
2456 m, con->in_front_crc, m->footer.front_crc);
2459 if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
2460 pr_err("read_partial_message %p middle crc %u != exp %u\n",
2461 m, con->in_middle_crc, m->footer.middle_crc);
	if (do_datacrc &&
	    (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
2466 con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
2467 pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
2468 con->in_data_crc, le32_to_cpu(m->footer.data_crc));
2472 if (need_sign && con->ops->check_message_signature &&
2473 con->ops->check_message_signature(m)) {
2474 pr_err("read_partial_message %p signature check failed\n", m);
2478 return 1; /* done! */
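
/*
 * Illustrative sketch (not part of the messenger; the helper is
 * hypothetical): the header crc covers every header byte that precedes the
 * crc field itself, which is why the check above hashes only
 * offsetof(struct ceph_msg_header, crc) bytes and compares the result,
 * converted to little endian, against the value carried in the header.
 */
static bool __maybe_unused example_hdr_crc_ok(const struct ceph_msg_header *hdr)
{
	u32 crc = crc32c(0, hdr, offsetof(struct ceph_msg_header, crc));

	return cpu_to_le32(crc) == hdr->crc;
}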
2482 * Process message. This happens in the worker thread. The callback should
 * be careful not to do anything that waits on other incoming messages or it
 * may deadlock.
2486 static void process_message(struct ceph_connection *con)
2488 struct ceph_msg *msg = con->in_msg;
2490 BUG_ON(con->in_msg->con != con);
2493 /* if first message, set peer_name */
2494 if (con->peer_name.type == 0)
2495 con->peer_name = msg->hdr.src;
2498 mutex_unlock(&con->mutex);
2500 dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
2501 msg, le64_to_cpu(msg->hdr.seq),
2502 ENTITY_NAME(msg->hdr.src),
2503 le16_to_cpu(msg->hdr.type),
2504 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
2505 le32_to_cpu(msg->hdr.front_len),
2506 le32_to_cpu(msg->hdr.data_len),
2507 con->in_front_crc, con->in_middle_crc, con->in_data_crc);
2508 con->ops->dispatch(con, msg);
2510 mutex_lock(&con->mutex);
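
/*
 * Illustrative sketch of a dispatch hook (a hypothetical client, not one of
 * the real ones): handle the message or hand it off to another context, but
 * never wait here for further incoming messages, since the read path for
 * this connection is blocked until dispatch returns.  The hook owns the
 * reference it is given and must put it.
 */
static void __maybe_unused example_dispatch(struct ceph_connection *con,
					    struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);

	dout("%s con %p msg %p type %d\n", __func__, con, msg, type);
	switch (type) {
	/* case CEPH_MSG_...: handle it, or queue it for a worker */
	default:
		pr_err("%s: unexpected message type %d\n", __func__, type);
		break;
	}
	ceph_msg_put(msg);
}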
2513 static int read_keepalive_ack(struct ceph_connection *con)
2515 struct ceph_timespec ceph_ts;
2516 size_t size = sizeof(ceph_ts);
2517 int ret = read_partial(con, size, size, &ceph_ts);
2520 ceph_decode_timespec(&con->last_keepalive_ack, &ceph_ts);
2521 prepare_read_tag(con);
2526 * Write something to the socket. Called in a worker thread when the
2527 * socket appears to be writeable and we have something ready to send.
2529 static int try_write(struct ceph_connection *con)
2533 dout("try_write start %p state %lu\n", con, con->state);
2536 dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);
2538 /* open the socket first? */
2539 if (con->state == CON_STATE_PREOPEN) {
2541 con->state = CON_STATE_CONNECTING;
2543 con_out_kvec_reset(con);
2544 prepare_write_banner(con);
2545 prepare_read_banner(con);
2547 BUG_ON(con->in_msg);
2548 con->in_tag = CEPH_MSGR_TAG_READY;
		dout("try_write initiating connect on %p new state %lu\n",
		     con, con->state);
2551 ret = ceph_tcp_connect(con);
2553 con->error_msg = "connect error";
2559 /* kvec data queued? */
2560 if (con->out_kvec_left) {
2561 ret = write_partial_kvec(con);
2565 if (con->out_skip) {
2566 ret = write_partial_skip(con);
2573 if (con->out_msg_done) {
2574 ceph_msg_put(con->out_msg);
2575 con->out_msg = NULL; /* we're done with this one */
2579 ret = write_partial_message_data(con);
2581 goto more_kvec; /* we need to send the footer, too! */
			dout("try_write write_partial_message_data err %d\n",
			     ret);
2592 if (con->state == CON_STATE_OPEN) {
2593 if (con_flag_test_and_clear(con, CON_FLAG_KEEPALIVE_PENDING)) {
2594 prepare_write_keepalive(con);
2597 /* is anything else pending? */
2598 if (!list_empty(&con->out_queue)) {
2599 prepare_write_message(con);
2602 if (con->in_seq > con->in_seq_acked) {
2603 prepare_write_ack(con);
2608 /* Nothing to do! */
2609 con_flag_clear(con, CON_FLAG_WRITE_PENDING);
2610 dout("try_write nothing else to write.\n");
2613 dout("try_write done on %p ret %d\n", con, ret);
2620 * Read what we can from the socket.
2622 static int try_read(struct ceph_connection *con)
2627 dout("try_read start on %p state %lu\n", con, con->state);
2628 if (con->state != CON_STATE_CONNECTING &&
2629 con->state != CON_STATE_NEGOTIATING &&
2630 con->state != CON_STATE_OPEN)
	dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
	     con->in_base_pos);
2638 if (con->state == CON_STATE_CONNECTING) {
2639 dout("try_read connecting\n");
2640 ret = read_partial_banner(con);
2643 ret = process_banner(con);
2647 con->state = CON_STATE_NEGOTIATING;
2650 * Received banner is good, exchange connection info.
2651 * Do not reset out_kvec, as sending our banner raced
2652 * with receiving peer banner after connect completed.
2654 ret = prepare_write_connect(con);
2657 prepare_read_connect(con);
2659 /* Send connection info before awaiting response */
2663 if (con->state == CON_STATE_NEGOTIATING) {
2664 dout("try_read negotiating\n");
2665 ret = read_partial_connect(con);
2668 ret = process_connect(con);
2674 WARN_ON(con->state != CON_STATE_OPEN);
2676 if (con->in_base_pos < 0) {
		/*
		 * skipping + discarding content.
		 *
		 * FIXME: there must be a better way to do this!
		 */
2682 static char buf[SKIP_BUF_SIZE];
2683 int skip = min((int) sizeof (buf), -con->in_base_pos);
2685 dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
2686 ret = ceph_tcp_recvmsg(con->sock, buf, skip);
2689 con->in_base_pos += ret;
2690 if (con->in_base_pos)
2693 if (con->in_tag == CEPH_MSGR_TAG_READY) {
2697 ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
2700 dout("try_read got tag %d\n", (int)con->in_tag);
2701 switch (con->in_tag) {
2702 case CEPH_MSGR_TAG_MSG:
			prepare_read_message(con);
			break;
		case CEPH_MSGR_TAG_ACK:
			prepare_read_ack(con);
			break;
		case CEPH_MSGR_TAG_KEEPALIVE2_ACK:
			prepare_read_keepalive_ack(con);
			break;
2711 case CEPH_MSGR_TAG_CLOSE:
2712 con_close_socket(con);
2713 con->state = CON_STATE_CLOSED;
2719 if (con->in_tag == CEPH_MSGR_TAG_MSG) {
2720 ret = read_partial_message(con);
2724 con->error_msg = "bad crc/signature";
2730 con->error_msg = "io error";
		if (con->in_tag == CEPH_MSGR_TAG_READY)
			goto more;
		process_message(con);
2738 if (con->state == CON_STATE_OPEN)
2739 prepare_read_tag(con);
2742 if (con->in_tag == CEPH_MSGR_TAG_ACK ||
2743 con->in_tag == CEPH_MSGR_TAG_SEQ) {
2745 * the final handshake seq exchange is semantically
2746 * equivalent to an ACK
2748 ret = read_partial_ack(con);
2754 if (con->in_tag == CEPH_MSGR_TAG_KEEPALIVE2_ACK) {
2755 ret = read_keepalive_ack(con);
2762 dout("try_read done on %p ret %d\n", con, ret);
2766 pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
2767 con->error_msg = "protocol error, garbage tag";
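
/*
 * Illustrative sketch (hypothetical helper, not the real control flow): a
 * negative in_base_pos means "this many bytes still belong to a message we
 * decided to drop".  The skip branch above consumes them in SKIP_BUF_SIZE
 * chunks; the real code returns to the worker loop between chunks instead
 * of looping like this.
 */
static int __maybe_unused example_drain_skipped_bytes(struct ceph_connection *con)
{
	static char buf[SKIP_BUF_SIZE];

	while (con->in_base_pos < 0) {
		int skip = min((int)sizeof(buf), -con->in_base_pos);
		int ret = ceph_tcp_recvmsg(con->sock, buf, skip);

		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	return 1;
}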
2774 * Atomically queue work on a connection after the specified delay.
2775 * Bump @con reference to avoid races with connection teardown.
2776 * Returns 0 if work was queued, or an error code otherwise.
2778 static int queue_con_delay(struct ceph_connection *con, unsigned long delay)
	if (!con->ops->get(con)) {
		dout("%s %p ref count 0\n", __func__, con);
		return -ENOENT;
	}
	if (!queue_delayed_work(ceph_msgr_wq, &con->work, delay)) {
		dout("%s %p - already queued\n", __func__, con);
		con->ops->put(con);
		return -EBUSY;
	}
	dout("%s %p %lu\n", __func__, con, delay);
	return 0;
2795 static void queue_con(struct ceph_connection *con)
2797 (void) queue_con_delay(con, 0);
2800 static void cancel_con(struct ceph_connection *con)
2802 if (cancel_delayed_work(&con->work)) {
2803 dout("%s %p\n", __func__, con);
2808 static bool con_sock_closed(struct ceph_connection *con)
2810 if (!con_flag_test_and_clear(con, CON_FLAG_SOCK_CLOSED))
2814 case CON_STATE_ ## x: \
2815 con->error_msg = "socket closed (con state " #x ")"; \
2818 switch (con->state) {
2826 pr_warn("%s con %p unrecognized state %lu\n",
2827 __func__, con, con->state);
2828 con->error_msg = "unrecognized con state";
2837 static bool con_backoff(struct ceph_connection *con)
2841 if (!con_flag_test_and_clear(con, CON_FLAG_BACKOFF))
2844 ret = queue_con_delay(con, round_jiffies_relative(con->delay));
		dout("%s: con %p FAILED to back off %lu\n", __func__,
		     con, con->delay);
		BUG_ON(ret == -ENOENT);
2849 con_flag_set(con, CON_FLAG_BACKOFF);
2855 /* Finish fault handling; con->mutex must *not* be held here */
2857 static void con_fault_finish(struct ceph_connection *con)
2860 * in case we faulted due to authentication, invalidate our
2861 * current tickets so that we can get new ones.
2863 if (con->auth_retry && con->ops->invalidate_authorizer) {
2864 dout("calling invalidate_authorizer()\n");
2865 con->ops->invalidate_authorizer(con);
2868 if (con->ops->fault)
2869 con->ops->fault(con);
2873 * Do some work on a connection. Drop a connection ref when we're done.
2875 static void ceph_con_workfn(struct work_struct *work)
	struct ceph_connection *con = container_of(work, struct ceph_connection,
						   work.work);
2881 mutex_lock(&con->mutex);
2885 if ((fault = con_sock_closed(con))) {
2886 dout("%s: con %p SOCK_CLOSED\n", __func__, con);
2889 if (con_backoff(con)) {
2890 dout("%s: con %p BACKOFF\n", __func__, con);
2893 if (con->state == CON_STATE_STANDBY) {
2894 dout("%s: con %p STANDBY\n", __func__, con);
2897 if (con->state == CON_STATE_CLOSED) {
2898 dout("%s: con %p CLOSED\n", __func__, con);
2902 if (con->state == CON_STATE_PREOPEN) {
2903 dout("%s: con %p PREOPEN\n", __func__, con);
2907 ret = try_read(con);
2911 if (!con->error_msg)
2912 con->error_msg = "socket error on read";
2917 ret = try_write(con);
2921 if (!con->error_msg)
2922 con->error_msg = "socket error on write";
2926 break; /* If we make it to here, we're done */
2930 mutex_unlock(&con->mutex);
2933 con_fault_finish(con);
2939 * Generic error/fault handler. A retry mechanism is used with
 * exponential backoff.
2942 static void con_fault(struct ceph_connection *con)
2944 dout("fault %p state %lu to peer %s\n",
2945 con, con->state, ceph_pr_addr(&con->peer_addr.in_addr));
2947 pr_warn("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
2948 ceph_pr_addr(&con->peer_addr.in_addr), con->error_msg);
2949 con->error_msg = NULL;
2951 WARN_ON(con->state != CON_STATE_CONNECTING &&
2952 con->state != CON_STATE_NEGOTIATING &&
2953 con->state != CON_STATE_OPEN);
2955 con_close_socket(con);
2957 if (con_flag_test(con, CON_FLAG_LOSSYTX)) {
2958 dout("fault on LOSSYTX channel, marking CLOSED\n");
2959 con->state = CON_STATE_CLOSED;
2964 BUG_ON(con->in_msg->con != con);
2965 ceph_msg_put(con->in_msg);
2969 /* Requeue anything that hasn't been acked */
2970 list_splice_init(&con->out_sent, &con->out_queue);
2972 /* If there are no messages queued or keepalive pending, place
2973 * the connection in a STANDBY state */
2974 if (list_empty(&con->out_queue) &&
2975 !con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING)) {
2976 dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con);
2977 con_flag_clear(con, CON_FLAG_WRITE_PENDING);
2978 con->state = CON_STATE_STANDBY;
2980 /* retry after a delay. */
2981 con->state = CON_STATE_PREOPEN;
2982 if (con->delay == 0)
2983 con->delay = BASE_DELAY_INTERVAL;
	else if (con->delay < MAX_DELAY_INTERVAL)
		con->delay *= 2;
2986 con_flag_set(con, CON_FLAG_BACKOFF);
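
/*
 * Illustrative sketch of the delay progression used above (hypothetical
 * helper): start at BASE_DELAY_INTERVAL on the first fault and double on
 * each consecutive fault until MAX_DELAY_INTERVAL is reached; the
 * backed-off work is then queued with round_jiffies_relative().
 */
static unsigned long __maybe_unused example_next_backoff(unsigned long delay)
{
	if (delay == 0)
		return BASE_DELAY_INTERVAL;
	if (delay < MAX_DELAY_INTERVAL)
		delay *= 2;
	return delay;
}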
2994 * initialize a new messenger instance
2996 void ceph_messenger_init(struct ceph_messenger *msgr,
2997 struct ceph_entity_addr *myaddr)
2999 spin_lock_init(&msgr->global_seq_lock);
3002 msgr->inst.addr = *myaddr;
3004 /* select a random nonce */
3005 msgr->inst.addr.type = 0;
3006 get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
3007 encode_my_addr(msgr);
3009 atomic_set(&msgr->stopping, 0);
3010 write_pnet(&msgr->net, get_net(current->nsproxy->net_ns));
3012 dout("%s %p\n", __func__, msgr);
3014 EXPORT_SYMBOL(ceph_messenger_init);
3016 void ceph_messenger_fini(struct ceph_messenger *msgr)
3018 put_net(read_pnet(&msgr->net));
3020 EXPORT_SYMBOL(ceph_messenger_fini);
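
/*
 * Illustrative sketch (hypothetical caller): a client embeds one
 * ceph_messenger, initializes it before opening any connections and tears
 * it down last, after every connection has been closed.
 */
static void __maybe_unused example_messenger_lifetime(struct ceph_messenger *msgr,
						      struct ceph_entity_addr *myaddr)
{
	ceph_messenger_init(msgr, myaddr);	/* myaddr pins the local address */
	/* ... ceph_con_init()/ceph_con_open() connections, exchange messages ... */
	ceph_messenger_fini(msgr);
}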
3022 static void msg_con_set(struct ceph_msg *msg, struct ceph_connection *con)
	if (msg->con)
		msg->con->ops->put(msg->con);
3027 msg->con = con ? con->ops->get(con) : NULL;
3028 BUG_ON(msg->con != con);
3031 static void clear_standby(struct ceph_connection *con)
3033 /* come back from STANDBY? */
3034 if (con->state == CON_STATE_STANDBY) {
3035 dout("clear_standby %p and ++connect_seq\n", con);
		con->state = CON_STATE_PREOPEN;
		con->connect_seq++;
3038 WARN_ON(con_flag_test(con, CON_FLAG_WRITE_PENDING));
3039 WARN_ON(con_flag_test(con, CON_FLAG_KEEPALIVE_PENDING));
3044 * Queue up an outgoing message on the given connection.
3046 void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
3049 msg->hdr.src = con->msgr->inst.name;
3050 BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));
3051 msg->needs_out_seq = true;
3053 mutex_lock(&con->mutex);
3055 if (con->state == CON_STATE_CLOSED) {
3056 dout("con_send %p closed, dropping %p\n", con, msg);
3058 mutex_unlock(&con->mutex);
3062 msg_con_set(msg, con);
3064 BUG_ON(!list_empty(&msg->list_head));
3065 list_add_tail(&msg->list_head, &con->out_queue);
3066 dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
3067 ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
3068 ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
3069 le32_to_cpu(msg->hdr.front_len),
3070 le32_to_cpu(msg->hdr.middle_len),
3071 le32_to_cpu(msg->hdr.data_len));
3074 mutex_unlock(&con->mutex);
	/* if there wasn't anything waiting to send before, queue
	 * new work */
3078 if (con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
3081 EXPORT_SYMBOL(ceph_con_send);
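
/*
 * Illustrative usage sketch (hypothetical caller, not from this file):
 * build a message and hand it to a connection.  The message type and the
 * empty front are purely illustrative; the messenger stamps hdr.src and
 * the outgoing seq itself, and ceph_con_send() consumes the reference
 * returned by ceph_msg_new().
 */
static int __maybe_unused example_send_ping(struct ceph_connection *con)
{
	struct ceph_msg *msg;

	msg = ceph_msg_new(CEPH_MSG_PING, 0, GFP_NOFS, true);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(con, msg);
	return 0;
}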
3084 * Revoke a message that was previously queued for send
3086 void ceph_msg_revoke(struct ceph_msg *msg)
3088 struct ceph_connection *con = msg->con;
3091 dout("%s msg %p null con\n", __func__, msg);
3092 return; /* Message not in our possession */
3095 mutex_lock(&con->mutex);
3096 if (!list_empty(&msg->list_head)) {
3097 dout("%s %p msg %p - was on queue\n", __func__, con, msg);
3098 list_del_init(&msg->list_head);
3103 if (con->out_msg == msg) {
3104 BUG_ON(con->out_skip);
		/* footer */
		if (con->out_msg_done) {
			con->out_skip += con_out_kvec_skip(con);
		} else {
			BUG_ON(!msg->data_length);
			if (con->peer_features & CEPH_FEATURE_MSG_AUTH)
				con->out_skip += sizeof(msg->footer);
			else
				con->out_skip += sizeof(msg->old_footer);
		}
		/* data, middle, front */
		if (msg->data_length)
			con->out_skip += msg->cursor.total_resid;
		if (msg->middle)
			con->out_skip += con_out_kvec_skip(con);
		con->out_skip += con_out_kvec_skip(con);
3122 dout("%s %p msg %p - was sending, will write %d skip %d\n",
3123 __func__, con, msg, con->out_kvec_bytes, con->out_skip);
3125 con->out_msg = NULL;
3129 mutex_unlock(&con->mutex);
3133 * Revoke a message that we may be reading data into
3135 void ceph_msg_revoke_incoming(struct ceph_msg *msg)
3137 struct ceph_connection *con = msg->con;
3140 dout("%s msg %p null con\n", __func__, msg);
3141 return; /* Message not in our possession */
3144 mutex_lock(&con->mutex);
3145 if (con->in_msg == msg) {
3146 unsigned int front_len = le32_to_cpu(con->in_hdr.front_len);
3147 unsigned int middle_len = le32_to_cpu(con->in_hdr.middle_len);
3148 unsigned int data_len = le32_to_cpu(con->in_hdr.data_len);
3150 /* skip rest of message */
3151 dout("%s %p msg %p revoked\n", __func__, con, msg);
		con->in_base_pos = con->in_base_pos -
			sizeof(struct ceph_msg_header) -
			front_len -
			middle_len -
			data_len -
			sizeof(struct ceph_msg_footer);
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
3160 con->in_tag = CEPH_MSGR_TAG_READY;
3163 dout("%s %p in_msg %p msg %p no-op\n",
3164 __func__, con, con->in_msg, msg);
3166 mutex_unlock(&con->mutex);
3170 * Queue a keepalive byte to ensure the tcp connection is alive.
3172 void ceph_con_keepalive(struct ceph_connection *con)
3174 dout("con_keepalive %p\n", con);
3175 mutex_lock(&con->mutex);
	clear_standby(con);
	mutex_unlock(&con->mutex);
3178 if (con_flag_test_and_set(con, CON_FLAG_KEEPALIVE_PENDING) == 0 &&
3179 con_flag_test_and_set(con, CON_FLAG_WRITE_PENDING) == 0)
3182 EXPORT_SYMBOL(ceph_con_keepalive);
3184 bool ceph_con_keepalive_expired(struct ceph_connection *con,
3185 unsigned long interval)
3188 (con->peer_features & CEPH_FEATURE_MSGR_KEEPALIVE2)) {
		struct timespec now = CURRENT_TIME;
		struct timespec ts;

		jiffies_to_timespec(interval, &ts);
3192 ts = timespec_add(con->last_keepalive_ack, ts);
3193 return timespec_compare(&now, &ts) >= 0;
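
/*
 * Illustrative usage sketch (hypothetical caller): a periodic timer can
 * treat a missing KEEPALIVE2 ack as a hung connection and then request a
 * fresh keepalive for the next round.  The warning is placeholder
 * handling, not what the real clients do.
 */
static void __maybe_unused example_keepalive_tick(struct ceph_connection *con,
						  unsigned long timeout)
{
	if (ceph_con_keepalive_expired(con, timeout))
		pr_warn("connection %p saw no keepalive ack within the timeout\n",
			con);
	ceph_con_keepalive(con);
}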
3198 static struct ceph_msg_data *ceph_msg_data_create(enum ceph_msg_data_type type)
3200 struct ceph_msg_data *data;
	if (WARN_ON(!ceph_msg_data_type_valid(type)))
		return NULL;

	data = kmem_cache_zalloc(ceph_msg_data_cache, GFP_NOFS);
3208 INIT_LIST_HEAD(&data->links);
3213 static void ceph_msg_data_destroy(struct ceph_msg_data *data)
3218 WARN_ON(!list_empty(&data->links));
3219 if (data->type == CEPH_MSG_DATA_PAGELIST)
3220 ceph_pagelist_release(data->pagelist);
3221 kmem_cache_free(ceph_msg_data_cache, data);
3224 void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
3225 size_t length, size_t alignment)
3227 struct ceph_msg_data *data;
3232 data = ceph_msg_data_create(CEPH_MSG_DATA_PAGES);
3234 data->pages = pages;
3235 data->length = length;
3236 data->alignment = alignment & ~PAGE_MASK;
3238 list_add_tail(&data->links, &msg->data);
3239 msg->data_length += length;
3241 EXPORT_SYMBOL(ceph_msg_data_add_pages);
3243 void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
3244 struct ceph_pagelist *pagelist)
3246 struct ceph_msg_data *data;
3249 BUG_ON(!pagelist->length);
3251 data = ceph_msg_data_create(CEPH_MSG_DATA_PAGELIST);
3253 data->pagelist = pagelist;
3255 list_add_tail(&data->links, &msg->data);
3256 msg->data_length += pagelist->length;
3258 EXPORT_SYMBOL(ceph_msg_data_add_pagelist);
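
/*
 * Illustrative sketch (hypothetical helper): build a pagelist from a flat
 * buffer and attach it as the message's data payload.  Once attached, the
 * message owns the pagelist and releases it from ceph_msg_data_destroy()
 * when the last reference is dropped.
 */
static int __maybe_unused example_attach_buffer(struct ceph_msg *msg,
						const void *buf, size_t len)
{
	struct ceph_pagelist *pl;
	int ret;

	pl = kmalloc(sizeof(*pl), GFP_NOFS);
	if (!pl)
		return -ENOMEM;
	ceph_pagelist_init(pl);
	ret = ceph_pagelist_append(pl, buf, len);
	if (ret) {
		/* mirror ceph_msg_data_destroy(); depending on the
		 * pagelist version the struct may also need kfree() */
		ceph_pagelist_release(pl);
		return ret;
	}
	ceph_msg_data_add_pagelist(msg, pl);
	return 0;
}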
3261 void ceph_msg_data_add_bio(struct ceph_msg *msg, struct bio *bio,
3264 struct ceph_msg_data *data;
3268 data = ceph_msg_data_create(CEPH_MSG_DATA_BIO);
	data->bio = bio;
	data->bio_length = length;
3273 list_add_tail(&data->links, &msg->data);
3274 msg->data_length += length;
3276 EXPORT_SYMBOL(ceph_msg_data_add_bio);
3277 #endif /* CONFIG_BLOCK */
3280 * construct a new message with given type, size
3281 * the new msg has a ref count of 1.
3283 struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags,
3288 m = kmem_cache_zalloc(ceph_msg_cache, flags);
3292 m->hdr.type = cpu_to_le16(type);
3293 m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
3294 m->hdr.front_len = cpu_to_le32(front_len);
3296 INIT_LIST_HEAD(&m->list_head);
3297 kref_init(&m->kref);
3298 INIT_LIST_HEAD(&m->data);
3302 m->front.iov_base = ceph_kvmalloc(front_len, flags);
3303 if (m->front.iov_base == NULL) {
		dout("ceph_msg_new can't allocate %d bytes\n",
		     front_len);
3309 m->front.iov_base = NULL;
3311 m->front_alloc_len = m->front.iov_len = front_len;
3313 dout("ceph_msg_new %p front %d\n", m, front_len);
3320 pr_err("msg_new can't create type %d front %d\n", type,
3324 dout("msg_new can't create type %d front %d\n", type,
3329 EXPORT_SYMBOL(ceph_msg_new);
3332 * Allocate "middle" portion of a message, if it is needed and wasn't
3333 * allocated by alloc_msg. This allows us to read a small fixed-size
3334 * per-type header in the front and then gracefully fail (i.e.,
3335 * propagate the error to the caller based on info in the front) when
3336 * the middle is too large.
3338 static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
3340 int type = le16_to_cpu(msg->hdr.type);
3341 int middle_len = le32_to_cpu(msg->hdr.middle_len);
3343 dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
3344 ceph_msg_type_name(type), middle_len);
3345 BUG_ON(!middle_len);
3346 BUG_ON(msg->middle);
3348 msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
3355 * Allocate a message for receiving an incoming message on a
3356 * connection, and save the result in con->in_msg. Uses the
3357 * connection's private alloc_msg op if available.
3359 * Returns 0 on success, or a negative error code.
3361 * On success, if we set *skip = 1:
3362 * - the next message should be skipped and ignored.
3363 * - con->in_msg == NULL
3364 * or if we set *skip = 0:
3365 * - con->in_msg is non-null.
3366 * On error (ENOMEM, EAGAIN, ...),
3367 * - con->in_msg == NULL
3369 static int ceph_con_in_msg_alloc(struct ceph_connection *con, int *skip)
3371 struct ceph_msg_header *hdr = &con->in_hdr;
3372 int middle_len = le32_to_cpu(hdr->middle_len);
3373 struct ceph_msg *msg;
3376 BUG_ON(con->in_msg != NULL);
3377 BUG_ON(!con->ops->alloc_msg);
3379 mutex_unlock(&con->mutex);
3380 msg = con->ops->alloc_msg(con, hdr, skip);
3381 mutex_lock(&con->mutex);
3382 if (con->state != CON_STATE_OPEN) {
3389 msg_con_set(msg, con);
3393 * Null message pointer means either we should skip
3394 * this message or we couldn't allocate memory. The
3395 * former is not an error.
3400 con->error_msg = "error allocating memory for incoming message";
3403 memcpy(&con->in_msg->hdr, &con->in_hdr, sizeof(con->in_hdr));
3405 if (middle_len && !con->in_msg->middle) {
3406 ret = ceph_alloc_middle(con, con->in_msg);
3408 ceph_msg_put(con->in_msg);
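
/*
 * Illustrative sketch of a connection's alloc_msg hook (hypothetical
 * client; the only-PING policy is made up): allocate space based on the
 * incoming header, or set *skip to have the message discarded.  Returning
 * NULL with *skip == 0 is treated as an allocation failure by the caller
 * above.
 */
static __maybe_unused struct ceph_msg *example_alloc_msg(struct ceph_connection *con,
							  struct ceph_msg_header *hdr,
							  int *skip)
{
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	struct ceph_msg *msg;

	dout("%s con %p type %d front %d\n", __func__, con, type, front_len);
	if (type != CEPH_MSG_PING) {
		*skip = 1;		/* not an error: drop the message */
		return NULL;
	}

	msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
	if (!msg)
		return NULL;		/* out of memory */
	*skip = 0;
	return msg;
}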
3418 * Free a generically kmalloc'd message.
3420 static void ceph_msg_free(struct ceph_msg *m)
3422 dout("%s %p\n", __func__, m);
3423 kvfree(m->front.iov_base);
3424 kmem_cache_free(ceph_msg_cache, m);
3427 static void ceph_msg_release(struct kref *kref)
3429 struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);
	LIST_HEAD(data);
	struct list_head *links;
3432 struct list_head *next;
3434 dout("%s %p\n", __func__, m);
3435 WARN_ON(!list_empty(&m->list_head));
3437 msg_con_set(m, NULL);
3439 /* drop middle, data, if any */
	if (m->middle) {
		ceph_buffer_put(m->middle);
		m->middle = NULL;
	}
3445 list_splice_init(&m->data, &data);
3446 list_for_each_safe(links, next, &data) {
3447 struct ceph_msg_data *data;
3449 data = list_entry(links, struct ceph_msg_data, links);
3450 list_del_init(links);
3451 ceph_msg_data_destroy(data);
	if (m->pool)
		ceph_msgpool_put(m->pool, m);
	else
		ceph_msg_free(m);
3461 struct ceph_msg *ceph_msg_get(struct ceph_msg *msg)
3463 dout("%s %p (was %d)\n", __func__, msg,
3464 atomic_read(&msg->kref.refcount));
3465 kref_get(&msg->kref);
3468 EXPORT_SYMBOL(ceph_msg_get);
3470 void ceph_msg_put(struct ceph_msg *msg)
3472 dout("%s %p (was %d)\n", __func__, msg,
3473 atomic_read(&msg->kref.refcount));
3474 kref_put(&msg->kref, ceph_msg_release);
3476 EXPORT_SYMBOL(ceph_msg_put);
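
/*
 * Illustrative sketch of message lifetime (hypothetical; the message type
 * is chosen arbitrarily): a new message starts with one reference,
 * additional ceph_msg_get() calls, e.g. for a resend list, must be
 * balanced by ceph_msg_put(), and the final put releases the message (or
 * returns it to its pool).
 */
static void __maybe_unused example_msg_lifetime(void)
{
	struct ceph_msg *msg = ceph_msg_new(CEPH_MSG_PING, 0, GFP_NOFS, true);

	if (!msg)
		return;
	ceph_msg_get(msg);	/* second reference */
	ceph_msg_put(msg);	/* back to one */
	ceph_msg_put(msg);	/* last put releases the message */
}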
3478 void ceph_msg_dump(struct ceph_msg *msg)
3480 pr_debug("msg_dump %p (front_alloc_len %d length %zd)\n", msg,
3481 msg->front_alloc_len, msg->data_length);
3482 print_hex_dump(KERN_DEBUG, "header: ",
3483 DUMP_PREFIX_OFFSET, 16, 1,
3484 &msg->hdr, sizeof(msg->hdr), true);
3485 print_hex_dump(KERN_DEBUG, " front: ",
3486 DUMP_PREFIX_OFFSET, 16, 1,
3487 msg->front.iov_base, msg->front.iov_len, true);
	if (msg->middle)
		print_hex_dump(KERN_DEBUG, "middle: ",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       msg->middle->vec.iov_base,
			       msg->middle->vec.iov_len, true);
3493 print_hex_dump(KERN_DEBUG, "footer: ",
3494 DUMP_PREFIX_OFFSET, 16, 1,
3495 &msg->footer, sizeof(msg->footer), true);
3497 EXPORT_SYMBOL(ceph_msg_dump);