   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
static int drbd_do_features(struct drbd_tconn *tconn);
static int drbd_do_auth(struct drbd_tconn *tconn);
static int drbd_disconnected(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);

#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
/*
 * Some helper functions to deal with singly linked page lists,
 * page->private being our "next" pointer.
 */
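/* Illustrative sketch (editor's note, not part of the driver): with three
 * pages a, b and c, the chain a -> b -> c is expressed purely through
 * page->private:
 *
 *	set_page_private(a, (unsigned long)b);
 *	set_page_private(b, (unsigned long)c);
 *	set_page_private(c, 0);
 *
 * page_chain_next(a) == b, and a private value of 0 terminates the chain;
 * no struct list_head is needed. */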
/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
		tmp = page_chain_next(page);
			break; /* found sufficient pages */
			/* insufficient pages, don't use any of them. */
	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
	while ((tmp = page_chain_next(page)))
static int page_chain_free(struct page *page)
	page_chain_for_each_safe(page, tmp) {
static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
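/* Illustrative (editor's note): if *head currently points at d -> e and the
 * private chain is a -> b -> c, page_chain_add(&head, a, c) splices them to
 * a -> b -> c -> d -> e; the BUG_ON above merely verifies that chain_last
 * really is the tail of chain_first's chain. */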
static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
	struct page *page = NULL;
	struct page *tmp = NULL;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		set_page_private(tmp, (unsigned long)page);

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_alloc_pages will retry this
	 * function "soon". */
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		spin_unlock(&drbd_pp_lock);
static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
		struct list_head *to_be_freed)
	struct drbd_peer_request *peer_req;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list.  Since
	   they are sent in order over the wire, they have to finish
	   in order.  As soon as we see the first one that has not
	   finished, we can stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		peer_req = list_entry(le, struct drbd_peer_request, w.list);
		if (drbd_peer_req_has_active_page(peer_req))
			break;
		list_move(le, to_be_freed);
static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_finished_net_peer_reqs(mdev, &reclaimed);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(mdev, peer_req);
/**
 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
	struct page *page = NULL;

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	nc = rcu_dereference(mdev->tconn->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;

	if (atomic_read(&mdev->pp_in_use) < mxb)
		page = __drbd_alloc_pages(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mxb) {
			page = __drbd_alloc_pages(mdev, number);

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_alloc_pages interrupted!\n");

	finish_wait(&drbd_pp_wait, &wait);

		atomic_add(number, &mdev->pp_in_use);
/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
 * It is also used from inside another spin_lock_irq(&mdev->tconn->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
		i = page_chain_free(page);
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		spin_unlock(&drbd_pp_lock);
	i = atomic_sub_return(i, a);
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			 is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
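/* Usage sketch (editor's illustration, assuming the elided parts of the two
 * functions above): a receiver-side allocate/release cycle pairs up as
 *
 *	struct page *page = drbd_alloc_pages(mdev, nr_pages, 1);
 *	if (page) {
 *		... fill the chain from the socket ...
 *		drbd_free_pages(mdev, page, 0);
 *	}
 *
 * drbd_alloc_pages() adds @number to mdev->pp_in_use; drbd_free_pages()
 * subtracts again and wakes drbd_pp_wait, so another caller blocked on the
 * max_buffers limit can retry. */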
/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_alloc_peer_req()
 drbd_free_peer_reqs()
 drbd_finish_peer_reqs()
 drbd_wait_ee_list_empty()
*/
struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
		    unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
	struct drbd_peer_request *peer_req;
	struct page *page = NULL;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))

	peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "%s: allocation failed\n", __func__);

		page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));

	drbd_clear_interval(&peer_req->i);
	peer_req->i.size = data_size;
	peer_req->i.sector = sector;
	peer_req->i.local = false;
	peer_req->i.waiting = false;

	peer_req->epoch = NULL;
	peer_req->w.mdev = mdev;
	peer_req->pages = page;
	atomic_set(&peer_req->pending_bios, 0);
	/*
	 * The block_id is opaque to the receiver.  It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	peer_req->block_id = id;
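	/* Editor's cross-reference (illustrative): both users of block_id are
	 * visible further down in this file.  Data packets carry the sender's
	 * struct drbd_request pointer, which find_request() casts back, while
	 * resync packets use the constant ID_SYNCER, as recv_resync_read()
	 * and the D_ASSERT in receive_RSDataReply() show. */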
	mempool_free(peer_req, drbd_ee_mempool);
void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
	if (peer_req->flags & EE_HAS_DIGEST)
		kfree(peer_req->digest);
	drbd_free_pages(mdev, peer_req->pages, is_net);
	D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
	D_ASSERT(drbd_interval_empty(&peer_req->i));
	mempool_free(peer_req, drbd_ee_mempool);
int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
	LIST_HEAD(work_list);
	struct drbd_peer_request *peer_req, *t;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		__drbd_free_peer_req(mdev, peer_req, is_net);
 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_finished_net_peer_reqs(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(mdev, peer_req);

	/* possible callbacks here:
	 * e_end_block, e_end_resync_block, and e_send_discard_write;
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		err2 = peer_req->w.cb(&peer_req->w, !!err);
		drbd_free_peer_req(mdev, peer_req);
	wake_up(&mdev->ee_wait);
static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
				     struct list_head *head)
	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->tconn->req_lock);
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->tconn->req_lock);
static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
				    struct list_head *head)
	spin_lock_irq(&mdev->tconn->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->tconn->req_lock);
static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
	struct msghdr msg = {
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};

	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
	struct msghdr msg = {
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};

	rv = sock_recvmsg(tconn->data.socket, &msg, size, msg.msg_flags);

	/*
	 * ECONNRESET	other side closed the connection
	 * ERESTARTSYS	(on sock) we got a signal
	 */
		if (rv == -ECONNRESET)
			conn_info(tconn, "sock was reset by peer\n");
		else if (rv != -ERESTARTSYS)
			conn_err(tconn, "sock_recvmsg returned %d\n", rv);
	} else if (rv == 0) {
		conn_info(tconn, "sock was shut down by peer\n");

	/* signal came in, or peer/link went down,
	 * after we read a partial message
	 */
	/* D_ASSERT(signal_pending(current)); */

		conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
	err = drbd_recv(tconn, buf, size);
static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
	err = drbd_recv_all(tconn, buf, size);
	if (err && !signal_pending(current))
		conn_warn(tconn, "short read (expected size %d)\n", (int)size);
/* On individual connections, the socket buffer size must be set prior to the
 * listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
	/* open coded SO_SNDBUF, SO_RCVBUF */
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
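/* Editor's note (illustrative): this mirrors what a user-space
 * setsockopt(fd, SOL_SOCKET, SO_SNDBUF/SO_RCVBUF, ...) would do; setting the
 * SOCK_SNDBUF_LOCK/SOCK_RCVBUF_LOCK bits keeps the kernel's buffer
 * auto-tuning from later overriding the explicitly configured sizes. */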
static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
	struct sockaddr_in6 src_in6;
	struct sockaddr_in6 peer_in6;
	int err, peer_addr_len, my_addr_len;
	int sndbuf_size, rcvbuf_size, connect_int;
	int disconnect_on_error = 1;

	nc = rcu_dereference(tconn->net_conf);
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	connect_int = nc->connect_int;

	my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6));
	memcpy(&src_in6, &tconn->my_addr, my_addr_len);

	if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6));
	memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = connect_int * HZ;
	drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	what = "bind before connect";
	err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);

		/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
		/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN: case EHOSTUNREACH:
			disconnect_on_error = 0;
		conn_err(tconn, "%s failed, err = %d\n", what, err);

	if (disconnect_on_error)
		conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
struct accept_wait_data {
	struct drbd_tconn *tconn;
	struct socket *s_listen;
	struct completion door_bell;
	void (*original_sk_state_change)(struct sock *sk);
};
static void incoming_connection(struct sock *sk)
	struct accept_wait_data *ad = sk->sk_user_data;
	struct drbd_tconn *tconn = ad->tconn;

	if (sk->sk_state != TCP_ESTABLISHED)
		conn_warn(tconn, "unexpected tcp state change. sk_state = %d\n", sk->sk_state);

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_state_change = ad->original_sk_state_change;
	sk->sk_user_data = NULL;
	write_unlock_bh(&sk->sk_callback_lock);

	sk->sk_state_change(sk);
	complete(&ad->door_bell);
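/* Editor's summary of the callback dance above (illustrative):
 * prepare_listen_socket() below saves the listen socket's original
 * sk_state_change callback and installs incoming_connection() in its place.
 * When a connection attempt moves the socket towards TCP_ESTABLISHED, the
 * hook restores and invokes the original callback and completes
 * ad->door_bell, which drbd_wait_for_connect() waits on before calling
 * kernel_accept(). */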
static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_data *ad)
	int err, sndbuf_size, rcvbuf_size, my_addr_len;
	struct sockaddr_in6 my_addr;
	struct socket *s_listen;

	nc = rcu_dereference(tconn->net_conf);
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;

	my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6));
	memcpy(&my_addr, &tconn->my_addr, my_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &s_listen);

	s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */
	drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);

	ad->s_listen = s_listen;
	write_lock_bh(&s_listen->sk->sk_callback_lock);
	ad->original_sk_state_change = s_listen->sk->sk_state_change;
	s_listen->sk->sk_state_change = incoming_connection;
	s_listen->sk->sk_user_data = ad;
	write_unlock_bh(&s_listen->sk->sk_callback_lock);

	err = s_listen->ops->listen(s_listen, 5);

		sock_release(s_listen);
	if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
		conn_err(tconn, "%s failed, err = %d\n", what, err);
		conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct accept_wait_data *ad)
	int timeo, connect_int, err = 0;
	struct socket *s_estab = NULL;

	nc = rcu_dereference(tconn->net_conf);
	connect_int = nc->connect_int;

	timeo = connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);

	err = kernel_accept(ad->s_listen, &s_estab, 0);
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			conn_err(tconn, "accept failed, err = %d\n", err);
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
static int decode_header(struct drbd_tconn *, void *, struct packet_info *);
static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
			     enum drbd_packet cmd)
	if (!conn_prepare_command(tconn, sock))
	return conn_send_command(tconn, sock, cmd, 0, NULL, 0);
static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)
	unsigned int header_size = drbd_header_size(tconn);
	struct packet_info pi;

	err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
	if (err != header_size) {
	err = decode_header(tconn, tconn->data.rbuf, &pi);
/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct socket **sock)
	rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
/* Gets called if a connection is established, or if a new minor gets created
 */
int drbd_connected(struct drbd_conf *mdev)
	atomic_set(&mdev->packet_seq, 0);

	mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
		&mdev->tconn->cstate_mutex :
		&mdev->own_state_mutex;

	err = drbd_send_sync_param(mdev);
		err = drbd_send_sizes(mdev, 0, 0);
		err = drbd_send_uuids(mdev);
		err = drbd_send_current_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);
	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
/*
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int conn_connect(struct drbd_tconn *tconn)
	struct drbd_socket sock, msock;
	struct drbd_conf *mdev;
	int vnr, timeout, h, ok;
	bool discard_my_data;
	enum drbd_state_rv rv;
	struct accept_wait_data ad = {
		.door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),
	};

	if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)

	mutex_init(&sock.mutex);
	sock.sbuf = tconn->data.sbuf;
	sock.rbuf = tconn->data.rbuf;
	mutex_init(&msock.mutex);
	msock.sbuf = tconn->meta.sbuf;
	msock.rbuf = tconn->meta.rbuf;

	clear_bit(DISCARD_CONCURRENT, &tconn->flags);

	/* Assume that the peer only understands protocol 80 until we know better. */
	tconn->agreed_pro_version = 80;

	if (prepare_listen_socket(tconn, &ad))

		s = drbd_try_connect(tconn);
			send_first_packet(tconn, &sock, P_INITIAL_DATA);
		} else if (!msock.socket) {
			send_first_packet(tconn, &msock, P_INITIAL_META);
			conn_err(tconn, "Logic error in conn_connect()\n");
			goto out_release_sockets;

		if (sock.socket && msock.socket) {
			nc = rcu_dereference(tconn->net_conf);
			timeout = nc->ping_timeo * HZ / 10;
			schedule_timeout_interruptible(timeout);
			ok = drbd_socket_okay(&sock.socket);
			ok = drbd_socket_okay(&msock.socket) && ok;

		s = drbd_wait_for_connect(tconn, &ad);
			int fp = receive_first_packet(tconn, s);
			drbd_socket_okay(&sock.socket);
			drbd_socket_okay(&msock.socket);
				conn_warn(tconn, "initial packet S crossed\n");
				sock_release(sock.socket);
				conn_warn(tconn, "initial packet M crossed\n");
				sock_release(msock.socket);
				set_bit(DISCARD_CONCURRENT, &tconn->flags);
				conn_warn(tconn, "Error receiving initial packet\n");

		if (tconn->cstate <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			if (get_t_state(&tconn->receiver) == EXITING)
				goto out_release_sockets;

		ok = drbd_socket_okay(&sock.socket);
		ok = drbd_socket_okay(&msock.socket) && ok;
	sock_release(ad.s_listen);

	sock.socket->sk->sk_reuse = 1; /* SO_REUSEADDR */
	msock.socket->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock.socket->sk->sk_allocation = GFP_NOIO;
	msock.socket->sk->sk_allocation = GFP_NOIO;

	sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;

	 * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
	 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_CONNECTION_FEATURES timeout,
	 * which we set to 4x the configured ping_timeout. */
	nc = rcu_dereference(tconn->net_conf);

	sock.socket->sk->sk_sndtimeo =
	sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;

	msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
	timeout = nc->timeout * HZ / 10;
	discard_my_data = nc->discard_my_data;

	msock.socket->sk->sk_sndtimeo = timeout;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock.socket);
	drbd_tcp_nodelay(msock.socket);

	tconn->data.socket = sock.socket;
	tconn->meta.socket = msock.socket;
	tconn->last_received = jiffies;

	h = drbd_do_features(tconn);

	if (tconn->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(tconn)) {
			conn_err(tconn, "Authentication of peer failed\n");
			conn_err(tconn, "Authentication of peer failed, trying again.\n");

	tconn->data.socket->sk->sk_sndtimeo = timeout;
	tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	if (drbd_send_protocol(tconn) == -EOPNOTSUPP)

	set_bit(STATE_SENT, &tconn->flags);

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		kref_get(&mdev->kref);
		if (discard_my_data)
			set_bit(DISCARD_MY_DATA, &mdev->flags);
		else
			clear_bit(DISCARD_MY_DATA, &mdev->flags);
		drbd_connected(mdev);
		kref_put(&mdev->kref, &drbd_minor_destroy);

	rv = conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
	if (rv < SS_SUCCESS) {
		clear_bit(STATE_SENT, &tconn->flags);

	drbd_thread_start(&tconn->asender);

	mutex_lock(&tconn->conf_update);
	/* The discard_my_data flag is a single-shot modifier to the next
	 * connection attempt, the handshake of which is now well underway.
	 * No need for rcu style copying of the whole struct
	 * just to clear a single value. */
	tconn->net_conf->discard_my_data = 0;
	mutex_unlock(&tconn->conf_update);
out_release_sockets:
	sock_release(ad.s_listen);
	sock_release(sock.socket);
	sock_release(msock.socket);
static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)
	unsigned int header_size = drbd_header_size(tconn);

	if (header_size == sizeof(struct p_header100) &&
	    *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
		struct p_header100 *h = header;
			conn_err(tconn, "Header padding is not zero\n");
		pi->vnr = be16_to_cpu(h->volume);
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
	} else if (header_size == sizeof(struct p_header95) &&
		   *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
		struct p_header95 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
	} else if (header_size == sizeof(struct p_header80) &&
		   *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
		struct p_header80 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be16_to_cpu(h->length);
		conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
			 be32_to_cpu(*(__be32 *)header),
			 tconn->agreed_pro_version);
	pi->data = header + header_size;
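/* Editor's summary of the three on-wire header formats handled above
 * (illustrative): each starts with a magic value and carries a big-endian
 * command and length.  p_header100 (32-bit DRBD_MAGIC_100) adds a 16-bit
 * volume number, a pad that must be zero, and a 32-bit length; p_header95
 * (16-bit DRBD_MAGIC_BIG) has a 32-bit length; p_header80 (32-bit
 * DRBD_MAGIC) only a 16-bit length.  pi->data is left pointing just past
 * the header in the receive buffer. */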
static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
	void *buffer = tconn->data.rbuf;

	err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));
	err = decode_header(tconn, buffer, pi);
	tconn->last_received = jiffies;
static void drbd_flush(struct drbd_tconn *tconn)
	struct drbd_conf *mdev;

	if (tconn->write_ordering >= WO_bdev_flush) {
		idr_for_each_entry(&tconn->volumes, mdev, vnr) {
			if (!get_ldev(mdev))
			kref_get(&mdev->kref);

			rv = blkdev_issue_flush(mdev->ldev->backing_bdev,
				dev_info(DEV, "local disk flush failed with status %d\n", rv);
				/* would rather check on EOPNOTSUPP, but that is not reliable.
				 * don't try again for ANY return value != 0
				 * if (rv == -EOPNOTSUPP) */
				drbd_bump_write_ordering(tconn, WO_drain_io);
			kref_put(&mdev->kref, &drbd_minor_destroy);
/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, possibly finishing it
 * @tconn:	DRBD connection.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&tconn->epoch_lock);
		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
			atomic_dec(&epoch->active);
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
		case EV_BECAME_LAST:

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&tconn->epoch_lock);
				drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size);
				spin_lock(&tconn->epoch_lock);
			/* FIXME: dec unacked on connection, once we have
			 * something to count pending connection packets in. */
			if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
				dec_unacked(epoch->tconn);

			if (tconn->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				if (rv == FE_STILL_LIVE)
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)

	spin_unlock(&tconn->epoch_lock);
/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @tconn:	DRBD connection.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo)
	struct disk_conf *dc;
	struct drbd_conf *mdev;
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = tconn->write_ordering;
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (!get_ldev_if_state(mdev, D_ATTACHING))
		dc = rcu_dereference(mdev->ldev->disk_conf);

		if (wo == WO_bdev_flush && !dc->disk_flushes)
		if (wo == WO_drain_io && !dc->disk_drain)
	tconn->write_ordering = wo;
	if (pwo != tconn->write_ordering || wo == WO_bdev_flush)
		conn_info(tconn, "Method to ensure write ordering: %s\n", write_ordering_str[tconn->write_ordering]);
/**
 * drbd_submit_peer_request()
 * @mdev:	DRBD device.
 * @peer_req:	peer request
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken).  This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_conf *mdev,
			     struct drbd_peer_request *peer_req,
			     const unsigned rw, const int fault_type)
	struct bio *bios = NULL;
	struct page *page = peer_req->pages;
	sector_t sector = peer_req->i.sector;
	unsigned ds = peer_req->i.size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* In most cases, we will only need one bio.  But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio.
	 *
	 * Plain bio_alloc is good enough here, this is no DRBD internally
	 * generated bio, but a bio allocated on behalf of the peer.
	 */
	bio = bio_alloc(GFP_NOIO, nr_pages);
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
	/* > peer_req->i.sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_private = peer_req;
	bio->bi_end_io = drbd_peer_request_endio;

	bio->bi_next = bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyways,
			 * we deal with it, and complain (below). */
			if (bio->bi_vcnt == 0) {
					"bio_add_page failed for len=%u, "
					"bi_vcnt=0 (bi_sector=%llu)\n",
					len, (unsigned long long)bio->bi_sector);

	D_ASSERT(page == NULL);

	atomic_set(&peer_req->pending_bios, n_bios);
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);

		bios = bios->bi_next;
static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
					     struct drbd_peer_request *peer_req)
	struct drbd_interval *i = &peer_req->i;

	drbd_remove_interval(&mdev->write_requests, i);
	drbd_clear_interval(i);

	/* Wake up any processes waiting for this peer request to complete. */
		wake_up(&mdev->misc_wait);
void conn_wait_active_ee_empty(struct drbd_tconn *tconn)
	struct drbd_conf *mdev;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		kref_get(&mdev->kref);
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		kref_put(&mdev->kref, &drbd_minor_destroy);
static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
	struct p_barrier *p = pi->data;
	struct drbd_epoch *epoch;

	/* FIXME these are unacked on connection,
	 * not a specific (peer)device.
	 */
	tconn->current_epoch->barrier_nr = p->barrier;
	tconn->current_epoch->tconn = tconn;
	rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (tconn->write_ordering) {
		if (rv == FE_RECYCLED)

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			conn_warn(tconn, "Allocation of an epoch failed, slowing down\n");

		conn_wait_active_ee_empty(tconn);

		if (atomic_read(&tconn->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
		conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);

	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&tconn->epoch_lock);
	if (atomic_read(&tconn->current_epoch->epoch_size)) {
		list_add(&epoch->list, &tconn->current_epoch->list);
		tconn->current_epoch = epoch;
		/* The current_epoch got recycled while we allocated this one... */
	spin_unlock(&tconn->epoch_lock);
/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_peer_request *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
	      int data_size) __must_hold(local)
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_peer_request *peer_req;
	void *dig_in = mdev->tconn->int_dig_in;
	void *dig_vv = mdev->tconn->int_dig_vv;
	unsigned long *data;

	if (mdev->tconn->peer_integrity_tfm) {
		dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
		/*
		 * FIXME: Receive the incoming digest into the receive buffer
		 * here, together with its struct p_data?
		 */
		err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);

	if (!expect(IS_ALIGNED(data_size, 512)))
	if (!expect(data_size <= DRBD_MAX_BIO_SIZE))

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "request from peer beyond end of local disk: "
			"capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);

	page = peer_req->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		err = drbd_recv_all_warn(mdev->tconn, data, len);
		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
			drbd_free_peer_req(mdev, peer_req);

		drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_free_peer_req(mdev, peer_req);

	mdev->recv_cnt += data_size>>9;
/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
	page = drbd_alloc_pages(mdev, 1, 1);

		unsigned int len = min_t(int, data_size, PAGE_SIZE);
		err = drbd_recv_all_warn(mdev->tconn, data, len);

	drbd_free_pages(mdev, page, 0);
static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
	struct bio_vec *bvec;
	int dgs, err, i, expect;
	void *dig_in = mdev->tconn->int_dig_in;
	void *dig_vv = mdev->tconn->int_dig_vv;

	if (mdev->tconn->peer_integrity_tfm) {
		dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
		err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);

	/* optimistically update recv_cnt.  if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
		expect = min_t(int, data_size, bvec->bv_len);
		err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
		kunmap(bvec->bv_page);
		data_size -= expect;

		drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");

	D_ASSERT(data_size == 0);
/*
 * e_end_resync_block() is called in asender context via
 * drbd_finish_peer_reqs().
 */
static int e_end_resync_block(struct drbd_work *w, int unused)
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	struct drbd_conf *mdev = w->mdev;
	sector_t sector = peer_req->i.sector;

	D_ASSERT(drbd_interval_empty(&peer_req->i));

	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, peer_req->i.size);
		err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, peer_req->i.size);
		err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
	}
static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
	struct drbd_peer_request *peer_req;

	peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);

	dec_rs_pending(mdev);

	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */
	peer_req->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_add(&peer_req->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&peer_req->w.list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	drbd_free_peer_req(mdev, peer_req);
static struct drbd_request *
find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
	     sector_t sector, bool missing_ok, const char *func)
	struct drbd_request *req;

	/* Request object according to our peer */
	req = (struct drbd_request *)(unsigned long)id;
	if (drbd_contains_interval(root, sector, &req->i) && req->i.local)

		dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
			(unsigned long)id, (unsigned long long)sector);
static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
	struct drbd_conf *mdev;
	struct drbd_request *req;
	struct p_data *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->tconn->req_lock);
	req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
	spin_unlock_irq(&mdev->tconn->req_lock);

	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	err = recv_dless_read(mdev, req, sector, pi->size);
		req_mod(req, DATA_RECEIVED);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */
static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
	struct drbd_conf *mdev;
	struct p_data *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_peer_request_endio. */
		err = recv_resync_read(mdev, sector, pi->size);
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		err = drbd_drain_block(mdev, pi->size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);

	atomic_add(pi->size >> 9, &mdev->rs_sect_in);
static void restart_conflicting_writes(struct drbd_conf *mdev,
				       sector_t sector, int size)
	struct drbd_interval *i;
	struct drbd_request *req;

	drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
		req = container_of(i, struct drbd_request, i);
		if (req->rq_state & RQ_LOCAL_PENDING ||
		    !(req->rq_state & RQ_POSTPONED))
		/* as it is RQ_POSTPONED, this will cause it to
		 * be queued on the retry workqueue. */
		__req_mod(req, DISCARD_WRITE, NULL);
 * e_end_block() is called in asender context via drbd_finish_peer_reqs().
 */
static int e_end_block(struct drbd_work *w, int cancel)
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	struct drbd_conf *mdev = w->mdev;
	sector_t sector = peer_req->i.sector;

	if (peer_req->flags & EE_SEND_WRITE_ACK) {
		if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				peer_req->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			err = drbd_send_ack(mdev, pcmd, peer_req);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, peer_req->i.size);
		} else {
			err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */
		}
	}

	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (peer_req->flags & EE_IN_INTERVAL_TREE) {
		spin_lock_irq(&mdev->tconn->req_lock);
		D_ASSERT(!drbd_interval_empty(&peer_req->i));
		drbd_remove_epoch_entry_interval(mdev, peer_req);
		if (peer_req->flags & EE_RESTART_REQUESTS)
			restart_conflicting_writes(mdev, sector, peer_req->i.size);
		spin_unlock_irq(&mdev->tconn->req_lock);
	} else
		D_ASSERT(drbd_interval_empty(&peer_req->i));

	drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
	struct drbd_conf *mdev = w->mdev;
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);

	err = drbd_send_ack(mdev, ack, peer_req);

static int e_send_discard_write(struct drbd_work *w, int unused)
	return e_send_ack(w, P_DISCARD_WRITE);

static int e_send_retry_write(struct drbd_work *w, int unused)
	struct drbd_tconn *tconn = w->mdev->tconn;

	return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
			     P_RETRY_WRITE : P_DISCARD_WRITE);
static bool seq_greater(u32 a, u32 b)
	/*
	 * We assume 32-bit wrap-around here.
	 * For 24-bit wrap-around, we would have to shift:
	 */
	return (s32)a - (s32)b > 0;

static u32 seq_max(u32 a, u32 b)
	return seq_greater(a, b) ? a : b;
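/* Worked example (editor's note): across a 32-bit wrap, a == 1 and
 * b == 0xffffffff give (s32)a - (s32)b == 1 - (-1) == 2 > 0, so
 * seq_greater(1, 0xffffffff) correctly treats 1 as the newer sequence
 * number.  This holds as long as the two values are less than 2^31 apart. */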
static bool need_peer_seq(struct drbd_conf *mdev)
	struct drbd_tconn *tconn = mdev->tconn;

	/*
	 * We only need to keep track of the last packet_seq number of our peer
	 * if we are in dual-primary mode and we have the discard flag set; see
	 * handle_write_conflicts().
	 */
	tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;

	return tp && test_bit(DISCARD_CONCURRENT, &tconn->flags);
static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
	unsigned int newest_peer_seq;

	if (need_peer_seq(mdev)) {
		spin_lock(&mdev->peer_seq_lock);
		newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
		mdev->peer_seq = newest_peer_seq;
		spin_unlock(&mdev->peer_seq_lock);
		/* wake up only if we actually changed mdev->peer_seq */
		if (peer_seq == newest_peer_seq)
			wake_up(&mdev->seq_wait);
	}
static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
	return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
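/* Editor's note (illustrative): l1 and l2 are byte counts, and the shift by
 * 9 converts them to 512-byte sectors.  overlaps(0, 4096, 4, 4096) compares
 * sectors [0,8) with [4,12) and yields true; overlaps(0, 4096, 8, 4096)
 * yields false, because the intervals merely touch. */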
/* maybe change sync_ee into interval trees as well? */
static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
	struct drbd_peer_request *rs_req;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_for_each_entry(rs_req, &mdev->sync_ee, w.list) {
		if (overlaps(peer_req->i.sector, peer_req->i.size,
			     rs_req->i.sector, rs_req->i.size)) {
	spin_unlock_irq(&mdev->tconn->req_lock);
/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq number, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
	if (!need_peer_seq(mdev))

	spin_lock(&mdev->peer_seq_lock);
		if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
			mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
		if (signal_pending(current)) {
		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
		spin_unlock(&mdev->peer_seq_lock);
		timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10;
		timeout = schedule_timeout(timeout);
		spin_lock(&mdev->peer_seq_lock);
			dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
	spin_unlock(&mdev->peer_seq_lock);
	finish_wait(&mdev->seq_wait, &wait);
/* see also bio_flags_to_wire()
 * DRBD_REQ_*, because we need to semantically map the flags to data packet
 * flags and back. We may replicate to other kernel versions. */
static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
	return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
		(dpf & DP_FUA ? REQ_FUA : 0) |
		(dpf & DP_FLUSH ? REQ_FLUSH : 0) |
		(dpf & DP_DISCARD ? REQ_DISCARD : 0);
static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
	struct drbd_interval *i;

	drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
		struct drbd_request *req;
		struct bio_and_error m;

		req = container_of(i, struct drbd_request, i);
		if (!(req->rq_state & RQ_POSTPONED))
		req->rq_state &= ~RQ_POSTPONED;
		__req_mod(req, NEG_ACKED, &m);
		spin_unlock_irq(&mdev->tconn->req_lock);
			complete_master_bio(mdev, &m);
		spin_lock_irq(&mdev->tconn->req_lock);
static int handle_write_conflicts(struct drbd_conf *mdev,
				  struct drbd_peer_request *peer_req)
	struct drbd_tconn *tconn = mdev->tconn;
	bool resolve_conflicts = test_bit(DISCARD_CONCURRENT, &tconn->flags);
	sector_t sector = peer_req->i.sector;
	const unsigned int size = peer_req->i.size;
	struct drbd_interval *i;

	/*
	 * Inserting the peer request into the write_requests tree will prevent
	 * new conflicting local requests from being added.
	 */
	drbd_insert_interval(&mdev->write_requests, &peer_req->i);

	drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
		if (i == &peer_req->i)

		/*
		 * Our peer has sent a conflicting remote request; this
		 * should not happen in a two-node setup.  Wait for the
		 * earlier peer request to complete.
		 */
		err = drbd_wait_misc(mdev, i);

		equal = i->sector == sector && i->size == size;
		if (resolve_conflicts) {
			/*
			 * If the peer request is fully contained within the
			 * overlapping request, it can be discarded; otherwise,
			 * it will be retried once all overlapping requests
			 * have completed.
			 */
			bool discard = i->sector <= sector && i->sector +
				       (i->size >> 9) >= sector + (size >> 9);

				dev_alert(DEV, "Concurrent writes detected: "
					       "local=%llus +%u, remote=%llus +%u, "
					       "assuming %s came first\n",
					  (unsigned long long)i->sector, i->size,
					  (unsigned long long)sector, size,
					  discard ? "local" : "remote");

			peer_req->w.cb = discard ? e_send_discard_write :
						   e_send_retry_write;
			list_add_tail(&peer_req->w.list, &mdev->done_ee);
			wake_asender(mdev->tconn);
		} else {
			struct drbd_request *req =
				container_of(i, struct drbd_request, i);

				dev_alert(DEV, "Concurrent writes detected: "
					       "local=%llus +%u, remote=%llus +%u\n",
					  (unsigned long long)i->sector, i->size,
					  (unsigned long long)sector, size);

			if (req->rq_state & RQ_LOCAL_PENDING ||
			    !(req->rq_state & RQ_POSTPONED)) {
				/*
				 * Wait for the node with the discard flag to
				 * decide if this request will be discarded or
				 * retried.  Requests that are discarded will
				 * disappear from the write_requests tree.
				 *
				 * In addition, wait for the conflicting
				 * request to finish locally before submitting
				 * the conflicting peer request.
				 */
				err = drbd_wait_misc(mdev, &req->i);
					_conn_request_state(mdev->tconn,
							    NS(conn, C_TIMEOUT),
					fail_postponed_requests(mdev, sector, size);

			/*
			 * Remember to restart the conflicting requests after
			 * the new peer request has completed.
			 */
			peer_req->flags |= EE_RESTART_REQUESTS;

	drbd_remove_epoch_entry_interval(mdev, peer_req);
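/* Editor's summary of the policy above (illustrative): the node with the
 * DISCARD_CONCURRENT flag arbitrates.  A peer request fully contained in an
 * overlapping one is answered with e_send_discard_write(), anything else
 * with e_send_retry_write(); without the flag, we wait for the local
 * conflicting request and mark the peer request EE_RESTART_REQUESTS so
 * restart_conflicting_writes() can requeue the local one afterwards. */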
/* mirrored write */
static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
	struct drbd_conf *mdev;
	struct drbd_peer_request *peer_req;
	struct p_data *p = pi->data;
	u32 peer_seq = be32_to_cpu(p->seq_num);

	mdev = vnr_to_mdev(tconn, pi->vnr);

	if (!get_ldev(mdev)) {
		err = wait_for_and_update_peer_seq(mdev, peer_seq);
		drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
		atomic_inc(&tconn->current_epoch->epoch_size);
		err2 = drbd_drain_block(mdev, pi->size);

	/*
	 * Corresponding put_ldev done either below (on various errors), or in
	 * drbd_peer_request_endio, if we successfully submit the data at the
	 * end of this function.
	 */
	sector = be64_to_cpu(p->sector);
	peer_req = read_in_block(mdev, p->block_id, sector, pi->size);

	peer_req->w.cb = e_end_block;

	dp_flags = be32_to_cpu(p->dp_flags);
	rw |= wire_flags_to_bio(mdev, dp_flags);
	if (peer_req->pages == NULL) {
		D_ASSERT(peer_req->i.size == 0);
		D_ASSERT(dp_flags & DP_FLUSH);

	if (dp_flags & DP_MAY_SET_IN_SYNC)
		peer_req->flags |= EE_MAY_SET_IN_SYNC;

	spin_lock(&tconn->epoch_lock);
	peer_req->epoch = tconn->current_epoch;
	atomic_inc(&peer_req->epoch->epoch_size);
	atomic_inc(&peer_req->epoch->active);
	spin_unlock(&tconn->epoch_lock);

	tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;

		peer_req->flags |= EE_IN_INTERVAL_TREE;
		err = wait_for_and_update_peer_seq(mdev, peer_seq);
			goto out_interrupted;
		spin_lock_irq(&mdev->tconn->req_lock);
		err = handle_write_conflicts(mdev, peer_req);
			spin_unlock_irq(&mdev->tconn->req_lock);
			if (err == -ENOENT) {
			goto out_interrupted;
		spin_lock_irq(&mdev->tconn->req_lock);
	list_add(&peer_req->w.list, &mdev->active_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (mdev->state.conn == C_SYNC_TARGET)
		wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, peer_req));

	if (mdev->tconn->agreed_pro_version < 100) {
		switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
			dp_flags |= DP_SEND_WRITE_ACK;
			dp_flags |= DP_SEND_RECEIVE_ACK;

	if (dp_flags & DP_SEND_WRITE_ACK) {
		peer_req->flags |= EE_SEND_WRITE_ACK;
		/* corresponding dec_unacked() in e_end_block()
		 * respective _drbd_clear_done_ee */

	if (dp_flags & DP_SEND_RECEIVE_ACK) {
		/* I really don't like it that the receiver thread
		 * sends on the msock, but anyways */
		drbd_send_ack(mdev, P_RECV_ACK, peer_req);

	if (mdev->state.pdsk < D_INCONSISTENT) {
		/* In case we have the only disk of the cluster, */
		drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
		peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
		peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
		drbd_al_begin_io(mdev, &peer_req->i);

	err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&peer_req->w.list);
	drbd_remove_epoch_entry_interval(mdev, peer_req);
	spin_unlock_irq(&mdev->tconn->req_lock);
	if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
		drbd_al_complete_io(mdev, &peer_req->i);

	drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP);
	drbd_free_peer_req(mdev, peer_req);
/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID is_mddev_idle(): if the partition stats reveal a "significant"
 * amount (more than 64 sectors) of activity we cannot account for with our own
 * resync activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 */
int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
	struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
	unsigned long db, dt, dbdt;
	struct lc_element *tmp;
	unsigned int c_min_rate;

	c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;

	/* feature disabled? */
	if (c_min_rate == 0)

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
			spin_unlock_irq(&mdev->al_lock);
		/* Do not slow down if app IO is already waiting for this extent */
	spin_unlock_irq(&mdev->al_lock);

	curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
		      (int)part_stat_read(&disk->part0, sectors[1]) -
		      atomic_read(&mdev->rs_sect_ev);

	if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
		unsigned long rs_left;

		mdev->rs_last_events = curr_events;

		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
		 */
		i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;

		if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
			rs_left = mdev->ov_left;
		else
			rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;

		dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
		db = mdev->rs_mark_left[i] - rs_left;
		dbdt = Bit2KB(db/dt);

		if (dbdt > c_min_rate)
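/* Worked example (editor's note, assuming the usual 4 KiB bitmap
 * granularity, so Bit2KB() multiplies by 4): if mark i was taken 3 seconds
 * ago with rs_mark_left[i] == 10000 bits and rs_left == 7000 bits now, then
 * db == 3000 bits, dt == 3 s, and dbdt == Bit2KB(1000) == 4000 KB/s; with
 * c_min_rate at, say, 250 KB/s the resync is clearly faster than the
 * configured minimum and may be throttled. */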
static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
	struct drbd_conf *mdev;
	struct drbd_peer_request *peer_req;
	struct digest_info *di = NULL;
	unsigned int fault_type;
	struct p_block_req *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	capacity = drbd_get_capacity(mdev->this_bdev);

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
			(unsigned long long)sector, size);
	if (sector + (size>>9) > capacity) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
			(unsigned long long)sector, size);

	if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
		switch (pi->cmd) {
		case P_DATA_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
		case P_RS_DATA_REQUEST:
		case P_CSUM_RS_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY, p);
			dec_rs_pending(mdev);
			drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
		if (verb && __ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not satisfy peer's read request, "
				"no local data.\n");

		/* drain possible payload */
		return drbd_drain_block(mdev, pi->size);

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);
2429 case P_DATA_REQUEST:
2430 peer_req->w.cb = w_e_end_data_req;
2431 fault_type = DRBD_FAULT_DT_RD;
2432 /* application IO, don't drbd_rs_begin_io */
2435 case P_RS_DATA_REQUEST:
2436 peer_req->w.cb = w_e_end_rsdata_req;
2437 fault_type = DRBD_FAULT_RS_RD;
2438 /* used in the sector offset progress display */
2439 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
2443 case P_CSUM_RS_REQUEST:
2444 fault_type = DRBD_FAULT_RS_RD;
2445 di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
2449 di->digest_size = pi->size;
2450 di->digest = (((char *)di)+sizeof(struct digest_info));
2452 peer_req->digest = di;
2453 peer_req->flags |= EE_HAS_DIGEST;
2455 if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
2458 if (pi->cmd == P_CSUM_RS_REQUEST) {
2459 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
2460 peer_req->w.cb = w_e_end_csum_rs_req;
2461 /* used in the sector offset progress display */
2462 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
2463 } else if (pi->cmd == P_OV_REPLY) {
2464 /* track progress, we may need to throttle */
2465 atomic_add(size >> 9, &mdev->rs_sect_in);
2466 peer_req->w.cb = w_e_end_ov_reply;
2467 dec_rs_pending(mdev);
2468 /* drbd_rs_begin_io done when we sent this request,
2469 * but accounting still needs to be done. */
2470 goto submit_for_resync;
2475 if (mdev->ov_start_sector == ~(sector_t)0 &&
2476 mdev->tconn->agreed_pro_version >= 90) {
2477 unsigned long now = jiffies;
2479 mdev->ov_start_sector = sector;
2480 mdev->ov_position = sector;
2481 mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2482 mdev->rs_total = mdev->ov_left;
2483 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2484 mdev->rs_mark_left[i] = mdev->ov_left;
2485 mdev->rs_mark_time[i] = now;
2487 dev_info(DEV, "Online Verify start sector: %llu\n",
2488 (unsigned long long)sector);
2490 peer_req->w.cb = w_e_end_ov_req;
2491 fault_type = DRBD_FAULT_RS_RD;
2498 /* Throttling, drbd_rs_begin_io and submit should become asynchronous
2499 * wrt the receiver, but that is not as straightforward as it may seem.
2500 * Various places in the resync start and stop logic assume resync
2501 * requests are processed in order, requeuing this on the worker thread
2502 * introduces a bunch of new code for synchronization between threads.
2504 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2505 * "forever", throttling after drbd_rs_begin_io will lock that extent
2506 * for application writes for the same time. For now, just throttle
2507 * here, where the rest of the code expects the receiver to sleep for
2511 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2512 * this defers syncer requests for some time, before letting at least
2513 * one request through. The resync controller on the receiving side
2514 * will adapt to the incoming rate accordingly.
2516 * We cannot throttle here if remote is Primary/SyncTarget:
2517 * we would also throttle its application reads.
2518 * In that case, throttling is done on the SyncTarget only.
2520 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2521 schedule_timeout_uninterruptible(HZ/10);
2522 if (drbd_rs_begin_io(mdev, sector))
2526 atomic_add(size >> 9, &mdev->rs_sect_ev);
2530 spin_lock_irq(&mdev->tconn->req_lock);
2531 list_add_tail(&peer_req->w.list, &mdev->read_ee);
2532 spin_unlock_irq(&mdev->tconn->req_lock);
2534 if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
2537 /* don't care for the reason here */
2538 dev_err(DEV, "submit failed, triggering re-connect\n");
2539 spin_lock_irq(&mdev->tconn->req_lock);
2540 list_del(&peer_req->w.list);
2541 spin_unlock_irq(&mdev->tconn->req_lock);
2542 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2546 drbd_free_peer_req(mdev, peer_req);
2550 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2552 int self, peer, rv = -100;
2553 unsigned long ch_self, ch_peer;
2554 enum drbd_after_sb_p after_sb_0p;
2556 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2557 peer = mdev->p_uuid[UI_BITMAP] & 1;
2559 ch_peer = mdev->p_uuid[UI_SIZE];
2560 ch_self = mdev->comm_bm_set;
2563 after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p;
2565 switch (after_sb_0p) {
2567 case ASB_DISCARD_SECONDARY:
2568 case ASB_CALL_HELPER:
2570 dev_err(DEV, "Configuration error.\n");
2572 case ASB_DISCONNECT:
2574 case ASB_DISCARD_YOUNGER_PRI:
2575 if (self == 0 && peer == 1) {
2579 if (self == 1 && peer == 0) {
2583 /* Else fall through to one of the other strategies... */
2584 case ASB_DISCARD_OLDER_PRI:
2585 if (self == 0 && peer == 1) {
2589 if (self == 1 && peer == 0) {
2593 /* Else fall through to one of the other strategies... */
2594 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
2595 "Using discard-least-changes instead\n");
2596 case ASB_DISCARD_ZERO_CHG:
2597 if (ch_peer == 0 && ch_self == 0) {
2598 rv = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags)
2602 if (ch_peer == 0) { rv = 1; break; }
2603 if (ch_self == 0) { rv = -1; break; }
2605 if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
2607 case ASB_DISCARD_LEAST_CHG:
2608 if (ch_self < ch_peer)
2610 else if (ch_self > ch_peer)
2612 else /* ( ch_self == ch_peer ) */
2613 /* Well, then use something else. */
2614 rv = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags)
2617 case ASB_DISCARD_LOCAL:
2620 case ASB_DISCARD_REMOTE:
2627 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2630 enum drbd_after_sb_p after_sb_1p;
2633 after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p;
2635 switch (after_sb_1p) {
2636 case ASB_DISCARD_YOUNGER_PRI:
2637 case ASB_DISCARD_OLDER_PRI:
2638 case ASB_DISCARD_LEAST_CHG:
2639 case ASB_DISCARD_LOCAL:
2640 case ASB_DISCARD_REMOTE:
2641 case ASB_DISCARD_ZERO_CHG:
2642 dev_err(DEV, "Configuration error.\n");
2644 case ASB_DISCONNECT:
2647 hg = drbd_asb_recover_0p(mdev);
2648 if (hg == -1 && mdev->state.role == R_SECONDARY)
2650 if (hg == 1 && mdev->state.role == R_PRIMARY)
2654 rv = drbd_asb_recover_0p(mdev);
2656 case ASB_DISCARD_SECONDARY:
2657 return mdev->state.role == R_PRIMARY ? 1 : -1;
2658 case ASB_CALL_HELPER:
2659 hg = drbd_asb_recover_0p(mdev);
2660 if (hg == -1 && mdev->state.role == R_PRIMARY) {
2661 enum drbd_state_rv rv2;
2663 drbd_set_role(mdev, R_SECONDARY, 0);
2664 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2665 * we might be here in C_WF_REPORT_PARAMS which is transient.
2666 * we do not need to wait for the after state change work either. */
2667 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2668 if (rv2 != SS_SUCCESS) {
2669 drbd_khelper(mdev, "pri-lost-after-sb");
2671 dev_warn(DEV, "Successfully gave up primary role.\n");
2681 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2684 enum drbd_after_sb_p after_sb_2p;
2687 after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p;
2689 switch (after_sb_2p) {
2690 case ASB_DISCARD_YOUNGER_PRI:
2691 case ASB_DISCARD_OLDER_PRI:
2692 case ASB_DISCARD_LEAST_CHG:
2693 case ASB_DISCARD_LOCAL:
2694 case ASB_DISCARD_REMOTE:
2696 case ASB_DISCARD_SECONDARY:
2697 case ASB_DISCARD_ZERO_CHG:
2698 dev_err(DEV, "Configuration error.\n");
2701 rv = drbd_asb_recover_0p(mdev);
2703 case ASB_DISCONNECT:
2705 case ASB_CALL_HELPER:
2706 hg = drbd_asb_recover_0p(mdev);
2708 enum drbd_state_rv rv2;
2710 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2711 * we might be here in C_WF_REPORT_PARAMS which is transient.
2712 * we do not need to wait for the after state change work either. */
2713 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2714 if (rv2 != SS_SUCCESS) {
2715 drbd_khelper(mdev, "pri-lost-after-sb");
2717 dev_warn(DEV, "Successfully gave up primary role.\n");
2727 static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2728 u64 bits, u64 flags)
2731 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2734 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2736 (unsigned long long)uuid[UI_CURRENT],
2737 (unsigned long long)uuid[UI_BITMAP],
2738 (unsigned long long)uuid[UI_HISTORY_START],
2739 (unsigned long long)uuid[UI_HISTORY_END],
2740 (unsigned long long)bits,
2741 (unsigned long long)flags);
2745   100  after split brain, try auto recover
2746     2  C_SYNC_SOURCE set BitMap
2747     1  C_SYNC_SOURCE use BitMap
2749    -1  C_SYNC_TARGET use BitMap
2750    -2  C_SYNC_TARGET set BitMap
2751  -100  after split brain, disconnect
2752 -1000  unrelated data
2753 -1091  requires proto 91
2754 -1096  requires proto 96
2756 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2761 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2762 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2765 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2769 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2770 peer != UUID_JUST_CREATED)
2774 if (self != UUID_JUST_CREATED &&
2775 (peer == UUID_JUST_CREATED || peer == (u64)0))
2779 int rct, dc; /* roles at crash time */
2781 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2783 if (mdev->tconn->agreed_pro_version < 91)
2786 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2787 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2788 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2789 drbd_uuid_set_bm(mdev, 0UL);
2791 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2792 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2795 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2802 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2804 if (mdev->tconn->agreed_pro_version < 91)
2807 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2808 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2809 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2811 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2812 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2813 mdev->p_uuid[UI_BITMAP] = 0UL;
2815 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2818 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2825 /* Common power [off|failure] */
2826 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2827 (mdev->p_uuid[UI_FLAGS] & 2);
2828 /* lowest bit is set when we were primary,
2829 * next bit (weight 2) is set when peer was primary */
2833 case 0: /* !self_pri && !peer_pri */ return 0;
2834 case 1: /* self_pri && !peer_pri */ return 1;
2835 case 2: /* !self_pri && peer_pri */ return -1;
2836 case 3: /* self_pri && peer_pri */
2837 dc = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
2843 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2848 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2850 if (mdev->tconn->agreed_pro_version < 96 ?
2851 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2852 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2853 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
2854 /* The last P_SYNC_UUID did not get through. Undo, in our copy of the
2855 peer's UUIDs, the modifications it made when it last started a resync as sync source. */
2857 if (mdev->tconn->agreed_pro_version < 91)
2860 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2861 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2863 dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
2864 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2871 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2872 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2873 peer = mdev->p_uuid[i] & ~((u64)1);
2879 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2880 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2885 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2887 if (mdev->tconn->agreed_pro_version < 96 ?
2888 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2889 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2890 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
2891 /* The last P_SYNC_UUID did not get through. Undo the modifications we
2892 made to our UUIDs when we last started a resync as sync source. */
2894 if (mdev->tconn->agreed_pro_version < 91)
2897 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2898 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2900 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
2901 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2902 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2910 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2911 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2912 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2918 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2919 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2920 if (self == peer && self != ((u64)0))
2924 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2925 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2926 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2927 peer = mdev->p_uuid[j] & ~((u64)1);
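/* Every comparison in the rules above first masks off bit 0: in DRBD's UUID
 * scheme the lowest bit is used as a flag (it records that the node was
 * primary, cf. the "& 1" tests in drbd_asb_recover_0p() above) and is not
 * part of the UUID value proper. Minimal standalone sketch, with a
 * hypothetical helper name; not compiled: */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool uuid_matches(uint64_t a, uint64_t b)
{
	/* compare UUIDs while ignoring the flag in bit 0 */
	return (a & ~(uint64_t)1) == (b & ~(uint64_t)1);
}
/* e.g. uuid_matches(0xabcdULL | 1, 0xabcdULL) evaluates to true */
#endif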
2936 /* drbd_sync_handshake() returns the new conn state on success, or
2937 CONN_MASK (-1) on failure.
2939 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2940 enum drbd_disk_state peer_disk) __must_hold(local)
2942 enum drbd_conns rv = C_MASK;
2943 enum drbd_disk_state mydisk;
2944 struct net_conf *nc;
2945 int hg, rule_nr, rr_conflict, tentative;
2947 mydisk = mdev->state.disk;
2948 if (mydisk == D_NEGOTIATING)
2949 mydisk = mdev->new_state_tmp.disk;
2951 dev_info(DEV, "drbd_sync_handshake:\n");
2952 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2953 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2954 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2956 hg = drbd_uuid_compare(mdev, &rule_nr);
2958 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2961 dev_alert(DEV, "Unrelated data, aborting!\n");
2965 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
2969 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2970 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2971 int f = (hg == -100) || abs(hg) == 2;
2972 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2975 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2976 hg > 0 ? "source" : "target");
2980 drbd_khelper(mdev, "initial-split-brain");
2983 nc = rcu_dereference(mdev->tconn->net_conf);
2985 if (hg == 100 || (hg == -100 && nc->always_asbp)) {
2986 int pcount = (mdev->state.role == R_PRIMARY)
2987 + (peer_role == R_PRIMARY);
2988 int forced = (hg == -100);
2992 hg = drbd_asb_recover_0p(mdev);
2995 hg = drbd_asb_recover_1p(mdev);
2998 hg = drbd_asb_recover_2p(mdev);
3001 if (abs(hg) < 100) {
3002 dev_warn(DEV, "Split-Brain detected, %d primaries, "
3003 "automatically solved. Sync from %s node\n",
3004 pcount, (hg < 0) ? "peer" : "this");
3006 dev_warn(DEV, "Doing a full sync, since"
3007 " UUIDs were ambiguous.\n");
3014 if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1))
3016 if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1))
3020 dev_warn(DEV, "Split-Brain detected, manually solved. "
3021 "Sync from %s node\n",
3022 (hg < 0) ? "peer" : "this");
3024 rr_conflict = nc->rr_conflict;
3025 tentative = nc->tentative;
3029 /* FIXME this log message is not correct if we end up here
3030 * after an attempted attach on a diskless node.
3031 * We just refuse to attach -- well, we drop the "connection"
3032 * to that disk, in a way... */
3033 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
3034 drbd_khelper(mdev, "split-brain");
3038 if (hg > 0 && mydisk <= D_INCONSISTENT) {
3039 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
3043 if (hg < 0 && /* by intention we do not use mydisk here. */
3044 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
3045 switch (rr_conflict) {
3046 case ASB_CALL_HELPER:
3047 drbd_khelper(mdev, "pri-lost");
3049 case ASB_DISCONNECT:
3050 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
3053 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
3058 if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
3060 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
3062 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.\n",
3063 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
3064 abs(hg) >= 2 ? "full" : "bit-map based");
3069 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
3070 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
3071 BM_LOCKED_SET_ALLOWED))
3075 if (hg > 0) { /* become sync source. */
3077 } else if (hg < 0) { /* become sync target */
3081 if (drbd_bm_total_weight(mdev)) {
3082 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
3083 drbd_bm_total_weight(mdev));
3090 static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
3092 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
3093 if (peer == ASB_DISCARD_REMOTE)
3094 return ASB_DISCARD_LOCAL;
3096 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
3097 if (peer == ASB_DISCARD_LOCAL)
3098 return ASB_DISCARD_REMOTE;
3100 /* everything else is valid if they are equal on both sides. */
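/* Hypothetical sketch (not in the driver) of how this conversion is meant to
 * be used: a setting is compatible when the peer's converted value equals our
 * own, which is exactly the test receive_protocol() applies below. */
#if 0
static int after_sb_setting_compatible(enum drbd_after_sb_p mine,
				       enum drbd_after_sb_p peers)
{
	return convert_after_sb(peers) == mine;
}
#endif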
3104 static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
3106 struct p_protocol *p = pi->data;
3107 enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
3108 int p_proto, p_discard_my_data, p_two_primaries, cf;
3109 struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
3110 char integrity_alg[SHARED_SECRET_MAX] = "";
3111 struct crypto_hash *peer_integrity_tfm = NULL;
3112 void *int_dig_in = NULL, *int_dig_vv = NULL;
3114 p_proto = be32_to_cpu(p->protocol);
3115 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
3116 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
3117 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
3118 p_two_primaries = be32_to_cpu(p->two_primaries);
3119 cf = be32_to_cpu(p->conn_flags);
3120 p_discard_my_data = cf & CF_DISCARD_MY_DATA;
3122 if (tconn->agreed_pro_version >= 87) {
3125 if (pi->size > sizeof(integrity_alg))
3127 err = drbd_recv_all(tconn, integrity_alg, pi->size);
3130 integrity_alg[SHARED_SECRET_MAX - 1] = 0;
3133 if (pi->cmd != P_PROTOCOL_UPDATE) {
3134 clear_bit(CONN_DRY_RUN, &tconn->flags);
3136 if (cf & CF_DRY_RUN)
3137 set_bit(CONN_DRY_RUN, &tconn->flags);
3140 nc = rcu_dereference(tconn->net_conf);
3142 if (p_proto != nc->wire_protocol) {
3143 conn_err(tconn, "incompatible %s settings\n", "protocol");
3144 goto disconnect_rcu_unlock;
3147 if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
3148 conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri");
3149 goto disconnect_rcu_unlock;
3152 if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
3153 conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri");
3154 goto disconnect_rcu_unlock;
3157 if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
3158 conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri");
3159 goto disconnect_rcu_unlock;
3162 if (p_discard_my_data && nc->discard_my_data) {
3163 conn_err(tconn, "incompatible %s settings\n", "discard-my-data");
3164 goto disconnect_rcu_unlock;
3167 if (p_two_primaries != nc->two_primaries) {
3168 conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries");
3169 goto disconnect_rcu_unlock;
3172 if (strcmp(integrity_alg, nc->integrity_alg)) {
3173 conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg");
3174 goto disconnect_rcu_unlock;
3180 if (integrity_alg[0]) {
3184 * We can only change the peer data integrity algorithm
3185 * here. Changing our own data integrity algorithm
3186 * requires that we send a P_PROTOCOL_UPDATE packet at
3187 * the same time; otherwise, the peer has no way to
3188 * tell between which packets the algorithm should
3192 peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
3193 if (!peer_integrity_tfm) {
3194 conn_err(tconn, "peer data-integrity-alg %s not supported\n",
3199 hash_size = crypto_hash_digestsize(peer_integrity_tfm);
3200 int_dig_in = kmalloc(hash_size, GFP_KERNEL);
3201 int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
3202 if (!(int_dig_in && int_dig_vv)) {
3203 conn_err(tconn, "Allocation of buffers for data integrity checking failed\n");
3208 new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
3209 if (!new_net_conf) {
3210 conn_err(tconn, "Allocation of new net_conf failed\n");
3214 mutex_lock(&tconn->data.mutex);
3215 mutex_lock(&tconn->conf_update);
3216 old_net_conf = tconn->net_conf;
3217 *new_net_conf = *old_net_conf;
3219 new_net_conf->wire_protocol = p_proto;
3220 new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
3221 new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
3222 new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
3223 new_net_conf->two_primaries = p_two_primaries;
3225 rcu_assign_pointer(tconn->net_conf, new_net_conf);
3226 mutex_unlock(&tconn->conf_update);
3227 mutex_unlock(&tconn->data.mutex);
3229 crypto_free_hash(tconn->peer_integrity_tfm);
3230 kfree(tconn->int_dig_in);
3231 kfree(tconn->int_dig_vv);
3232 tconn->peer_integrity_tfm = peer_integrity_tfm;
3233 tconn->int_dig_in = int_dig_in;
3234 tconn->int_dig_vv = int_dig_vv;
3236 if (strcmp(old_net_conf->integrity_alg, integrity_alg))
3237 conn_info(tconn, "peer data-integrity-alg: %s\n",
3238 integrity_alg[0] ? integrity_alg : "(none)");
3241 kfree(old_net_conf);
3244 disconnect_rcu_unlock:
3247 crypto_free_hash(peer_integrity_tfm);
3250 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3255 * input: alg name, feature name
3256 * return: NULL (alg name was "")
3257 * ERR_PTR(error) if something goes wrong
3258 * or the crypto hash ptr, if it worked out ok. */
3259 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
3260 const char *alg, const char *name)
3262 struct crypto_hash *tfm;
3267 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
3269 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
3270 alg, name, PTR_ERR(tfm));
3276 static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
3278 void *buffer = tconn->data.rbuf;
3279 int size = pi->size;
3282 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
3283 s = drbd_recv(tconn, buffer, s);
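/* ignore_remaining_packet() above discards pi->size bytes in chunks of at
 * most DRBD_SOCKET_BUFFER_SIZE. A generic userspace sketch of the same
 * pattern, using plain BSD sockets and hypothetical names; not compiled: */
#if 0
#include <sys/socket.h>

static int drain(int fd, char *buf, int buflen, int size)
{
	while (size > 0) {
		ssize_t s = recv(fd, buf, size < buflen ? size : buflen, 0);
		if (s <= 0)
			return -1;	/* error or premature EOF */
		size -= s;
	}
	return 0;
}
#endif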
3297 * config_unknown_volume - device configuration command for unknown volume
3299 * When a device is added to an existing connection, the node on which the
3300 * device is added first will send configuration commands to its peer but the
3301 * peer will not know about the device yet. It will warn and ignore these
3302 * commands. Once the device is added on the second node, the second node will
3303 * send the same device configuration commands, but in the other direction.
3305 * (We can also end up here if drbd is misconfigured.)
3307 static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
3309 conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n",
3310 cmdname(pi->cmd), pi->vnr);
3311 return ignore_remaining_packet(tconn, pi);
3314 static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
3316 struct drbd_conf *mdev;
3317 struct p_rs_param_95 *p;
3318 unsigned int header_size, data_size, exp_max_sz;
3319 struct crypto_hash *verify_tfm = NULL;
3320 struct crypto_hash *csums_tfm = NULL;
3321 struct net_conf *old_net_conf, *new_net_conf = NULL;
3322 struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
3323 const int apv = tconn->agreed_pro_version;
3324 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
3328 mdev = vnr_to_mdev(tconn, pi->vnr);
3330 return config_unknown_volume(tconn, pi);
3332 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
3333 : apv == 88 ? sizeof(struct p_rs_param)
3335 : apv <= 94 ? sizeof(struct p_rs_param_89)
3336 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
3338 if (pi->size > exp_max_sz) {
3339 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
3340 pi->size, exp_max_sz);
3345 header_size = sizeof(struct p_rs_param);
3346 data_size = pi->size - header_size;
3347 } else if (apv <= 94) {
3348 header_size = sizeof(struct p_rs_param_89);
3349 data_size = pi->size - header_size;
3350 D_ASSERT(data_size == 0);
3352 header_size = sizeof(struct p_rs_param_95);
3353 data_size = pi->size - header_size;
3354 D_ASSERT(data_size == 0);
3357 /* initialize verify_alg and csums_alg */
3359 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3361 err = drbd_recv_all(mdev->tconn, p, header_size);
3365 mutex_lock(&mdev->tconn->conf_update);
3366 old_net_conf = mdev->tconn->net_conf;
3367 if (get_ldev(mdev)) {
3368 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3369 if (!new_disk_conf) {
3371 mutex_unlock(&mdev->tconn->conf_update);
3372 dev_err(DEV, "Allocation of new disk_conf failed\n");
3376 old_disk_conf = mdev->ldev->disk_conf;
3377 *new_disk_conf = *old_disk_conf;
3379 new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
3384 if (data_size > SHARED_SECRET_MAX || data_size == 0) {
3385 dev_err(DEV, "verify-alg of wrong size, "
3386 "peer wants %u, accepting only up to %u bytes\n",
3387 data_size, SHARED_SECRET_MAX);
3392 err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
3395 /* we expect NUL terminated string */
3396 /* but just in case someone tries to be evil */
3397 D_ASSERT(p->verify_alg[data_size-1] == 0);
3398 p->verify_alg[data_size-1] = 0;
3400 } else /* apv >= 89 */ {
3401 /* we still expect NUL terminated strings */
3402 /* but just in case someone tries to be evil */
3403 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3404 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
3405 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3406 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3409 if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
3410 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3411 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
3412 old_net_conf->verify_alg, p->verify_alg);
3415 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
3416 p->verify_alg, "verify-alg");
3417 if (IS_ERR(verify_tfm)) {
3423 if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
3424 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3425 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
3426 old_net_conf->csums_alg, p->csums_alg);
3429 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
3430 p->csums_alg, "csums-alg");
3431 if (IS_ERR(csums_tfm)) {
3437 if (apv > 94 && new_disk_conf) {
3438 new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3439 new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
3440 new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
3441 new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
3443 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
3444 if (fifo_size != mdev->rs_plan_s->size) {
3445 new_plan = fifo_alloc(fifo_size);
3447 dev_err(DEV, "allocation of fifo_buffer failed\n");
3454 if (verify_tfm || csums_tfm) {
3455 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
3456 if (!new_net_conf) {
3457 dev_err(DEV, "Allocation of new net_conf failed\n");
3461 *new_net_conf = *old_net_conf;
3464 strcpy(new_net_conf->verify_alg, p->verify_alg);
3465 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
3466 crypto_free_hash(mdev->tconn->verify_tfm);
3467 mdev->tconn->verify_tfm = verify_tfm;
3468 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
3471 strcpy(new_net_conf->csums_alg, p->csums_alg);
3472 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
3473 crypto_free_hash(mdev->tconn->csums_tfm);
3474 mdev->tconn->csums_tfm = csums_tfm;
3475 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
3477 rcu_assign_pointer(tconn->net_conf, new_net_conf);
3481 if (new_disk_conf) {
3482 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3487 old_plan = mdev->rs_plan_s;
3488 rcu_assign_pointer(mdev->rs_plan_s, new_plan);
3491 mutex_unlock(&mdev->tconn->conf_update);
3494 kfree(old_net_conf);
3495 kfree(old_disk_conf);
3501 if (new_disk_conf) {
3503 kfree(new_disk_conf);
3505 mutex_unlock(&mdev->tconn->conf_update);
3510 if (new_disk_conf) {
3512 kfree(new_disk_conf);
3514 mutex_unlock(&mdev->tconn->conf_update);
3515 /* just for completeness: actually not needed,
3516 * as this is not reached if csums_tfm was ok. */
3517 crypto_free_hash(csums_tfm);
3518 /* but free the verify_tfm again, if csums_tfm did not work out */
3519 crypto_free_hash(verify_tfm);
3520 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3524 /* warn if the arguments differ by more than 12.5% */
3525 static void warn_if_differ_considerably(struct drbd_conf *mdev,
3526 const char *s, sector_t a, sector_t b)
3529 if (a == 0 || b == 0)
3531 d = (a > b) ? (a - b) : (b - a);
3532 if (d > (a>>3) || d > (b>>3))
3533 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3534 (unsigned long long)a, (unsigned long long)b);
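/* Example: a = 1000 and b = 880 differ by d = 120; since
 * 120 > (b >> 3) == 110, the difference exceeds 12.5% of the smaller
 * value and the warning above is printed. */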
3537 static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
3539 struct drbd_conf *mdev;
3540 struct p_sizes *p = pi->data;
3541 enum determine_dev_size dd = unchanged;
3542 sector_t p_size, p_usize, my_usize;
3543 int ldsc = 0; /* local disk size changed */
3544 enum dds_flags ddsf;
3546 mdev = vnr_to_mdev(tconn, pi->vnr);
3548 return config_unknown_volume(tconn, pi);
3550 p_size = be64_to_cpu(p->d_size);
3551 p_usize = be64_to_cpu(p->u_size);
3553 /* just store the peer's disk size for now.
3554 * we still need to figure out whether we accept that. */
3555 mdev->p_size = p_size;
3557 if (get_ldev(mdev)) {
3559 my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
3562 warn_if_differ_considerably(mdev, "lower level device sizes",
3563 p_size, drbd_get_max_capacity(mdev->ldev));
3564 warn_if_differ_considerably(mdev, "user requested size",
3567 /* if this is the first connect, or an otherwise expected
3568 * param exchange, choose the minimum */
3569 if (mdev->state.conn == C_WF_REPORT_PARAMS)
3570 p_usize = min_not_zero(my_usize, p_usize);
3572 /* Never shrink a device with usable data during connect.
3573 But allow online shrinking if we are connected. */
3574 if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
3575 drbd_get_capacity(mdev->this_bdev) &&
3576 mdev->state.disk >= D_OUTDATED &&
3577 mdev->state.conn < C_CONNECTED) {
3578 dev_err(DEV, "The peer's disk size is too small!\n");
3579 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3584 if (my_usize != p_usize) {
3585 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
3587 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3588 if (!new_disk_conf) {
3589 dev_err(DEV, "Allocation of new disk_conf failed\n");
3594 mutex_lock(&mdev->tconn->conf_update);
3595 old_disk_conf = mdev->ldev->disk_conf;
3596 *new_disk_conf = *old_disk_conf;
3597 new_disk_conf->disk_size = p_usize;
3599 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3600 mutex_unlock(&mdev->tconn->conf_update);
3602 kfree(old_disk_conf);
3604 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3605 (unsigned long)p_usize);
3611 ddsf = be16_to_cpu(p->dds_flags);
3612 if (get_ldev(mdev)) {
3613 dd = drbd_determine_dev_size(mdev, ddsf);
3615 if (dd == dev_size_error)
3619 /* I am diskless, need to accept the peer's size. */
3620 drbd_set_my_capacity(mdev, p_size);
3623 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3624 drbd_reconsider_max_bio_size(mdev);
3626 if (get_ldev(mdev)) {
3627 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3628 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3635 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3636 if (be64_to_cpu(p->c_size) !=
3637 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3638 /* we have different sizes, probably peer
3639 * needs to know my new size... */
3640 drbd_send_sizes(mdev, 0, ddsf);
3642 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3643 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3644 if (mdev->state.pdsk >= D_INCONSISTENT &&
3645 mdev->state.disk >= D_INCONSISTENT) {
3646 if (ddsf & DDSF_NO_RESYNC)
3647 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3649 resync_after_online_grow(mdev);
3651 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3658 static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
3660 struct drbd_conf *mdev;
3661 struct p_uuids *p = pi->data;
3663 int i, updated_uuids = 0;
3665 mdev = vnr_to_mdev(tconn, pi->vnr);
3667 return config_unknown_volume(tconn, pi);
3669 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3671 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3672 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3674 kfree(mdev->p_uuid);
3675 mdev->p_uuid = p_uuid;
3677 if (mdev->state.conn < C_CONNECTED &&
3678 mdev->state.disk < D_INCONSISTENT &&
3679 mdev->state.role == R_PRIMARY &&
3680 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3681 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3682 (unsigned long long)mdev->ed_uuid);
3683 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3687 if (get_ldev(mdev)) {
3688 int skip_initial_sync =
3689 mdev->state.conn == C_CONNECTED &&
3690 mdev->tconn->agreed_pro_version >= 90 &&
3691 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3692 (p_uuid[UI_FLAGS] & 8);
3693 if (skip_initial_sync) {
3694 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3695 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3696 "clear_n_write from receive_uuids",
3697 BM_LOCKED_TEST_ALLOWED);
3698 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3699 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3700 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3706 } else if (mdev->state.disk < D_INCONSISTENT &&
3707 mdev->state.role == R_PRIMARY) {
3708 /* I am a diskless primary, the peer just created a new current UUID
3710 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3713 /* Before we test for the disk state, we should wait until a possibly
3714 ongoing cluster wide state change has finished. That is important if
3715 we are primary and are detaching from our disk. We need to see the
3716 new disk state... */
3717 mutex_lock(mdev->state_mutex);
3718 mutex_unlock(mdev->state_mutex);
3719 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3720 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3723 drbd_print_uuids(mdev, "receiver updated UUIDs to");
3729 * convert_state() - Converts the peer's view of the cluster state to our point of view
3730 * @ps: The state as seen by the peer.
3732 static union drbd_state convert_state(union drbd_state ps)
3734 union drbd_state ms;
3736 static enum drbd_conns c_tab[] = {
3737 [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
3738 [C_CONNECTED] = C_CONNECTED,
3740 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3741 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3742 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3743 [C_VERIFY_S] = C_VERIFY_T,
3749 ms.conn = c_tab[ps.conn];
3754 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
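/* Worked example: a peer reporting conn == C_STARTING_SYNC_S ("I will be
 * sync source") is, from our point of view, C_STARTING_SYNC_T; the other
 * fields are mirrored the same way, e.g. the peer's aftr_isp and user_isp
 * collapse into our peer_isp above. */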
3759 static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
3761 struct drbd_conf *mdev;
3762 struct p_req_state *p = pi->data;
3763 union drbd_state mask, val;
3764 enum drbd_state_rv rv;
3766 mdev = vnr_to_mdev(tconn, pi->vnr);
3770 mask.i = be32_to_cpu(p->mask);
3771 val.i = be32_to_cpu(p->val);
3773 if (test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags) &&
3774 mutex_is_locked(mdev->state_mutex)) {
3775 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3779 mask = convert_state(mask);
3780 val = convert_state(val);
3782 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3783 drbd_send_sr_reply(mdev, rv);
3790 static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
3792 struct p_req_state *p = pi->data;
3793 union drbd_state mask, val;
3794 enum drbd_state_rv rv;
3796 mask.i = be32_to_cpu(p->mask);
3797 val.i = be32_to_cpu(p->val);
3799 if (test_bit(DISCARD_CONCURRENT, &tconn->flags) &&
3800 mutex_is_locked(&tconn->cstate_mutex)) {
3801 conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
3805 mask = convert_state(mask);
3806 val = convert_state(val);
3808 rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
3809 conn_send_sr_reply(tconn, rv);
3814 static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
3816 struct drbd_conf *mdev;
3817 struct p_state *p = pi->data;
3818 union drbd_state os, ns, peer_state;
3819 enum drbd_disk_state real_peer_disk;
3820 enum chg_state_flags cs_flags;
3823 mdev = vnr_to_mdev(tconn, pi->vnr);
3825 return config_unknown_volume(tconn, pi);
3827 peer_state.i = be32_to_cpu(p->state);
3829 real_peer_disk = peer_state.disk;
3830 if (peer_state.disk == D_NEGOTIATING) {
3831 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3832 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3835 spin_lock_irq(&mdev->tconn->req_lock);
3837 os = ns = drbd_read_state(mdev);
3838 spin_unlock_irq(&mdev->tconn->req_lock);
3840 /* If some other part of the code (asender thread, timeout)
3841 * already decided to close the connection again,
3842 * we must not "re-establish" it here. */
3843 if (os.conn <= C_TEAR_DOWN)
3846 /* If this is the "end of sync" confirmation, usually the peer disk
3847 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
3848 * set) resync started in PausedSyncT, or if the timing of pause-/
3849 * unpause-sync events has been "just right", the peer disk may
3850 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
3852 if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
3853 real_peer_disk == D_UP_TO_DATE &&
3854 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3855 /* If we are (becoming) SyncSource, but peer is still in sync
3856 * preparation, ignore its uptodate-ness to avoid flapping, it
3857 * will change to inconsistent once the peer reaches active
3859 * It may have changed syncer-paused flags, however, so we
3860 * cannot ignore this completely. */
3861 if (peer_state.conn > C_CONNECTED &&
3862 peer_state.conn < C_SYNC_SOURCE)
3863 real_peer_disk = D_INCONSISTENT;
3865 /* if peer_state changes to connected at the same time,
3866 * it explicitly notifies us that it finished resync.
3867 * Maybe we should finish it up, too? */
3868 else if (os.conn >= C_SYNC_SOURCE &&
3869 peer_state.conn == C_CONNECTED) {
3870 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3871 drbd_resync_finished(mdev);
3876 /* explicit verify finished notification, stop sector reached. */
3877 if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
3878 peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
3879 ov_out_of_sync_print(mdev);
3880 drbd_resync_finished(mdev);
3884 /* peer says his disk is inconsistent, while we think it is uptodate,
3885 * and this happens while the peer still thinks we have a sync going on,
3886 * but we think we are already done with the sync.
3887 * We ignore this to avoid flapping pdsk.
3888 * This should not happen, if the peer is a recent version of drbd. */
3889 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3890 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3891 real_peer_disk = D_UP_TO_DATE;
3893 if (ns.conn == C_WF_REPORT_PARAMS)
3894 ns.conn = C_CONNECTED;
3896 if (peer_state.conn == C_AHEAD)
3899 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3900 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3901 int cr; /* consider resync */
3903 /* if we established a new connection */
3904 cr = (os.conn < C_CONNECTED);
3905 /* if we had an established connection
3906 * and one of the nodes newly attaches a disk */
3907 cr |= (os.conn == C_CONNECTED &&
3908 (peer_state.disk == D_NEGOTIATING ||
3909 os.disk == D_NEGOTIATING));
3910 /* if we have both been inconsistent, and the peer has been
3911 * forced to be UpToDate with --overwrite-data */
3912 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3913 /* if we had been plain connected, and the admin requested to
3914 * start a sync by "invalidate" or "invalidate-remote" */
3915 cr |= (os.conn == C_CONNECTED &&
3916 (peer_state.conn >= C_STARTING_SYNC_S &&
3917 peer_state.conn <= C_WF_BITMAP_T));
3920 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3923 if (ns.conn == C_MASK) {
3924 ns.conn = C_CONNECTED;
3925 if (mdev->state.disk == D_NEGOTIATING) {
3926 drbd_force_state(mdev, NS(disk, D_FAILED));
3927 } else if (peer_state.disk == D_NEGOTIATING) {
3928 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3929 peer_state.disk = D_DISKLESS;
3930 real_peer_disk = D_DISKLESS;
3932 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
3934 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
3935 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3941 spin_lock_irq(&mdev->tconn->req_lock);
3942 if (os.i != drbd_read_state(mdev).i)
3944 clear_bit(CONSIDER_RESYNC, &mdev->flags);
3945 ns.peer = peer_state.role;
3946 ns.pdsk = real_peer_disk;
3947 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3948 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3949 ns.disk = mdev->new_state_tmp.disk;
3950 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3951 if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
3952 test_bit(NEW_CUR_UUID, &mdev->flags)) {
3953 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
3954 for temporary network outages! */
3955 spin_unlock_irq(&mdev->tconn->req_lock);
3956 dev_err(DEV, "Aborting Connect, can not thaw IO with a peer that is only Consistent\n");
3957 tl_clear(mdev->tconn);
3958 drbd_uuid_new_current(mdev);
3959 clear_bit(NEW_CUR_UUID, &mdev->flags);
3960 conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
3963 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
3964 ns = drbd_read_state(mdev);
3965 spin_unlock_irq(&mdev->tconn->req_lock);
3967 if (rv < SS_SUCCESS) {
3968 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3972 if (os.conn > C_WF_REPORT_PARAMS) {
3973 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3974 peer_state.disk != D_NEGOTIATING ) {
3975 /* we want resync, peer has not yet decided to sync... */
3976 /* Nowadays only used when forcing a node into primary role and
3977 setting its disk to UpToDate with that */
3978 drbd_send_uuids(mdev);
3979 drbd_send_current_state(mdev);
3983 clear_bit(DISCARD_MY_DATA, &mdev->flags);
3985 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3990 static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
3992 struct drbd_conf *mdev;
3993 struct p_rs_uuid *p = pi->data;
3995 mdev = vnr_to_mdev(tconn, pi->vnr);
3999 wait_event(mdev->misc_wait,
4000 mdev->state.conn == C_WF_SYNC_UUID ||
4001 mdev->state.conn == C_BEHIND ||
4002 mdev->state.conn < C_CONNECTED ||
4003 mdev->state.disk < D_NEGOTIATING);
4005 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
4007 /* Here the _drbd_uuid_ functions are right, current should
4008 _not_ be rotated into the history */
4009 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
4010 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
4011 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
4013 drbd_print_uuids(mdev, "updated sync uuid");
4014 drbd_start_resync(mdev, C_SYNC_TARGET);
4018 dev_err(DEV, "Ignoring SyncUUID packet!\n");
4024 * receive_bitmap_plain
4026 * Return 0 when done, 1 when another iteration is needed, and a negative error
4027 * code upon failure.
4030 receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
4031 unsigned long *p, struct bm_xfer_ctx *c)
4033 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
4034 drbd_header_size(mdev->tconn);
4035 unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
4036 c->bm_words - c->word_offset);
4037 unsigned int want = num_words * sizeof(*p);
4041 dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size);
4046 err = drbd_recv_all(mdev->tconn, p, want);
4050 drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);
4052 c->word_offset += num_words;
4053 c->bit_offset = c->word_offset * BITS_PER_LONG;
4054 if (c->bit_offset > c->bm_bits)
4055 c->bit_offset = c->bm_bits;
4060 static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
4062 return (enum drbd_bitmap_code)(p->encoding & 0x0f);
4065 static int dcbp_get_start(struct p_compressed_bm *p)
4067 return (p->encoding & 0x80) != 0;
4070 static int dcbp_get_pad_bits(struct p_compressed_bm *p)
4072 return (p->encoding >> 4) & 0x7;
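/* The three accessors above unpack one encoding byte:
 *   bit  7    - value of the first run (dcbp_get_start)
 *   bits 6..4 - pad bits at the end of the bitstream (dcbp_get_pad_bits)
 *   bits 3..0 - bitmap encoding code (dcbp_get_code)
 * Hypothetical packing counterpart, for illustration only; not compiled: */
#if 0
static unsigned char dcbp_make(unsigned int code, unsigned int pad_bits,
			       int start)
{
	return (code & 0x0f) | ((pad_bits & 0x7) << 4) | (start ? 0x80 : 0);
}
#endif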
4078 * Return 0 when done, 1 when another iteration is needed, and a negative error
4079 * code upon failure.
4082 recv_bm_rle_bits(struct drbd_conf *mdev,
4083 struct p_compressed_bm *p,
4084 struct bm_xfer_ctx *c,
4087 struct bitstream bs;
4091 unsigned long s = c->bit_offset;
4093 int toggle = dcbp_get_start(p);
4097 bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
4099 bits = bitstream_get_bits(&bs, &look_ahead, 64);
4103 for (have = bits; have > 0; s += rl, toggle = !toggle) {
4104 bits = vli_decode_bits(&rl, look_ahead);
4110 if (e >= c->bm_bits) {
4111 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
4114 _drbd_bm_set_bits(mdev, s, e);
4118 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
4119 have, bits, look_ahead,
4120 (unsigned int)(bs.cur.b - p->code),
4121 (unsigned int)bs.buf_len);
4124 look_ahead >>= bits;
4127 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
4130 look_ahead |= tmp << have;
4135 bm_xfer_ctx_bit_to_word_offset(c);
4137 return (s != c->bm_bits);
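/* Standalone sketch of the decode loop above (hypothetical, with the run
 * lengths already VLI-decoded into an array): runs alternate between clear
 * and set bits, starting with the polarity taken from the encoding byte;
 * only the set runs touch the bitmap. Run lengths are assumed non-zero,
 * as the real decoder rejects zero-length runs. Not compiled: */
#if 0
#include <stddef.h>
#include <stdint.h>

static void rle_apply(const uint64_t *runs, size_t n, int toggle,
		      void (*set_bits)(uint64_t from, uint64_t to))
{
	uint64_t s = 0;
	size_t i;

	for (i = 0; i < n; i++, toggle = !toggle) {
		if (toggle)
			set_bits(s, s + runs[i] - 1);	/* inclusive range */
		s += runs[i];
	}
}
#endif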
4143 * Return 0 when done, 1 when another iteration is needed, and a negative error
4144 * code upon failure.
4147 decode_bitmap_c(struct drbd_conf *mdev,
4148 struct p_compressed_bm *p,
4149 struct bm_xfer_ctx *c,
4152 if (dcbp_get_code(p) == RLE_VLI_Bits)
4153 return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));
4155 /* other variants had been implemented for evaluation,
4156 * but have been dropped as this one turned out to be "best"
4157 * during all our tests. */
4159 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
4160 conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4164 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
4165 const char *direction, struct bm_xfer_ctx *c)
4167 /* what would it take to transfer it "plaintext" */
4168 unsigned int header_size = drbd_header_size(mdev->tconn);
4169 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
4170 unsigned int plain =
4171 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
4172 c->bm_words * sizeof(unsigned long);
4173 unsigned int total = c->bytes[0] + c->bytes[1];
4176 /* total can not be zero. but just in case: */
4180 /* don't report if not compressed */
4184 /* total < plain. check for overflow, still */
4185 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
4186 : (1000 * total / plain);
4192 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
4193 "total %u; compression: %u.%u%%\n",
4195 c->bytes[1], c->packets[1],
4196 c->bytes[0], c->packets[0],
4197 total, r/10, r % 10);
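/* Example: plain == 1000000 bytes sent as total == 50000 compressed bytes
 * gives r == 1000 * 50000 / 1000000 == 50 permille, reported above as
 * "compression: 5.0%". */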
4200 /* Since we are processing the bitfield from lower addresses to higher,
4201 it does not matter whether we process it in 32 bit chunks or 64 bit
4202 chunks, as long as it is little endian. (Understand it as a byte stream,
4203 beginning with the lowest byte...) If we used big endian,
4204 we would need to process it from the highest address to the lowest,
4205 in order to be agnostic to the 32 vs 64 bit issue.
4207 Returns 0 on success, a negative error code otherwise. */
4208 static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
4210 struct drbd_conf *mdev;
4211 struct bm_xfer_ctx c;
4214 mdev = vnr_to_mdev(tconn, pi->vnr);
4218 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
4219 /* you are supposed to send additional out-of-sync information
4220 * if you actually set bits during this phase */
4222 c = (struct bm_xfer_ctx) {
4223 .bm_bits = drbd_bm_bits(mdev),
4224 .bm_words = drbd_bm_words(mdev),
4228 if (pi->cmd == P_BITMAP)
4229 err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
4230 else if (pi->cmd == P_COMPRESSED_BITMAP) {
4231 /* MAYBE: sanity check that we speak proto >= 90,
4232 * and the feature is enabled! */
4233 struct p_compressed_bm *p = pi->data;
4235 if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
4236 dev_err(DEV, "ReportCBitmap packet too large\n");
4240 if (pi->size <= sizeof(*p)) {
4241 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
4245 err = drbd_recv_all(mdev->tconn, p, pi->size);
4248 err = decode_bitmap_c(mdev, p, &c, pi->size);
4250 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)\n", pi->cmd);
4255 c.packets[pi->cmd == P_BITMAP]++;
4256 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;
4263 err = drbd_recv_header(mdev->tconn, pi);
4268 INFO_bm_xfer_stats(mdev, "receive", &c);
4270 if (mdev->state.conn == C_WF_BITMAP_T) {
4271 enum drbd_state_rv rv;
4273 err = drbd_send_bitmap(mdev);
4276 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
4277 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
4278 D_ASSERT(rv == SS_SUCCESS);
4279 } else if (mdev->state.conn != C_WF_BITMAP_S) {
4280 /* admin may have requested C_DISCONNECTING,
4281 * other threads may have noticed network errors */
4282 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
4283 drbd_conn_str(mdev->state.conn));
4288 drbd_bm_unlock(mdev);
4289 if (!err && mdev->state.conn == C_WF_BITMAP_S)
4290 drbd_start_resync(mdev, C_SYNC_SOURCE);
4294 static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
4296 conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
4299 return ignore_remaining_packet(tconn, pi);
4302 static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
4304 /* Make sure we've acked all the TCP data associated
4305 * with the data requests being unplugged */
4306 drbd_tcp_quickack(tconn->data.socket);
4311 static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
4313 struct drbd_conf *mdev;
4314 struct p_block_desc *p = pi->data;
4316 mdev = vnr_to_mdev(tconn, pi->vnr);
4320 switch (mdev->state.conn) {
4321 case C_WF_SYNC_UUID:
4326 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
4327 drbd_conn_str(mdev->state.conn));
4330 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
4338 int (*fn)(struct drbd_tconn *, struct packet_info *);
4341 static struct data_cmd drbd_cmd_handler[] = {
4342 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
4343 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
4344 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
4345 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
4346 [P_BITMAP] = { 1, 0, receive_bitmap } ,
4347 [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
4348 [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote },
4349 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4350 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4351 [P_SYNC_PARAM] = { 1, 0, receive_SyncParam },
4352 [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam },
4353 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
4354 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
4355 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
4356 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
4357 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
4358 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
4359 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4360 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4361 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4362 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
4363 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
4364 [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
4365 [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
4368 static void drbdd(struct drbd_tconn *tconn)
4370 struct packet_info pi;
4371 size_t shs; /* sub header size */
4374 while (get_t_state(&tconn->receiver) == RUNNING) {
4375 struct data_cmd *cmd;
4377 drbd_thread_current_set_cpu(&tconn->receiver);
4378 if (drbd_recv_header(tconn, &pi))
4381 cmd = &drbd_cmd_handler[pi.cmd];
4382 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
4383 conn_err(tconn, "Unexpected data packet %s (0x%04x)\n",
4384 cmdname(pi.cmd), pi.cmd);
4388 shs = cmd->pkt_size;
4389 if (pi.size > shs && !cmd->expect_payload) {
4390 conn_err(tconn, "No payload expected %s l:%d\n",
4391 cmdname(pi.cmd), pi.size);
4396 err = drbd_recv_all_warn(tconn, pi.data, shs);
4402 err = cmd->fn(tconn, &pi);
4404 conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
4405 cmdname(pi.cmd), err, pi.size);
4412 conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4415 void conn_flush_workqueue(struct drbd_tconn *tconn)
4417 struct drbd_wq_barrier barr;
4419 barr.w.cb = w_prev_work_done;
4420 barr.w.tconn = tconn;
4421 init_completion(&barr.done);
4422 drbd_queue_work(&tconn->sender_work, &barr.w);
4423 wait_for_completion(&barr.done);
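/* The barrier pattern above: w_prev_work_done simply completes barr.done,
 * so once it has run, every work item queued before it has been processed
 * as well; waiting on the completion therefore flushes the queue up to
 * this point. */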
4426 static void conn_disconnect(struct drbd_tconn *tconn)
4428 struct drbd_conf *mdev;
4432 if (tconn->cstate == C_STANDALONE)
4435 /* We are about to start the cleanup after connection loss.
4436 * Make sure drbd_make_request knows about that.
4437 * Usually we should be in some network failure state already,
4438 * but just in case we are not, we fix it up here.
4440 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
4442 /* asender does not clean up anything. it must not interfere, either */
4443 drbd_thread_stop(&tconn->asender);
4444 drbd_free_sock(tconn);
4447 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
4448 kref_get(&mdev->kref);
4450 drbd_disconnected(mdev);
4451 kref_put(&mdev->kref, &drbd_minor_destroy);
4456 if (!list_empty(&tconn->current_epoch->list))
4457 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
4458 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
4459 atomic_set(&tconn->current_epoch->epoch_size, 0);
4460 tconn->send.seen_any_write_yet = false;
4462 conn_info(tconn, "Connection closed\n");
4464 if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
4465 conn_try_outdate_peer_async(tconn);
4467 spin_lock_irq(&tconn->req_lock);
4469 if (oc >= C_UNCONNECTED)
4470 _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
4472 spin_unlock_irq(&tconn->req_lock);
4474 if (oc == C_DISCONNECTING)
4475 conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
static int drbd_disconnected(struct drbd_conf *mdev)
{
	unsigned int i;

	/* wait for current activity to cease. */
	spin_lock_irq(&mdev->tconn->req_lock);
	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	/* We do not have data structures that would allow us to
	 * get the rs_pending_cnt down to 0 again.
	 *  * On C_SYNC_TARGET we do not have any data structures describing
	 *    the pending RSDataRequest's we have sent.
	 *  * On C_SYNC_SOURCE there is no data structure that tracks
	 *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
	 *  And no, it is not the sum of the reference counts in the
	 *  resync_LRU. The resync_LRU tracks the whole operation including
	 *  the disk-IO, while the rs_pending_cnt only tracks the blocks
	 *  on the fly. */
	drbd_rs_cancel_all(mdev);
	mdev->rs_total = 0;
	mdev->rs_failed = 0;
	atomic_set(&mdev->rs_pending_cnt, 0);
	wake_up(&mdev->misc_wait);

	del_timer_sync(&mdev->resync_timer);
	resync_timer_fn((unsigned long)mdev);

	/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
	 * w_make_resync_request etc. which may still be on the worker queue
	 * to be "canceled" */
	drbd_flush_workqueue(mdev);

	drbd_finish_peer_reqs(mdev);

	/* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
	   might have issued a work again. The one before drbd_finish_peer_reqs() is
	   necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
	drbd_flush_workqueue(mdev);

	kfree(mdev->p_uuid);
	mdev->p_uuid = NULL;

	if (!drbd_suspended(mdev))
		tl_clear(mdev->tconn);

	drbd_md_sync(mdev);

	/* serialize with bitmap writeout triggered by the state change,
	 * if any. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	/* tcp_close and release of sendpage pages can be deferred.  I don't
	 * want to use SO_LINGER, because apparently it can be deferred for
	 * more than 20 seconds (longest time I checked).
	 *
	 * Actually we don't care for exactly when the network stack does its
	 * put_page(), but release our reference on these pages right here.
	 */
	i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
	if (i)
		dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
	i = atomic_read(&mdev->pp_in_use_by_net);
	if (i)
		dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
	i = atomic_read(&mdev->pp_in_use);
	if (i)
		dev_info(DEV, "pp_in_use = %d, expected 0\n", i);

	D_ASSERT(list_empty(&mdev->read_ee));
	D_ASSERT(list_empty(&mdev->active_ee));
	D_ASSERT(list_empty(&mdev->sync_ee));
	D_ASSERT(list_empty(&mdev->done_ee));

	return 0;
}

/*
 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
 * we can agree on is stored in agreed_pro_version.
 *
 * feature flags and the reserved array should be enough room for future
 * enhancements of the handshake protocol, and possible plugins...
 *
 * for now, they are expected to be zero, but ignored.
 */
static int drbd_send_features(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;
	struct p_connection_features *p;

	sock = &tconn->data;
	p = conn_prepare_command(tconn, sock);
	if (!p)
		return -EIO;
	memset(p, 0, sizeof(*p));
	p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
	p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
	return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
}

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 */
static int drbd_do_features(struct drbd_tconn *tconn)
{
	/* ASSERT current == tconn->receiver ... */
	struct p_connection_features *p;
	const int expect = sizeof(struct p_connection_features);
	struct packet_info pi;
	int err;

	err = drbd_send_features(tconn);
	if (err)
		return 0;

	err = drbd_recv_header(tconn, &pi);
	if (err)
		return 0;

	if (pi.cmd != P_CONNECTION_FEATURES) {
		conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		return -1;
	}

	if (pi.size != expect) {
		conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
			 expect, pi.size);
		return -1;
	}

	p = pi.data;
	err = drbd_recv_all_warn(tconn, p, expect);
	if (err)
		return 0;

	p->protocol_min = be32_to_cpu(p->protocol_min);
	p->protocol_max = be32_to_cpu(p->protocol_max);
	if (p->protocol_max == 0)
		p->protocol_max = p->protocol_min;

	if (PRO_VERSION_MAX < p->protocol_min ||
	    PRO_VERSION_MIN > p->protocol_max)
		goto incompat;

	tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);

	conn_info(tconn, "Handshake successful: "
		  "Agreed network protocol version %d\n", tconn->agreed_pro_version);

	return 1;

 incompat:
	conn_err(tconn, "incompatible DRBD dialects: "
		 "I support %d-%d, peer supports %d-%d\n",
		 PRO_VERSION_MIN, PRO_VERSION_MAX,
		 p->protocol_min, p->protocol_max);
	return -1;
}
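
/* Worked example of the negotiation above (the numbers are illustrative,
 * not tied to any particular release): if this side were built with
 * PRO_VERSION_MIN 86 and PRO_VERSION_MAX 101, and the peer advertises
 * 86..96, the ranges overlap and agreed_pro_version becomes
 * min(101, 96) = 96. A peer advertising, say, 60..80 would not overlap
 * either bound and we would go standalone. */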

#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
static int drbd_do_auth(struct drbd_tconn *tconn)
{
	conn_err(tconn, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
	conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
	return -1;
}
#else
#define CHALLENGE_LEN	64
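
/* The CRAM-HMAC exchange below is symmetric; both peers run it at the same
 * time over the data socket:
 *   1. send P_AUTH_CHALLENGE carrying my_challenge (random bytes),
 *   2. receive the peer's P_AUTH_CHALLENGE into peers_ch,
 *   3. answer with P_AUTH_RESPONSE = HMAC(shared_secret, peers_ch),
 *   4. receive the peer's P_AUTH_RESPONSE and compare it against
 *      HMAC(shared_secret, my_challenge) computed locally.
 * Only a peer that knows the shared secret can produce the right response;
 * the secret itself never crosses the wire. */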

/* Return value:
	1 - auth succeeded,
	0 - failed, try again (network error),
	-1 - auth failed, don't try again.
*/
static int drbd_do_auth(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;
	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
	struct scatterlist sg;
	char *response = NULL;
	char *right_response = NULL;
	char *peers_ch = NULL;
	unsigned int key_len;
	char secret[SHARED_SECRET_MAX]; /* 64 byte */
	unsigned int resp_size;
	struct hash_desc desc;
	struct packet_info pi;
	struct net_conf *nc;
	int err, rv;

	/* FIXME: Put the challenge/response into the preallocated socket buffer. */

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	key_len = strlen(nc->shared_secret);
	memcpy(secret, nc->shared_secret, key_len);
	rcu_read_unlock();

	desc.tfm = tconn->cram_hmac_tfm;
	desc.flags = 0;

	rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len);
	if (rv) {
		conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	get_random_bytes(my_challenge, CHALLENGE_LEN);

	sock = &tconn->data;
	if (!conn_prepare_command(tconn, sock)) {
		rv = 0;
		goto fail;
	}
	rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
				my_challenge, CHALLENGE_LEN);
	if (!rv)
		goto fail;

	err = drbd_recv_header(tconn, &pi);
	if (err) {
		rv = 0;
		goto fail;
	}

	if (pi.cmd != P_AUTH_CHALLENGE) {
		conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		rv = 0;
		goto fail;
	}

	if (pi.size > CHALLENGE_LEN * 2) {
		conn_err(tconn, "AuthChallenge payload too big.\n");
		rv = -1;
		goto fail;
	}

	peers_ch = kmalloc(pi.size, GFP_NOIO);
	if (peers_ch == NULL) {
		conn_err(tconn, "kmalloc of peers_ch failed\n");
		rv = -1;
		goto fail;
	}

	err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
	if (err) {
		rv = 0;
		goto fail;
	}

	resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
	response = kmalloc(resp_size, GFP_NOIO);
	if (response == NULL) {
		conn_err(tconn, "kmalloc of response failed\n");
		rv = -1;
		goto fail;
	}

	sg_init_table(&sg, 1);
	sg_set_buf(&sg, peers_ch, pi.size);

	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
	if (rv) {
		conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	if (!conn_prepare_command(tconn, sock)) {
		rv = 0;
		goto fail;
	}
	rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
				response, resp_size);
	if (!rv)
		goto fail;

	err = drbd_recv_header(tconn, &pi);
	if (err) {
		rv = 0;
		goto fail;
	}

	if (pi.cmd != P_AUTH_RESPONSE) {
		conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		rv = 0;
		goto fail;
	}

	if (pi.size != resp_size) {
		conn_err(tconn, "AuthResponse payload has wrong size\n");
		rv = 0;
		goto fail;
	}

	err = drbd_recv_all_warn(tconn, response, resp_size);
	if (err) {
		rv = 0;
		goto fail;
	}

	right_response = kmalloc(resp_size, GFP_NOIO);
	if (right_response == NULL) {
		conn_err(tconn, "kmalloc of right_response failed\n");
		rv = -1;
		goto fail;
	}

	sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);

	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
	if (rv) {
		conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = !memcmp(response, right_response, resp_size);

	if (rv)
		conn_info(tconn, "Peer authenticated using %d bytes HMAC\n",
			  resp_size);
	else
		rv = -1;

 fail:
	kfree(peers_ch);
	kfree(response);
	kfree(right_response);

	return rv;
}
#endif
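
/* Entry point of the receiver thread: keep trying to establish a
 * connection, retrying after a second on transient failures and giving up
 * (discarding the network configuration) on fatal ones, then run the
 * receive loop and clean up on exit. */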
int drbdd_init(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	int h;

	conn_info(tconn, "receiver (re)started\n");

	do {
		h = conn_connect(tconn);
		if (h == 0) {
			conn_disconnect(tconn);
			schedule_timeout_interruptible(HZ);
		}
		if (h == -1) {
			conn_warn(tconn, "Discarding network configuration.\n");
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	} while (h == 0);

	if (h > 0)
		drbdd(tconn);

	conn_disconnect(tconn);

	conn_info(tconn, "receiver terminated\n");
	return 0;
}

/* ********* acknowledge sender ******** */
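
/* The "acknowledge sender" (asender) thread owns the meta socket: it
 * answers pings, processes the ack packets the peer sends for our writes
 * and resync requests, and watches the ping timeout. Keeping this path
 * separate from the bulk data socket is meant to keep ack latency low. */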

static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct p_req_state_reply *p = pi->data;
	int retcode = be32_to_cpu(p->retcode);

	if (retcode >= SS_SUCCESS) {
		set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
	} else {
		set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
		conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
			 drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&tconn->ping_wait);

	return 0;
}
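
/* Per-volume variant of the above; before protocol 100, connection-wide
 * state change replies arrive as volume replies and are redirected to
 * got_conn_RqSReply(). */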
static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_req_state_reply *p = pi->data;
	int retcode = be32_to_cpu(p->retcode);

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) {
		D_ASSERT(tconn->agreed_pro_version < 100);
		return got_conn_RqSReply(tconn, pi);
	}

	if (retcode >= SS_SUCCESS) {
		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
	} else {
		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
			drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&mdev->state_wait);

	return 0;
}

static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
{
	return drbd_send_ping_ack(tconn);
}

static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	/* restore idle timeout */
	tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
	if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
		wake_up(&tconn->ping_wait);

	return 0;
}

static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	D_ASSERT(mdev->tconn->agreed_pro_version >= 89);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, sector);
		drbd_set_in_sync(mdev, sector, blksize);
		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
		mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
		put_ldev(mdev);
	}
	dec_rs_pending(mdev);
	atomic_add(blksize >> 9, &mdev->rs_sect_in);

	return 0;
}
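
/* Common helper for the ack handlers below: look the request up by id and
 * sector in the given interval tree, apply the request state transition
 * "what" under the req_lock, and complete the master bio if that
 * transition finished it. */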
static int
validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
			      struct rb_root *root, const char *func,
			      enum drbd_req_event what, bool missing_ok)
{
	struct drbd_request *req;
	struct bio_and_error m;

	spin_lock_irq(&mdev->tconn->req_lock);
	req = find_request(mdev, root, id, sector, missing_ok, func);
	if (unlikely(!req)) {
		spin_unlock_irq(&mdev->tconn->req_lock);
		return -EIO;
	}
	__req_mod(req, what, &m);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return 0;
}
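
/* Acks for application writes: ID_SYNCER acks belong to resync traffic and
 * only update the bitmap and counters; everything else is translated into
 * a request state event and applied to the matching write request. */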
static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);
	enum drbd_req_event what;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (p->block_id == ID_SYNCER) {
		drbd_set_in_sync(mdev, sector, blksize);
		dec_rs_pending(mdev);
		return 0;
	}
	switch (pi->cmd) {
	case P_RS_WRITE_ACK:
		what = WRITE_ACKED_BY_PEER_AND_SIS;
		break;
	case P_WRITE_ACK:
		what = WRITE_ACKED_BY_PEER;
		break;
	case P_RECV_ACK:
		what = RECV_ACKED_BY_PEER;
		break;
	case P_DISCARD_WRITE:
		what = DISCARD_WRITE;
		break;
	case P_RETRY_WRITE:
		what = POSTPONE_WRITE;
		break;
	default:
		BUG();
	}

	return validate_req_change_req_state(mdev, p->block_id, sector,
					     &mdev->write_requests, __func__,
					     what, false);
}

static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);
	int size = be32_to_cpu(p->blksize);
	int err;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (p->block_id == ID_SYNCER) {
		dec_rs_pending(mdev);
		drbd_rs_failed_io(mdev, sector, size);
		return 0;
	}

	err = validate_req_change_req_state(mdev, p->block_id, sector,
					    &mdev->write_requests, __func__,
					    NEG_ACKED, true);
	if (err) {
		/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
		   The master bio might already be completed, therefore the
		   request is no longer in the collision hash. */
		/* In Protocol B we might already have got a P_RECV_ACK
		   but then get a P_NEG_ACK afterwards. */
		drbd_set_out_of_sync(mdev, sector, size);
	}
	return 0;
}

static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n",
		(unsigned long long)sector, be32_to_cpu(p->blksize));

	return validate_req_change_req_state(mdev, p->block_id, sector,
					     &mdev->read_requests, __func__,
					     NEG_ACKED, false);
}

static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	sector_t sector;
	int size;
	struct p_block_ack *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dec_rs_pending(mdev);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, sector);
		switch (pi->cmd) {
		case P_NEG_RS_DREPLY:
			drbd_rs_failed_io(mdev, sector, size);
			/* fall through */
		case P_RS_CANCEL:
			break;
		default:
			BUG();
		}
		put_ldev(mdev);
	}

	return 0;
}

static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct p_barrier_ack *p = pi->data;
	struct drbd_conf *mdev;
	int vnr;

	tl_release(tconn, p->barrier, be32_to_cpu(p->set_size));

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_AHEAD &&
		    atomic_read(&mdev->ap_in_flight) == 0 &&
		    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
			mdev->start_resync_timer.expires = jiffies + HZ;
			add_timer(&mdev->start_resync_timer);
		}
	}
	rcu_read_unlock();

	return 0;
}
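
/* Online-verify replies: record out-of-sync blocks reported by the peer,
 * advance the progress marks, and queue the final w_ov_finished work once
 * the last reply has arrived. */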
static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	struct drbd_work *w;
	sector_t sector;
	int size;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_out_of_sync_found(mdev, sector, size);
	else
		ov_out_of_sync_print(mdev);

	if (!get_ldev(mdev))
		return 0;

	drbd_rs_complete_io(mdev, sector);
	dec_rs_pending(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		w = kmalloc(sizeof(*w), GFP_NOIO);
		if (w) {
			w->cb = w_ov_finished;
			w->mdev = mdev;
			drbd_queue_work(&mdev->tconn->sender_work, w);
		} else {
			dev_err(DEV, "kmalloc(w) failed.\n");
			ov_out_of_sync_print(mdev);
			drbd_resync_finished(mdev);
		}
	}
	put_ldev(mdev);
	return 0;
}

static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
{
	return 0;
}

static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr, not_empty = 0;

	do {
		clear_bit(SIGNAL_ASENDER, &tconn->flags);
		flush_signals(current);

		rcu_read_lock();
		idr_for_each_entry(&tconn->volumes, mdev, vnr) {
			kref_get(&mdev->kref);
			rcu_read_unlock();
			if (drbd_finish_peer_reqs(mdev)) {
				kref_put(&mdev->kref, &drbd_minor_destroy);
				return 1;
			}
			kref_put(&mdev->kref, &drbd_minor_destroy);
			rcu_read_lock();
		}
		set_bit(SIGNAL_ASENDER, &tconn->flags);

		spin_lock_irq(&tconn->req_lock);
		idr_for_each_entry(&tconn->volumes, mdev, vnr) {
			not_empty = !list_empty(&mdev->done_ee);
			if (not_empty)
				break;
		}
		spin_unlock_irq(&tconn->req_lock);
		rcu_read_unlock();
	} while (not_empty);

	return 0;
}
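
/* Meta-socket packet table: pkt_size is the payload size that must follow
 * the header (drbd_asender() reads exactly header_size + pkt_size bytes
 * before dispatching to fn). */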
struct asender_cmd {
	size_t pkt_size;
	int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
};

static struct asender_cmd asender_tbl[] = {
	[P_PING]	    = { 0, got_Ping },
	[P_PING_ACK]	    = { 0, got_PingAck },
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_WRITE]   = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
	[P_RS_CANCEL]       = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_CONN_ST_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_conn_RqSReply },
	[P_RETRY_WRITE]	    = { sizeof(struct p_block_ack), got_BlockAck },
};
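
/* The asender loop is a small state machine: accumulate bytes on the meta
 * socket until a full header (and then a full packet) is present, dispatch
 * it, and reset. In between, it answers SEND_PING requests, flushes
 * confirmed peer requests, and enforces the ping timeout. */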
int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	struct asender_cmd *cmd = NULL;
	struct packet_info pi;
	int rv;
	void *buf    = tconn->meta.rbuf;
	int received = 0;
	unsigned int header_size = drbd_header_size(tconn);
	int expect   = header_size;
	bool ping_timeout_active = false;
	struct net_conf *nc;
	int ping_timeo, tcp_cork, ping_int;

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */

	while (get_t_state(thi) == RUNNING) {
		drbd_thread_current_set_cpu(thi);

		rcu_read_lock();
		nc = rcu_dereference(tconn->net_conf);
		ping_timeo = nc->ping_timeo;
		tcp_cork = nc->tcp_cork;
		ping_int = nc->ping_int;
		rcu_read_unlock();

		if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
			if (drbd_send_ping(tconn)) {
				conn_err(tconn, "drbd_send_ping has failed\n");
				goto reconnect;
			}
			tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
			ping_timeout_active = true;
		}

		/* TODO: conditionally cork; it may hurt latency if we cork without
		   much to send */
		if (tcp_cork)
			drbd_tcp_cork(tconn->meta.socket);
		if (tconn_finish_peer_reqs(tconn)) {
			conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
			goto reconnect;
		}
		/* but unconditionally uncork unless disabled */
		if (tcp_cork)
			drbd_tcp_uncork(tconn->meta.socket);

		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &tconn->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS  (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			conn_err(tconn, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			/* If the data socket received something meanwhile,
			 * that is good enough: peer is still alive. */
			if (time_after(tconn->last_received,
				jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
				continue;
			if (ping_timeout_active) {
				conn_err(tconn, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &tconn->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			conn_err(tconn, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

		if (received == expect && cmd == NULL) {
			if (decode_header(tconn, tconn->meta.rbuf, &pi))
				goto reconnect;
			cmd = &asender_tbl[pi.cmd];
			if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
				conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n",
					 cmdname(pi.cmd), pi.cmd);
				goto disconnect;
			}
			expect = header_size + cmd->pkt_size;
			if (pi.size != expect - header_size) {
				conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
					 pi.cmd, pi.size);
				goto reconnect;
			}
		}
		if (received == expect) {
			int err;

			err = cmd->fn(tconn, &pi);
			if (err) {
				conn_err(tconn, "%pf failed\n", cmd->fn);
				goto reconnect;
			}

			tconn->last_received = jiffies;

			if (cmd == &asender_tbl[P_PING_ACK]) {
				/* restore idle timeout */
				tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
				ping_timeout_active = false;
			}

			buf	 = tconn->meta.rbuf;
			received = 0;
			expect	 = header_size;
			cmd	 = NULL;
		}
	}

	if (0) {
reconnect:
		conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
	}
	if (0) {
disconnect:
		conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
	}
	clear_bit(SIGNAL_ASENDER, &tconn->flags);

	conn_info(tconn, "asender terminated\n");

	return 0;
}