/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>

#include <asm/uaccess.h>

#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

static int drbd_do_features(struct drbd_tconn *tconn);
static int drbd_do_auth(struct drbd_tconn *tconn);
static int drbd_disconnected(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);

#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)

/*
 * some helper functions to deal with singly linked page lists,
 * page->private being our "next" pointer.
 */
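/*
 * (Added illustration, not driver code.) The chain accessor used by the
 * walkers below is assumed (in drbd_int.h) to be essentially
 *
 *	static inline struct page *page_chain_next(struct page *page)
 *	{
 *		return (struct page *)page_private(page);
 *	}
 *
 * so a chain a -> b -> c is encoded as page_private(a) == (unsigned long)b,
 * page_private(b) == (unsigned long)c, and page_private(c) == 0, the zero
 * being the end-of-list marker that page_chain_del() sets below.
 */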
/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
	tmp = page_chain_next(page);
		break; /* found sufficient pages */
	/* insufficient pages, don't use any of them. */

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
	while ((tmp = page_chain_next(page)))

static int page_chain_free(struct page *page)
	page_chain_for_each_safe(page, tmp) {

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);

static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
	struct page *page = NULL;
	struct page *tmp = NULL;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		set_page_private(tmp, (unsigned long)page);

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_alloc_pages will retry this
	 * function "soon". */
	tmp = page_chain_tail(page, NULL);
	spin_lock(&drbd_pp_lock);
	page_chain_add(&drbd_pp_pool, page, tmp);
	spin_unlock(&drbd_pp_lock);

static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
					   struct list_head *to_be_freed)
	struct drbd_peer_request *peer_req;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first one that has not finished,
	   we can stop examining the list... */
	list_for_each_safe(le, tle, &mdev->net_ee) {
		peer_req = list_entry(le, struct drbd_peer_request, w.list);
		if (drbd_peer_req_has_active_page(peer_req))
			break;
		list_move(le, to_be_freed);

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_finished_net_peer_reqs(mdev, &reclaimed);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(mdev, peer_req);

/**
 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
	struct page *page = NULL;

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	nc = rcu_dereference(mdev->tconn->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;

	if (atomic_read(&mdev->pp_in_use) < mxb)
		page = __drbd_alloc_pages(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mxb) {
			page = __drbd_alloc_pages(mdev, number);

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_alloc_pages interrupted!\n");

	finish_wait(&drbd_pp_wait, &wait);

	atomic_add(number, &mdev->pp_in_use);
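/*
 * (Added usage sketch, illustration only; the "retry" parameter name is
 * taken from the kdoc above.) A receive path that needs a chain of four
 * pages would do something like
 *
 *	struct page *chain = drbd_alloc_pages(mdev, 4, true);
 *	if (!chain)
 *		...we were signalled, bail out...
 *	...receive into the pages, attach them to a peer_req, and later...
 *	drbd_free_pages(mdev, chain, 0);
 *
 * The pages come from drbd_pp_pool when possible, from alloc_page(GFP_TRY)
 * otherwise, and pp_in_use is the counter max_buffers is enforced against.
 */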
/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
 * Is also used from inside another spin_lock_irq(&mdev->tconn->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
		i = page_chain_free(page);
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		spin_unlock(&drbd_pp_lock);
	i = atomic_sub_return(i, a);
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			 is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_alloc_peer_req()
 drbd_free_peer_reqs()
 drbd_finish_peer_reqs()
 drbd_wait_ee_list_empty()
*/
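/*
 * (Added illustration.) The helper that requires the req_lock is meant to
 * be wrapped by its callers like
 *
 *	spin_lock_irq(&mdev->tconn->req_lock);
 *	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
 *	spin_unlock_irq(&mdev->tconn->req_lock);
 *
 * which is exactly what drbd_wait_ee_list_empty() below does.
 */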
struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
		    unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
	struct drbd_peer_request *peer_req;
	struct page *page = NULL;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))

	peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "%s: allocation failed\n", __func__);

	page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));

	drbd_clear_interval(&peer_req->i);
	peer_req->i.size = data_size;
	peer_req->i.sector = sector;
	peer_req->i.local = false;
	peer_req->i.waiting = false;

	peer_req->epoch = NULL;
	peer_req->w.mdev = mdev;
	peer_req->pages = page;
	atomic_set(&peer_req->pending_bios, 0);
	/*
	 * The block_id is opaque to the receiver.  It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	peer_req->block_id = id;

	mempool_free(peer_req, drbd_ee_mempool);

void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
	if (peer_req->flags & EE_HAS_DIGEST)
		kfree(peer_req->digest);
	drbd_free_pages(mdev, peer_req->pages, is_net);
	D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
	D_ASSERT(drbd_interval_empty(&peer_req->i));
	mempool_free(peer_req, drbd_ee_mempool);

int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
	LIST_HEAD(work_list);
	struct drbd_peer_request *peer_req, *t;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		__drbd_free_peer_req(mdev, peer_req, is_net);

/*
 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
 */
static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_finished_net_peer_reqs(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(mdev, peer_req);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_discard_write.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		err2 = peer_req->w.cb(&peer_req->w, !!err);
		drbd_free_peer_req(mdev, peer_req);
	wake_up(&mdev->ee_wait);

static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
				     struct list_head *head)
	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->tconn->req_lock);
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->tconn->req_lock);

static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
				    struct list_head *head)
	spin_lock_irq(&mdev->tconn->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->tconn->req_lock);

static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
	struct msghdr msg = {
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)

	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);

static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
	struct msghdr msg = {
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL

	rv = sock_recvmsg(tconn->data.socket, &msg, size, msg.msg_flags);

	/* ECONNRESET	other side closed the connection
	 * ERESTARTSYS	(on sock) we got a signal
	 */
		if (rv == -ECONNRESET)
			conn_info(tconn, "sock was reset by peer\n");
		else if (rv != -ERESTARTSYS)
			conn_err(tconn, "sock_recvmsg returned %d\n", rv);
	} else if (rv == 0) {
		conn_info(tconn, "sock was shut down by peer\n");

	/* signal came in, or peer/link went down,
	 * after we read a partial message
	 */
	/* D_ASSERT(signal_pending(current)); */

	conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);

static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
	err = drbd_recv(tconn, buf, size);

static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
	err = drbd_recv_all(tconn, buf, size);
	if (err && !signal_pending(current))
		conn_warn(tconn, "short read (expected size %d)\n", (int)size);

/*
 * On individual connections, the socket buffer size must be set prior to the
 * listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
	/* open coded SO_SNDBUF, SO_RCVBUF */
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
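/* (Added note.) This mirrors what a userspace setsockopt(SOL_SOCKET,
 * SO_SNDBUF/SO_RCVBUF) would do; setting SOCK_SNDBUF_LOCK/SOCK_RCVBUF_LOCK
 * in sk_userlocks also keeps the kernel's TCP buffer autotuning from
 * resizing the buffers behind our back later. */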
static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
	struct sockaddr_in6 src_in6;
	struct sockaddr_in6 peer_in6;
	int err, peer_addr_len, my_addr_len;
	int sndbuf_size, rcvbuf_size, connect_int;
	int disconnect_on_error = 1;

	nc = rcu_dereference(tconn->net_conf);
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	connect_int = nc->connect_int;

	my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6));
	memcpy(&src_in6, &tconn->my_addr, my_addr_len);

	if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6));
	memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = connect_int * HZ;
	drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	what = "bind before connect";
	err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);

		/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
		/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN:    case EHOSTUNREACH:
			disconnect_on_error = 0;

		conn_err(tconn, "%s failed, err = %d\n", what, err);
	if (disconnect_on_error)
		conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);

struct accept_wait_data {
	struct drbd_tconn *tconn;
	struct socket *s_listen;
	struct completion door_bell;
	void (*original_sk_state_change)(struct sock *sk);

static void incoming_connection(struct sock *sk)
	struct accept_wait_data *ad = sk->sk_user_data;
	struct drbd_tconn *tconn = ad->tconn;

	if (sk->sk_state != TCP_ESTABLISHED)
		conn_warn(tconn, "unexpected tcp state change. sk_state = %d\n", sk->sk_state);

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_state_change = ad->original_sk_state_change;
	sk->sk_user_data = NULL;
	write_unlock_bh(&sk->sk_callback_lock);

	sk->sk_state_change(sk);
	complete(&ad->door_bell);

static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_data *ad)
	int err, sndbuf_size, rcvbuf_size, my_addr_len;
	struct sockaddr_in6 my_addr;
	struct socket *s_listen;

	nc = rcu_dereference(tconn->net_conf);
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;

	my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6));
	memcpy(&my_addr, &tconn->my_addr, my_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &s_listen);

	s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */
	drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);

	ad->s_listen = s_listen;
	write_lock_bh(&s_listen->sk->sk_callback_lock);
	ad->original_sk_state_change = s_listen->sk->sk_state_change;
	s_listen->sk->sk_state_change = incoming_connection;
	s_listen->sk->sk_user_data = ad;
	write_unlock_bh(&s_listen->sk->sk_callback_lock);

	err = s_listen->ops->listen(s_listen, 5);

		sock_release(s_listen);
	if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
		conn_err(tconn, "%s failed, err = %d\n", what, err);
		conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);

static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct accept_wait_data *ad)
	int timeo, connect_int, err = 0;
	struct socket *s_estab = NULL;

	nc = rcu_dereference(tconn->net_conf);
	connect_int = nc->connect_int;

	timeo = connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */
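	/* (Added note.) timeo/7 is ~14.3% of the nominal connect timeout;
	 * randomly adding or subtracting it spreads the two peers' effective
	 * timeouts over a ~28.5% wide window, so both sides do not keep
	 * retrying in lockstep. */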
	err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);

	err = kernel_accept(ad->s_listen, &s_estab, 0);
	if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
		conn_err(tconn, "accept failed, err = %d\n", err);
		conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);

static int decode_header(struct drbd_tconn *, void *, struct packet_info *);

static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
			     enum drbd_packet cmd)
	if (!conn_prepare_command(tconn, sock))
	return conn_send_command(tconn, sock, cmd, 0, NULL, 0);

static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)
	unsigned int header_size = drbd_header_size(tconn);
	struct packet_info pi;

	err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
	if (err != header_size) {
	err = decode_header(tconn, tconn->data.rbuf, &pi);

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct socket **sock)
	rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {

/* Gets called if a connection is established, or if a new minor gets created */
int drbd_connected(struct drbd_conf *mdev)
	atomic_set(&mdev->packet_seq, 0);

	mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
		&mdev->tconn->cstate_mutex :
		&mdev->own_state_mutex;

	err = drbd_send_sync_param(mdev);
		err = drbd_send_sizes(mdev, 0, 0);
		err = drbd_send_uuids(mdev);
		err = drbd_send_current_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);
	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int conn_connect(struct drbd_tconn *tconn)
	struct drbd_socket sock, msock;
	struct drbd_conf *mdev;
	int vnr, timeout, h, ok;
	bool discard_my_data;
	enum drbd_state_rv rv;
	struct accept_wait_data ad = {
		.door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),

	if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)

	mutex_init(&sock.mutex);
	sock.sbuf = tconn->data.sbuf;
	sock.rbuf = tconn->data.rbuf;
	mutex_init(&msock.mutex);
	msock.sbuf = tconn->meta.sbuf;
	msock.rbuf = tconn->meta.rbuf;

	clear_bit(DISCARD_CONCURRENT, &tconn->flags);

	/* Assume that the peer only understands protocol 80 until we know better. */
	tconn->agreed_pro_version = 80;

	if (prepare_listen_socket(tconn, &ad))

		s = drbd_try_connect(tconn);
				send_first_packet(tconn, &sock, P_INITIAL_DATA);
			} else if (!msock.socket) {
				send_first_packet(tconn, &msock, P_INITIAL_META);
				conn_err(tconn, "Logic error in conn_connect()\n");
				goto out_release_sockets;

		if (sock.socket && msock.socket) {
			nc = rcu_dereference(tconn->net_conf);
			timeout = nc->ping_timeo * HZ / 10;
			schedule_timeout_interruptible(timeout);
			ok = drbd_socket_okay(&sock.socket);
			ok = drbd_socket_okay(&msock.socket) && ok;

		s = drbd_wait_for_connect(tconn, &ad);
			int fp = receive_first_packet(tconn, s);
			drbd_socket_okay(&sock.socket);
			drbd_socket_okay(&msock.socket);
				conn_warn(tconn, "initial packet S crossed\n");
				sock_release(sock.socket);
				set_bit(DISCARD_CONCURRENT, &tconn->flags);
				conn_warn(tconn, "initial packet M crossed\n");
				sock_release(msock.socket);
			conn_warn(tconn, "Error receiving initial packet\n");

		if (tconn->cstate <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			if (get_t_state(&tconn->receiver) == EXITING)
				goto out_release_sockets;

		ok = drbd_socket_okay(&sock.socket);
		ok = drbd_socket_okay(&msock.socket) && ok;

	sock_release(ad.s_listen);

	sock.socket->sk->sk_reuse = 1; /* SO_REUSEADDR */
	msock.socket->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock.socket->sk->sk_allocation = GFP_NOIO;
	msock.socket->sk->sk_allocation = GFP_NOIO;

	sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/*
	 * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
	 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_CONNECTION_FEATURES timeout,
	 * which we set to 4x the configured ping_timeout. */

	nc = rcu_dereference(tconn->net_conf);

	sock.socket->sk->sk_sndtimeo =
	sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;
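	/* (Added note.) ping_timeo is configured in tenths of a second,
	 * hence the *HZ/10 scaling: with the default ping_timeo of 5 this
	 * is 5*4*HZ/10 == 2*HZ, i.e. a two second feature-handshake
	 * timeout. */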
	msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
	timeout = nc->timeout * HZ / 10;
	discard_my_data = nc->discard_my_data;

	msock.socket->sk->sk_sndtimeo = timeout;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock.socket);
	drbd_tcp_nodelay(msock.socket);

	tconn->data.socket = sock.socket;
	tconn->meta.socket = msock.socket;
	tconn->last_received = jiffies;

	h = drbd_do_features(tconn);

	if (tconn->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(tconn)) {
			conn_err(tconn, "Authentication of peer failed\n");
			conn_err(tconn, "Authentication of peer failed, trying again.\n");

	tconn->data.socket->sk->sk_sndtimeo = timeout;
	tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	if (drbd_send_protocol(tconn) == -EOPNOTSUPP)

	set_bit(STATE_SENT, &tconn->flags);

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		kref_get(&mdev->kref);
		if (discard_my_data)
			set_bit(DISCARD_MY_DATA, &mdev->flags);
		else
			clear_bit(DISCARD_MY_DATA, &mdev->flags);
		drbd_connected(mdev);
		kref_put(&mdev->kref, &drbd_minor_destroy);

	rv = conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
	if (rv < SS_SUCCESS) {
		clear_bit(STATE_SENT, &tconn->flags);

	drbd_thread_start(&tconn->asender);

	mutex_lock(&tconn->conf_update);
	/* The discard_my_data flag is a single-shot modifier to the next
	 * connection attempt, the handshake of which is now well underway.
	 * No need for rcu style copying of the whole struct
	 * just to clear a single value. */
	tconn->net_conf->discard_my_data = 0;
	mutex_unlock(&tconn->conf_update);

out_release_sockets:
	sock_release(ad.s_listen);
	sock_release(sock.socket);
	sock_release(msock.socket);

static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)
	unsigned int header_size = drbd_header_size(tconn);

	if (header_size == sizeof(struct p_header100) &&
	    *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
		struct p_header100 *h = header;
			conn_err(tconn, "Header padding is not zero\n");
		pi->vnr = be16_to_cpu(h->volume);
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
	} else if (header_size == sizeof(struct p_header95) &&
		   *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
		struct p_header95 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
	} else if (header_size == sizeof(struct p_header80) &&
		   *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
		struct p_header80 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be16_to_cpu(h->length);
		conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
			 be32_to_cpu(*(__be32 *)header),
			 tconn->agreed_pro_version);
	pi->data = header + header_size;
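/* (Added summary; field layout as implied by the decoding above, the exact
 * struct packing is an assumption.)
 *	p_header80:  be32 magic,     be16 command, be16 length
 *	p_header95:  be16 magic_big, be16 command, be32 length
 *	p_header100: be32 magic_100, be16 volume,  be16 command, be32 length,
 *	             plus padding that must be zero
 * Which header is in effect follows from drbd_header_size(), i.e. from the
 * agreed protocol version. */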
static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
	void *buffer = tconn->data.rbuf;

	err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));

	err = decode_header(tconn, buffer, pi);
	tconn->last_received = jiffies;

static void drbd_flush(struct drbd_tconn *tconn)
	struct drbd_conf *mdev;

	if (tconn->write_ordering >= WO_bdev_flush) {
		idr_for_each_entry(&tconn->volumes, mdev, vnr) {
			if (!get_ldev(mdev))
				continue;
			kref_get(&mdev->kref);

			rv = blkdev_issue_flush(mdev->ldev->backing_bdev,
				dev_info(DEV, "local disk flush failed with status %d\n", rv);
				/* would rather check on EOPNOTSUPP, but that is not reliable.
				 * don't try again for ANY return value != 0
				 * if (rv == -EOPNOTSUPP) */
				drbd_bump_write_ordering(tconn, WO_drain_io);
			kref_put(&mdev->kref, &drbd_minor_destroy);

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&tconn->epoch_lock);
		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
			atomic_dec(&epoch->active);
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
		case EV_BECAME_LAST:

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&tconn->epoch_lock);
				drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size);
				spin_lock(&tconn->epoch_lock);
			/* FIXME: dec unacked on connection, once we have
			 * something to count pending connection packets in. */
			if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
				dec_unacked(epoch->tconn);

			if (tconn->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				if (rv == FE_STILL_LIVE)
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)

	spin_unlock(&tconn->epoch_lock);

/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @tconn:	DRBD connection.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo)
	struct disk_conf *dc;
	struct drbd_conf *mdev;
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",

	pwo = tconn->write_ordering;
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (!get_ldev_if_state(mdev, D_ATTACHING))
			continue;
		dc = rcu_dereference(mdev->ldev->disk_conf);

		if (wo == WO_bdev_flush && !dc->disk_flushes)
		if (wo == WO_drain_io && !dc->disk_drain)
	tconn->write_ordering = wo;
	if (pwo != tconn->write_ordering || wo == WO_bdev_flush)
		conn_info(tconn, "Method to ensure write ordering: %s\n", write_ordering_str[tconn->write_ordering]);

/**
 * drbd_submit_peer_request()
 * @mdev:	DRBD device.
 * @peer_req:	peer request
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 * single page to an empty bio (which should never happen and likely indicates
 * that the lower level IO stack is in some way broken). This has been observed
 * on certain Xen deployments.
 */
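/* (Added note.) bio_add_page() may refuse a page when the backing queue's
 * limits (max segments, max size, merge constraints) would be exceeded;
 * the submit loop below then closes the current bio and starts a new one,
 * which is how a single peer request fans out into several bios. */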
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_conf *mdev,
			     struct drbd_peer_request *peer_req,
			     const unsigned rw, const int fault_type)
	struct bio *bios = NULL;
	struct page *page = peer_req->pages;
	sector_t sector = peer_req->i.sector;
	unsigned ds = peer_req->i.size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* In most cases, we will only need one bio.  But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio.
	 *
	 * Plain bio_alloc is good enough here, this is no DRBD internally
	 * generated bio, but a bio allocated on behalf of the peer.
	 */
	bio = bio_alloc(GFP_NOIO, nr_pages);
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
	/* > peer_req->i.sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_private = peer_req;
	bio->bi_end_io = drbd_peer_request_endio;
	bio->bi_next = bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyways,
			 * we deal with it, and complain (below). */
			if (bio->bi_vcnt == 0) {
					"bio_add_page failed for len=%u, "
					"bi_vcnt=0 (bi_sector=%llu)\n",
					len, (unsigned long long)bio->bi_sector);
	D_ASSERT(page == NULL);

	atomic_set(&peer_req->pending_bios, n_bios);
		bios = bios->bi_next;
		bio->bi_next = NULL;
		drbd_generic_make_request(mdev, fault_type, bio);

		bios = bios->bi_next;

static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
					     struct drbd_peer_request *peer_req)
	struct drbd_interval *i = &peer_req->i;

	drbd_remove_interval(&mdev->write_requests, i);
	drbd_clear_interval(i);

	/* Wake up any processes waiting for this peer request to complete. */
		wake_up(&mdev->misc_wait);

void conn_wait_active_ee_empty(struct drbd_tconn *tconn)
	struct drbd_conf *mdev;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		kref_get(&mdev->kref);
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		kref_put(&mdev->kref, &drbd_minor_destroy);

static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
	struct p_barrier *p = pi->data;
	struct drbd_epoch *epoch;

	/* FIXME these are unacked on connection,
	 * not a specific (peer)device.
	 */
	tconn->current_epoch->barrier_nr = p->barrier;
	tconn->current_epoch->tconn = tconn;
	rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 */
	switch (tconn->write_ordering) {
		if (rv == FE_RECYCLED)

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			conn_warn(tconn, "Allocation of an epoch failed, slowing down\n");

		conn_wait_active_ee_empty(tconn);

		if (atomic_read(&tconn->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);

		conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);

	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&tconn->epoch_lock);
	if (atomic_read(&tconn->current_epoch->epoch_size)) {
		list_add(&epoch->list, &tconn->current_epoch->list);
		tconn->current_epoch = epoch;
		/* The current_epoch got recycled while we allocated this one... */
	spin_unlock(&tconn->epoch_lock);

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_peer_request *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
	      int data_size) __must_hold(local)
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_peer_request *peer_req;
	void *dig_in = mdev->tconn->int_dig_in;
	void *dig_vv = mdev->tconn->int_dig_vv;
	unsigned long *data;

	if (mdev->tconn->peer_integrity_tfm) {
		dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
		/*
		 * FIXME: Receive the incoming digest into the receive buffer
		 * here, together with its struct p_data?
		 */
		err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);

	if (!expect(IS_ALIGNED(data_size, 512)))
	if (!expect(data_size <= DRBD_MAX_BIO_SIZE))

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "request from peer beyond end of local disk: "
			"capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);

	page = peer_req->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		err = drbd_recv_all_warn(mdev->tconn, data, len);
		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
			drbd_free_peer_req(mdev, peer_req);

		drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_free_peer_req(mdev, peer_req);

	mdev->recv_cnt += data_size>>9;

/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
	page = drbd_alloc_pages(mdev, 1, 1);

		unsigned int len = min_t(int, data_size, PAGE_SIZE);
		err = drbd_recv_all_warn(mdev->tconn, data, len);

	drbd_free_pages(mdev, page, 0);

static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
	struct bio_vec *bvec;
	int dgs, err, i, expect;
	void *dig_in = mdev->tconn->int_dig_in;
	void *dig_vv = mdev->tconn->int_dig_vv;

	if (mdev->tconn->peer_integrity_tfm) {
		dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
		err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);

	/* optimistically update recv_cnt.  if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
		expect = min_t(int, data_size, bvec->bv_len);
		err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
		kunmap(bvec->bv_page);
		data_size -= expect;

		drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");

	D_ASSERT(data_size == 0);

/*
 * e_end_resync_block() is called in asender context via
 * drbd_finish_peer_reqs().
 */
static int e_end_resync_block(struct drbd_work *w, int unused)
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	struct drbd_conf *mdev = w->mdev;
	sector_t sector = peer_req->i.sector;

	D_ASSERT(drbd_interval_empty(&peer_req->i));

	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, peer_req->i.size);
		err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, peer_req->i.size);
		err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);

static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
	struct drbd_peer_request *peer_req;

	peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);

	dec_rs_pending(mdev);

	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */
	peer_req->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_add(&peer_req->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&peer_req->w.list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	drbd_free_peer_req(mdev, peer_req);

static struct drbd_request *
find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
	     sector_t sector, bool missing_ok, const char *func)
	struct drbd_request *req;

	/* Request object according to our peer */
	req = (struct drbd_request *)(unsigned long)id;
	if (drbd_contains_interval(root, sector, &req->i) && req->i.local)

	dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
		(unsigned long)id, (unsigned long long)sector);

static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
	struct drbd_conf *mdev;
	struct drbd_request *req;
	struct p_data *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->tconn->req_lock);
	req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
	spin_unlock_irq(&mdev->tconn->req_lock);

	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	err = recv_dless_read(mdev, req, sector, pi->size);
		req_mod(req, DATA_RECEIVED);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
	struct drbd_conf *mdev;
	struct p_data *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_peer_request_endio. */
		err = recv_resync_read(mdev, sector, pi->size);
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		err = drbd_drain_block(mdev, pi->size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);

	atomic_add(pi->size >> 9, &mdev->rs_sect_in);

static void restart_conflicting_writes(struct drbd_conf *mdev,
				       sector_t sector, int size)
	struct drbd_interval *i;
	struct drbd_request *req;

	drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
		req = container_of(i, struct drbd_request, i);
		if (req->rq_state & RQ_LOCAL_PENDING ||
		    !(req->rq_state & RQ_POSTPONED))
			continue;
		/* as it is RQ_POSTPONED, this will cause it to
		 * be queued on the retry workqueue. */
		__req_mod(req, DISCARD_WRITE, NULL);

/*
 * e_end_block() is called in asender context via drbd_finish_peer_reqs().
 */
static int e_end_block(struct drbd_work *w, int cancel)
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	struct drbd_conf *mdev = w->mdev;
	sector_t sector = peer_req->i.sector;

	if (peer_req->flags & EE_SEND_WRITE_ACK) {
		if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				peer_req->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			err = drbd_send_ack(mdev, pcmd, peer_req);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, peer_req->i.size);
			err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */

	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (peer_req->flags & EE_IN_INTERVAL_TREE) {
		spin_lock_irq(&mdev->tconn->req_lock);
		D_ASSERT(!drbd_interval_empty(&peer_req->i));
		drbd_remove_epoch_entry_interval(mdev, peer_req);
		if (peer_req->flags & EE_RESTART_REQUESTS)
			restart_conflicting_writes(mdev, sector, peer_req->i.size);
		spin_unlock_irq(&mdev->tconn->req_lock);
	D_ASSERT(drbd_interval_empty(&peer_req->i));

	drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
	struct drbd_conf *mdev = w->mdev;
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);

	err = drbd_send_ack(mdev, ack, peer_req);

static int e_send_discard_write(struct drbd_work *w, int unused)
	return e_send_ack(w, P_DISCARD_WRITE);

static int e_send_retry_write(struct drbd_work *w, int unused)
	struct drbd_tconn *tconn = w->mdev->tconn;

	return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
			     P_RETRY_WRITE : P_DISCARD_WRITE);

static bool seq_greater(u32 a, u32 b)
	/*
	 * We assume 32-bit wrap-around here.
	 * For 24-bit wrap-around, we would have to shift:
	 *  a <<= 8; b <<= 8;
	 */
	return (s32)a - (s32)b > 0;
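/* (Added example.) The signed difference handles sequence number
 * wrap-around naturally: seq_greater(1, 0xffffffff) is true, because
 * (s32)1 - (s32)0xffffffff == 1 - (-1) == 2 > 0. */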
static u32 seq_max(u32 a, u32 b)
	return seq_greater(a, b) ? a : b;

static bool need_peer_seq(struct drbd_conf *mdev)
	struct drbd_tconn *tconn = mdev->tconn;

	/*
	 * We only need to keep track of the last packet_seq number of our peer
	 * if we are in dual-primary mode and we have the discard flag set; see
	 * handle_write_conflicts().
	 */
	tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;

	return tp && test_bit(DISCARD_CONCURRENT, &tconn->flags);

static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
	unsigned int newest_peer_seq;

	if (need_peer_seq(mdev)) {
		spin_lock(&mdev->peer_seq_lock);
		newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
		mdev->peer_seq = newest_peer_seq;
		spin_unlock(&mdev->peer_seq_lock);
		/* wake up only if we actually changed mdev->peer_seq */
		if (peer_seq == newest_peer_seq)
			wake_up(&mdev->seq_wait);

static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
	return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
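/* (Added example.) Lengths l1/l2 are in bytes, sectors are 512 bytes, hence
 * the >>9: overlaps(0, 4096, 7, 512) is true (byte range [0,4096) covers
 * sectors 0..7, and sector 7 lies within it), while overlaps(0, 4096, 8, 512)
 * is false, the two ranges merely touch. */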
/* maybe change sync_ee into interval trees as well? */
static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
	struct drbd_peer_request *rs_req;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_for_each_entry(rs_req, &mdev->sync_ee, w.list) {
		if (overlaps(peer_req->i.sector, peer_req->i.size,
			     rs_req->i.sector, rs_req->i.size)) {
	spin_unlock_irq(&mdev->tconn->req_lock);

/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq number, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)
	if (!need_peer_seq(mdev))

	spin_lock(&mdev->peer_seq_lock);
		if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
			mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);
		if (signal_pending(current)) {
		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
		spin_unlock(&mdev->peer_seq_lock);
		timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10;
		timeout = schedule_timeout(timeout);
		spin_lock(&mdev->peer_seq_lock);
			dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
	spin_unlock(&mdev->peer_seq_lock);
	finish_wait(&mdev->seq_wait, &wait);

/* see also bio_flags_to_wire()
 * DRBD_REQ_*, because we need to semantically map the flags to data packet
 * flags and back. We may replicate to other kernel versions. */
static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
	return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
		(dpf & DP_FUA ? REQ_FUA : 0) |
		(dpf & DP_FLUSH ? REQ_FLUSH : 0) |
		(dpf & DP_DISCARD ? REQ_DISCARD : 0);

static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
	struct drbd_interval *i;

	drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
		struct drbd_request *req;
		struct bio_and_error m;

		req = container_of(i, struct drbd_request, i);
		if (!(req->rq_state & RQ_POSTPONED))
			continue;
		req->rq_state &= ~RQ_POSTPONED;
		__req_mod(req, NEG_ACKED, &m);
		spin_unlock_irq(&mdev->tconn->req_lock);
			complete_master_bio(mdev, &m);
		spin_lock_irq(&mdev->tconn->req_lock);

static int handle_write_conflicts(struct drbd_conf *mdev,
				  struct drbd_peer_request *peer_req)
	struct drbd_tconn *tconn = mdev->tconn;
	bool resolve_conflicts = test_bit(DISCARD_CONCURRENT, &tconn->flags);
	sector_t sector = peer_req->i.sector;
	const unsigned int size = peer_req->i.size;
	struct drbd_interval *i;

	/*
	 * Inserting the peer request into the write_requests tree will prevent
	 * new conflicting local requests from being added.
	 */
	drbd_insert_interval(&mdev->write_requests, &peer_req->i);

	drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
		if (i == &peer_req->i)

		/*
		 * Our peer has sent a conflicting remote request; this
		 * should not happen in a two-node setup. Wait for the
		 * earlier peer request to complete.
		 */
		err = drbd_wait_misc(mdev, i);

		equal = i->sector == sector && i->size == size;
		if (resolve_conflicts) {
			/*
			 * If the peer request is fully contained within the
			 * overlapping request, it can be discarded; otherwise,
			 * it will be retried once all overlapping requests
			 * have completed.
			 */
			bool discard = i->sector <= sector && i->sector +
				       (i->size >> 9) >= sector + (size >> 9);

				dev_alert(DEV, "Concurrent writes detected: "
					       "local=%llus +%u, remote=%llus +%u, "
					       "assuming %s came first\n",
					  (unsigned long long)i->sector, i->size,
					  (unsigned long long)sector, size,
					  discard ? "local" : "remote");

			peer_req->w.cb = discard ? e_send_discard_write :
						   e_send_retry_write;
			list_add_tail(&peer_req->w.list, &mdev->done_ee);
			wake_asender(mdev->tconn);

			struct drbd_request *req =
				container_of(i, struct drbd_request, i);

				dev_alert(DEV, "Concurrent writes detected: "
					       "local=%llus +%u, remote=%llus +%u\n",
					  (unsigned long long)i->sector, i->size,
					  (unsigned long long)sector, size);

			if (req->rq_state & RQ_LOCAL_PENDING ||
			    !(req->rq_state & RQ_POSTPONED)) {
				/*
				 * Wait for the node with the discard flag to
				 * decide if this request will be discarded or
				 * retried. Requests that are discarded will
				 * disappear from the write_requests tree.
				 *
				 * In addition, wait for the conflicting
				 * request to finish locally before submitting
				 * the conflicting peer request.
				 */
				err = drbd_wait_misc(mdev, &req->i);
					_conn_request_state(mdev->tconn,
							    NS(conn, C_TIMEOUT),
					fail_postponed_requests(mdev, sector, size);

			/*
			 * Remember to restart the conflicting requests after
			 * the new peer request has completed.
			 */
			peer_req->flags |= EE_RESTART_REQUESTS;

	drbd_remove_epoch_entry_interval(mdev, peer_req);

/* mirrored write */
static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
	struct drbd_conf *mdev;
	struct drbd_peer_request *peer_req;
	struct p_data *p = pi->data;
	u32 peer_seq = be32_to_cpu(p->seq_num);

	mdev = vnr_to_mdev(tconn, pi->vnr);

	if (!get_ldev(mdev)) {
		err = wait_for_and_update_peer_seq(mdev, peer_seq);
		drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
		atomic_inc(&tconn->current_epoch->epoch_size);
		err2 = drbd_drain_block(mdev, pi->size);

	/*
	 * Corresponding put_ldev done either below (on various errors), or in
	 * drbd_peer_request_endio, if we successfully submit the data at the
	 * end of this function.
	 */

	sector = be64_to_cpu(p->sector);
	peer_req = read_in_block(mdev, p->block_id, sector, pi->size);

	peer_req->w.cb = e_end_block;

	dp_flags = be32_to_cpu(p->dp_flags);
	rw |= wire_flags_to_bio(mdev, dp_flags);
	if (peer_req->pages == NULL) {
		D_ASSERT(peer_req->i.size == 0);
		D_ASSERT(dp_flags & DP_FLUSH);

	if (dp_flags & DP_MAY_SET_IN_SYNC)
		peer_req->flags |= EE_MAY_SET_IN_SYNC;

	spin_lock(&tconn->epoch_lock);
	peer_req->epoch = tconn->current_epoch;
	atomic_inc(&peer_req->epoch->epoch_size);
	atomic_inc(&peer_req->epoch->active);
	spin_unlock(&tconn->epoch_lock);

	tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
		peer_req->flags |= EE_IN_INTERVAL_TREE;
		err = wait_for_and_update_peer_seq(mdev, peer_seq);
			goto out_interrupted;
		spin_lock_irq(&mdev->tconn->req_lock);
		err = handle_write_conflicts(mdev, peer_req);
			spin_unlock_irq(&mdev->tconn->req_lock);
			if (err == -ENOENT) {
			goto out_interrupted;
		spin_lock_irq(&mdev->tconn->req_lock);
	list_add(&peer_req->w.list, &mdev->active_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (mdev->state.conn == C_SYNC_TARGET)
		wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, peer_req));

	if (mdev->tconn->agreed_pro_version < 100) {
		switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
			dp_flags |= DP_SEND_WRITE_ACK;
			dp_flags |= DP_SEND_RECEIVE_ACK;

	if (dp_flags & DP_SEND_WRITE_ACK) {
		peer_req->flags |= EE_SEND_WRITE_ACK;
		/* corresponding dec_unacked() in e_end_block()
		 * respective _drbd_clear_done_ee */

	if (dp_flags & DP_SEND_RECEIVE_ACK) {
		/* I really don't like it that the receiver thread
		 * sends on the msock, but anyways */
		drbd_send_ack(mdev, P_RECV_ACK, peer_req);

	if (mdev->state.pdsk < D_INCONSISTENT) {
		/* In case we have the only disk of the cluster, */
		drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
		peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
		peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
		drbd_al_begin_io(mdev, &peer_req->i);

	err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&peer_req->w.list);
	drbd_remove_epoch_entry_interval(mdev, peer_req);
	spin_unlock_irq(&mdev->tconn->req_lock);
	if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
		drbd_al_complete_io(mdev, &peer_req->i);

out_interrupted:
	drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP);
	drbd_free_peer_req(mdev, peer_req);

/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
 * (more than 64 sectors) of activity we cannot account for with our own resync
 * activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 */
int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
	struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
	unsigned long db, dt, dbdt;
	struct lc_element *tmp;
	unsigned int c_min_rate;

	c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;

	/* feature disabled? */
	if (c_min_rate == 0)

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
			spin_unlock_irq(&mdev->al_lock);
		/* Do not slow down if app IO is already waiting for this extent */
	spin_unlock_irq(&mdev->al_lock);

	curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
		      (int)part_stat_read(&disk->part0, sectors[1]) -
		      atomic_read(&mdev->rs_sect_ev);

	if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
		unsigned long rs_left;

		mdev->rs_last_events = curr_events;

		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
		 */
		i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;

		if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
			rs_left = mdev->ov_left;
		else
			rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;

		dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;

		db = mdev->rs_mark_left[i] - rs_left;
		dbdt = Bit2KB(db/dt);
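		/* (Added note.) db is in bitmap bits (4 KiB of device data
		 * each, at DRBD's default bitmap granularity), dt is in
		 * seconds; Bit2KB() thus yields the recent resync rate in
		 * KiB/s, the same unit the configured c_min_rate is compared
		 * against below. */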
2360 if (dbdt > c_min_rate)
2367 static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
2369 struct drbd_conf *mdev;
2372 struct drbd_peer_request *peer_req;
2373 struct digest_info *di = NULL;
2375 unsigned int fault_type;
2376 struct p_block_req *p = pi->data;
2378 mdev = vnr_to_mdev(tconn, pi->vnr);
2381 capacity = drbd_get_capacity(mdev->this_bdev);
2383 sector = be64_to_cpu(p->sector);
2384 size = be32_to_cpu(p->blksize);
2386 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
2387 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2388 (unsigned long long)sector, size);
2391 if (sector + (size>>9) > capacity) {
2392 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2393 (unsigned long long)sector, size);
2397 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
2400 case P_DATA_REQUEST:
2401 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
2403 case P_RS_DATA_REQUEST:
2404 case P_CSUM_RS_REQUEST:
2406 drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
2410 dec_rs_pending(mdev);
2411 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
2416 if (verb && __ratelimit(&drbd_ratelimit_state))
2417 dev_err(DEV, "Can not satisfy peer's read request, "
2418 "no local data.\n");
2420 /* drain possibly payload */
2421 return drbd_drain_block(mdev, pi->size);
2424 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2425 * "criss-cross" setup, that might cause write-out on some other DRBD,
2426 * which in turn might block on the other node at this very place. */
2427 peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);
2434 case P_DATA_REQUEST:
2435 peer_req->w.cb = w_e_end_data_req;
2436 fault_type = DRBD_FAULT_DT_RD;
2437 /* application IO, don't drbd_rs_begin_io */
2440 case P_RS_DATA_REQUEST:
2441 peer_req->w.cb = w_e_end_rsdata_req;
2442 fault_type = DRBD_FAULT_RS_RD;
2443 /* used in the sector offset progress display */
2444 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
2448 case P_CSUM_RS_REQUEST:
2449 fault_type = DRBD_FAULT_RS_RD;
2450 di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
2454 di->digest_size = pi->size;
2455 di->digest = (((char *)di)+sizeof(struct digest_info));
2457 peer_req->digest = di;
2458 peer_req->flags |= EE_HAS_DIGEST;
2460 if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
2463 if (pi->cmd == P_CSUM_RS_REQUEST) {
2464 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
2465 peer_req->w.cb = w_e_end_csum_rs_req;
2466 /* used in the sector offset progress display */
2467 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
2468 } else if (pi->cmd == P_OV_REPLY) {
2469 /* track progress, we may need to throttle */
2470 atomic_add(size >> 9, &mdev->rs_sect_in);
2471 peer_req->w.cb = w_e_end_ov_reply;
2472 dec_rs_pending(mdev);
2473 /* drbd_rs_begin_io done when we sent this request,
2474 * but accounting still needs to be done. */
2475 goto submit_for_resync;
2480 if (mdev->ov_start_sector == ~(sector_t)0 &&
2481 mdev->tconn->agreed_pro_version >= 90) {
2482 unsigned long now = jiffies;
2484 mdev->ov_start_sector = sector;
2485 mdev->ov_position = sector;
2486 mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2487 mdev->rs_total = mdev->ov_left;
2488 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2489 mdev->rs_mark_left[i] = mdev->ov_left;
2490 mdev->rs_mark_time[i] = now;
2492 dev_info(DEV, "Online Verify start sector: %llu\n",
2493 (unsigned long long)sector);
2495 peer_req->w.cb = w_e_end_ov_req;
2496 fault_type = DRBD_FAULT_RS_RD;
2503 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2504 * wrt the receiver, but it is not as straightforward as it may seem.
2505 * Various places in the resync start and stop logic assume resync
2506 * requests are processed in order, requeuing this on the worker thread
2507 * introduces a bunch of new code for synchronization between threads.
2509 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2510 * "forever", throttling after drbd_rs_begin_io will lock that extent
2511 * for application writes for the same time. For now, just throttle
2512 * here, where the rest of the code expects the receiver to sleep for
2513 * a while, anyways. */
2516 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2517 * this defers syncer requests for some time, before letting at least
2518 * one request through. The resync controller on the receiving side
2519 * will adapt to the incoming rate accordingly.
2521 * We cannot throttle here if remote is Primary/SyncTarget:
2522 * we would also throttle its application reads.
2523 * In that case, throttling is done on the SyncTarget only.
2525 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2526 schedule_timeout_uninterruptible(HZ/10);
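/* HZ/10 jiffies is roughly a 100 ms back-off; each further incoming
 * request re-evaluates drbd_rs_should_slow_down() before we take the
 * resync extent lock below. */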
2527 if (drbd_rs_begin_io(mdev, sector))
2531 atomic_add(size >> 9, &mdev->rs_sect_ev);
2535 spin_lock_irq(&mdev->tconn->req_lock);
2536 list_add_tail(&peer_req->w.list, &mdev->read_ee);
2537 spin_unlock_irq(&mdev->tconn->req_lock);
2539 if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
2542 /* don't care for the reason here */
2543 dev_err(DEV, "submit failed, triggering re-connect\n");
2544 spin_lock_irq(&mdev->tconn->req_lock);
2545 list_del(&peer_req->w.list);
2546 spin_unlock_irq(&mdev->tconn->req_lock);
2547 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2551 drbd_free_peer_req(mdev, peer_req);
2555 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2557 int self, peer, rv = -100;
2558 unsigned long ch_self, ch_peer;
2559 enum drbd_after_sb_p after_sb_0p;
2561 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2562 peer = mdev->p_uuid[UI_BITMAP] & 1;
2564 ch_peer = mdev->p_uuid[UI_SIZE];
2565 ch_self = mdev->comm_bm_set;
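/* A sketch of the inputs, as this code assumes them: the low bit of the
 * bitmap UUID records whether that node was primary when the UUIDs
 * diverged; ch_self/ch_peer count the blocks each side changed while
 * disconnected, the peer's count arriving in the UI_SIZE slot of its
 * UUID packet. */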
2568 after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p;
2570 switch (after_sb_0p) {
2572 case ASB_DISCARD_SECONDARY:
2573 case ASB_CALL_HELPER:
2575 dev_err(DEV, "Configuration error.\n");
2577 case ASB_DISCONNECT:
2579 case ASB_DISCARD_YOUNGER_PRI:
2580 if (self == 0 && peer == 1) {
2584 if (self == 1 && peer == 0) {
2588 /* Else fall through to one of the other strategies... */
2589 case ASB_DISCARD_OLDER_PRI:
2590 if (self == 0 && peer == 1) {
2594 if (self == 1 && peer == 0) {
2598 /* Else fall through to one of the other strategies... */
2599 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
2600 "Using discard-least-changes instead\n");
2601 case ASB_DISCARD_ZERO_CHG:
2602 if (ch_peer == 0 && ch_self == 0) {
2603 rv = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags)
2607 if (ch_peer == 0) { rv = 1; break; }
2608 if (ch_self == 0) { rv = -1; break; }
2610 if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
2612 case ASB_DISCARD_LEAST_CHG:
2613 if (ch_self < ch_peer)
2615 else if (ch_self > ch_peer)
2617 else /* ( ch_self == ch_peer ) */
2618 /* Well, then use something else. */
2619 rv = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags)
2622 case ASB_DISCARD_LOCAL:
2625 case ASB_DISCARD_REMOTE:
2632 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2635 enum drbd_after_sb_p after_sb_1p;
2638 after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p;
2640 switch (after_sb_1p) {
2641 case ASB_DISCARD_YOUNGER_PRI:
2642 case ASB_DISCARD_OLDER_PRI:
2643 case ASB_DISCARD_LEAST_CHG:
2644 case ASB_DISCARD_LOCAL:
2645 case ASB_DISCARD_REMOTE:
2646 case ASB_DISCARD_ZERO_CHG:
2647 dev_err(DEV, "Configuration error.\n");
2649 case ASB_DISCONNECT:
2652 hg = drbd_asb_recover_0p(mdev);
2653 if (hg == -1 && mdev->state.role == R_SECONDARY)
2655 if (hg == 1 && mdev->state.role == R_PRIMARY)
2659 rv = drbd_asb_recover_0p(mdev);
2661 case ASB_DISCARD_SECONDARY:
2662 return mdev->state.role == R_PRIMARY ? 1 : -1;
2663 case ASB_CALL_HELPER:
2664 hg = drbd_asb_recover_0p(mdev);
2665 if (hg == -1 && mdev->state.role == R_PRIMARY) {
2666 enum drbd_state_rv rv2;
2668 drbd_set_role(mdev, R_SECONDARY, 0);
2669 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2670 * we might be here in C_WF_REPORT_PARAMS which is transient.
2671 * we do not need to wait for the after state change work either. */
2672 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2673 if (rv2 != SS_SUCCESS) {
2674 drbd_khelper(mdev, "pri-lost-after-sb");
2676 dev_warn(DEV, "Successfully gave up primary role.\n");
2686 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2689 enum drbd_after_sb_p after_sb_2p;
2692 after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p;
2694 switch (after_sb_2p) {
2695 case ASB_DISCARD_YOUNGER_PRI:
2696 case ASB_DISCARD_OLDER_PRI:
2697 case ASB_DISCARD_LEAST_CHG:
2698 case ASB_DISCARD_LOCAL:
2699 case ASB_DISCARD_REMOTE:
2701 case ASB_DISCARD_SECONDARY:
2702 case ASB_DISCARD_ZERO_CHG:
2703 dev_err(DEV, "Configuration error.\n");
2706 rv = drbd_asb_recover_0p(mdev);
2708 case ASB_DISCONNECT:
2710 case ASB_CALL_HELPER:
2711 hg = drbd_asb_recover_0p(mdev);
2713 enum drbd_state_rv rv2;
2715 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2716 * we might be here in C_WF_REPORT_PARAMS which is transient.
2717 * we do not need to wait for the after state change work either. */
2718 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2719 if (rv2 != SS_SUCCESS) {
2720 drbd_khelper(mdev, "pri-lost-after-sb");
2722 dev_warn(DEV, "Successfully gave up primary role.\n");
2732 static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2733 u64 bits, u64 flags)
2736 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2739 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2741 (unsigned long long)uuid[UI_CURRENT],
2742 (unsigned long long)uuid[UI_BITMAP],
2743 (unsigned long long)uuid[UI_HISTORY_START],
2744 (unsigned long long)uuid[UI_HISTORY_END],
2745 (unsigned long long)bits,
2746 (unsigned long long)flags);
2750 100 after split brain, try auto recover
2751 2 C_SYNC_SOURCE set BitMap
2752 1 C_SYNC_SOURCE use BitMap
2753 0 no Sync
2754 -1 C_SYNC_TARGET use BitMap
2755 -2 C_SYNC_TARGET set BitMap
2756 -100 after split brain, disconnect
2757 -1000 unrelated data
2758 -1091 requires proto 91
2759 -1096 requires proto 96
2761 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2766 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2767 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2770 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2774 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2775 peer != UUID_JUST_CREATED)
2779 if (self != UUID_JUST_CREATED &&
2780 (peer == UUID_JUST_CREATED || peer == (u64)0))
2784 int rct, dc; /* roles at crash time */
2786 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2788 if (mdev->tconn->agreed_pro_version < 91)
2791 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2792 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2793 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2794 drbd_uuid_set_bm(mdev, 0UL);
2796 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2797 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2800 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2807 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2809 if (mdev->tconn->agreed_pro_version < 91)
2812 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2813 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2814 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2816 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2817 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2818 mdev->p_uuid[UI_BITMAP] = 0UL;
2820 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2823 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2830 /* Common power [off|failure] */
2831 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2832 (mdev->p_uuid[UI_FLAGS] & 2);
2833 /* lowest bit is set when we were primary,
2834 * next bit (weight 2) is set when peer was primary */
2838 case 0: /* !self_pri && !peer_pri */ return 0;
2839 case 1: /* self_pri && !peer_pri */ return 1;
2840 case 2: /* !self_pri && peer_pri */ return -1;
2841 case 3: /* self_pri && peer_pri */
2842 dc = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
2848 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2853 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2855 if (mdev->tconn->agreed_pro_version < 96 ?
2856 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2857 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2858 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
2859 /* The last P_SYNC_UUID did not get through. Undo the last start of
2860 resync as sync source modifications of the peer's UUIDs. */
2862 if (mdev->tconn->agreed_pro_version < 91)
2865 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2866 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2868 dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
2869 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2876 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2877 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2878 peer = mdev->p_uuid[i] & ~((u64)1);
2884 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2885 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2890 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2892 if (mdev->tconn->agreed_pro_version < 96 ?
2893 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2894 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2895 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
2896 /* The last P_SYNC_UUID did not get through. Undo the last start of
2897 resync as sync source modifications of our UUIDs. */
2899 if (mdev->tconn->agreed_pro_version < 91)
2902 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2903 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2905 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
2906 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2907 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2915 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2916 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2917 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2923 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2924 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2925 if (self == peer && self != ((u64)0))
2929 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2930 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2931 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2932 peer = mdev->p_uuid[j] & ~((u64)1);
2941 /* drbd_sync_handshake() returns the new conn state on success, or
2942 C_MASK (-1) on failure.
2944 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2945 enum drbd_disk_state peer_disk) __must_hold(local)
2947 enum drbd_conns rv = C_MASK;
2948 enum drbd_disk_state mydisk;
2949 struct net_conf *nc;
2950 int hg, rule_nr, rr_conflict, tentative;
2952 mydisk = mdev->state.disk;
2953 if (mydisk == D_NEGOTIATING)
2954 mydisk = mdev->new_state_tmp.disk;
2956 dev_info(DEV, "drbd_sync_handshake:\n");
2957 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2958 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2959 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2961 hg = drbd_uuid_compare(mdev, &rule_nr);
2963 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2966 dev_alert(DEV, "Unrelated data, aborting!\n");
2970 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
2974 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2975 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2976 int f = (hg == -100) || abs(hg) == 2;
2977 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2980 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2981 hg > 0 ? "source" : "target");
2985 drbd_khelper(mdev, "initial-split-brain");
2988 nc = rcu_dereference(mdev->tconn->net_conf);
2990 if (hg == 100 || (hg == -100 && nc->always_asbp)) {
2991 int pcount = (mdev->state.role == R_PRIMARY)
2992 + (peer_role == R_PRIMARY);
2993 int forced = (hg == -100);
2997 hg = drbd_asb_recover_0p(mdev);
3000 hg = drbd_asb_recover_1p(mdev);
3003 hg = drbd_asb_recover_2p(mdev);
3006 if (abs(hg) < 100) {
3007 dev_warn(DEV, "Split-Brain detected, %d primaries, "
3008 "automatically solved. Sync from %s node\n",
3009 pcount, (hg < 0) ? "peer" : "this");
3011 dev_warn(DEV, "Doing a full sync, since"
3012 " UUIDs where ambiguous.\n");
3019 if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1))
3021 if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1))
3025 dev_warn(DEV, "Split-Brain detected, manually solved. "
3026 "Sync from %s node\n",
3027 (hg < 0) ? "peer" : "this");
3029 rr_conflict = nc->rr_conflict;
3030 tentative = nc->tentative;
3034 /* FIXME this log message is not correct if we end up here
3035 * after an attempted attach on a diskless node.
3036 * We just refuse to attach -- well, we drop the "connection"
3037 * to that disk, in a way... */
3038 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
3039 drbd_khelper(mdev, "split-brain");
3043 if (hg > 0 && mydisk <= D_INCONSISTENT) {
3044 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
3048 if (hg < 0 && /* by intention we do not use mydisk here. */
3049 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
3050 switch (rr_conflict) {
3051 case ASB_CALL_HELPER:
3052 drbd_khelper(mdev, "pri-lost");
3054 case ASB_DISCONNECT:
3055 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
3058 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
3063 if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
3065 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
3067 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.\n",
3068 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
3069 abs(hg) >= 2 ? "full" : "bit-map based");
3074 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
3075 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
3076 BM_LOCKED_SET_ALLOWED))
3080 if (hg > 0) { /* become sync source. */
3082 } else if (hg < 0) { /* become sync target */
3086 if (drbd_bm_total_weight(mdev)) {
3087 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
3088 drbd_bm_total_weight(mdev));
3095 static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
3097 /* the peer's ASB_DISCARD_REMOTE paired with our ASB_DISCARD_LOCAL is valid */
3098 if (peer == ASB_DISCARD_REMOTE)
3099 return ASB_DISCARD_LOCAL;
3101 /* any other combination involving ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL is invalid */
3102 if (peer == ASB_DISCARD_LOCAL)
3103 return ASB_DISCARD_REMOTE;
3105 /* everything else is valid if they are equal on both sides. */
3109 static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
3111 struct p_protocol *p = pi->data;
3112 enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
3113 int p_proto, p_discard_my_data, p_two_primaries, cf;
3114 struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
3115 char integrity_alg[SHARED_SECRET_MAX] = "";
3116 struct crypto_hash *peer_integrity_tfm = NULL;
3117 void *int_dig_in = NULL, *int_dig_vv = NULL;
3119 p_proto = be32_to_cpu(p->protocol);
3120 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
3121 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
3122 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
3123 p_two_primaries = be32_to_cpu(p->two_primaries);
3124 cf = be32_to_cpu(p->conn_flags);
3125 p_discard_my_data = cf & CF_DISCARD_MY_DATA;
3127 if (tconn->agreed_pro_version >= 87) {
3130 if (pi->size > sizeof(integrity_alg))
3132 err = drbd_recv_all(tconn, integrity_alg, pi->size);
3135 integrity_alg[SHARED_SECRET_MAX - 1] = 0;
3138 if (pi->cmd != P_PROTOCOL_UPDATE) {
3139 clear_bit(CONN_DRY_RUN, &tconn->flags);
3141 if (cf & CF_DRY_RUN)
3142 set_bit(CONN_DRY_RUN, &tconn->flags);
3145 nc = rcu_dereference(tconn->net_conf);
3147 if (p_proto != nc->wire_protocol) {
3148 conn_err(tconn, "incompatible %s settings\n", "protocol");
3149 goto disconnect_rcu_unlock;
3152 if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
3153 conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri");
3154 goto disconnect_rcu_unlock;
3157 if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
3158 conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri");
3159 goto disconnect_rcu_unlock;
3162 if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
3163 conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri");
3164 goto disconnect_rcu_unlock;
3167 if (p_discard_my_data && nc->discard_my_data) {
3168 conn_err(tconn, "incompatible %s settings\n", "discard-my-data");
3169 goto disconnect_rcu_unlock;
3172 if (p_two_primaries != nc->two_primaries) {
3173 conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries");
3174 goto disconnect_rcu_unlock;
3177 if (strcmp(integrity_alg, nc->integrity_alg)) {
3178 conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg");
3179 goto disconnect_rcu_unlock;
3185 if (integrity_alg[0]) {
3189 * We can only change the peer data integrity algorithm
3190 * here. Changing our own data integrity algorithm
3191 * requires that we send a P_PROTOCOL_UPDATE packet at
3192 * the same time; otherwise, the peer has no way to
3193 * tell between which packets the algorithm should
3194 * change. */
3197 peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
3198 if (!peer_integrity_tfm) {
3199 conn_err(tconn, "peer data-integrity-alg %s not supported\n",
3204 hash_size = crypto_hash_digestsize(peer_integrity_tfm);
3205 int_dig_in = kmalloc(hash_size, GFP_KERNEL);
3206 int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
3207 if (!(int_dig_in && int_dig_vv)) {
3208 conn_err(tconn, "Allocation of buffers for data integrity checking failed\n");
3213 new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
3214 if (!new_net_conf) {
3215 conn_err(tconn, "Allocation of new net_conf failed\n");
3219 mutex_lock(&tconn->data.mutex);
3220 mutex_lock(&tconn->conf_update);
3221 old_net_conf = tconn->net_conf;
3222 *new_net_conf = *old_net_conf;
3224 new_net_conf->wire_protocol = p_proto;
3225 new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
3226 new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
3227 new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
3228 new_net_conf->two_primaries = p_two_primaries;
3230 rcu_assign_pointer(tconn->net_conf, new_net_conf);
3231 mutex_unlock(&tconn->conf_update);
3232 mutex_unlock(&tconn->data.mutex);
3234 crypto_free_hash(tconn->peer_integrity_tfm);
3235 kfree(tconn->int_dig_in);
3236 kfree(tconn->int_dig_vv);
3237 tconn->peer_integrity_tfm = peer_integrity_tfm;
3238 tconn->int_dig_in = int_dig_in;
3239 tconn->int_dig_vv = int_dig_vv;
3241 if (strcmp(old_net_conf->integrity_alg, integrity_alg))
3242 conn_info(tconn, "peer data-integrity-alg: %s\n",
3243 integrity_alg[0] ? integrity_alg : "(none)");
3246 kfree(old_net_conf);
3249 disconnect_rcu_unlock:
3252 crypto_free_hash(peer_integrity_tfm);
3255 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3260 * input: alg name, feature name
3261 * return: NULL (alg name was "")
3262 * ERR_PTR(error) if something goes wrong
3263 * or the crypto hash ptr, if it worked out ok. */
3264 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
3265 const char *alg, const char *name)
3267 struct crypto_hash *tfm;
3272 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
3274 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
3275 alg, name, PTR_ERR(tfm));
3281 static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
3283 void *buffer = tconn->data.rbuf;
3284 int size = pi->size;
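/* Even a packet we do not act on must be consumed completely, or the
 * receive stream would lose packet framing; drain the payload in chunks
 * of at most DRBD_SOCKET_BUFFER_SIZE through the regular receive buffer. */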
3287 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
3288 s = drbd_recv(tconn, buffer, s);
3302 * config_unknown_volume - device configuration command for unknown volume
3304 * When a device is added to an existing connection, the node on which the
3305 * device is added first will send configuration commands to its peer but the
3306 * peer will not know about the device yet. It will warn and ignore these
3307 * commands. Once the device is added on the second node, the second node will
3308 * send the same device configuration commands, but in the other direction.
3310 * (We can also end up here if drbd is misconfigured.)
3312 static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
3314 conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n",
3315 cmdname(pi->cmd), pi->vnr);
3316 return ignore_remaining_packet(tconn, pi);
3319 static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
3321 struct drbd_conf *mdev;
3322 struct p_rs_param_95 *p;
3323 unsigned int header_size, data_size, exp_max_sz;
3324 struct crypto_hash *verify_tfm = NULL;
3325 struct crypto_hash *csums_tfm = NULL;
3326 struct net_conf *old_net_conf, *new_net_conf = NULL;
3327 struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
3328 const int apv = tconn->agreed_pro_version;
3329 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
3333 mdev = vnr_to_mdev(tconn, pi->vnr);
3335 return config_unknown_volume(tconn, pi);
3337 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
3338 : apv == 88 ? sizeof(struct p_rs_param)
3340 : apv <= 94 ? sizeof(struct p_rs_param_89)
3341 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
3343 if (pi->size > exp_max_sz) {
3344 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
3345 pi->size, exp_max_sz);
3350 header_size = sizeof(struct p_rs_param);
3351 data_size = pi->size - header_size;
3352 } else if (apv <= 94) {
3353 header_size = sizeof(struct p_rs_param_89);
3354 data_size = pi->size - header_size;
3355 D_ASSERT(data_size == 0);
3357 header_size = sizeof(struct p_rs_param_95);
3358 data_size = pi->size - header_size;
3359 D_ASSERT(data_size == 0);
3362 /* initialize verify_alg and csums_alg */
3364 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3366 err = drbd_recv_all(mdev->tconn, p, header_size);
3370 mutex_lock(&mdev->tconn->conf_update);
3371 old_net_conf = mdev->tconn->net_conf;
3372 if (get_ldev(mdev)) {
3373 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3374 if (!new_disk_conf) {
3376 mutex_unlock(&mdev->tconn->conf_update);
3377 dev_err(DEV, "Allocation of new disk_conf failed\n");
3381 old_disk_conf = mdev->ldev->disk_conf;
3382 *new_disk_conf = *old_disk_conf;
3384 new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
3389 if (data_size > SHARED_SECRET_MAX || data_size == 0) {
3390 dev_err(DEV, "verify-alg of wrong size, "
3391 "peer wants %u, accepting only up to %u byte\n",
3392 data_size, SHARED_SECRET_MAX);
3397 err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
3400 /* we expect NUL terminated string */
3401 /* but just in case someone tries to be evil */
3402 D_ASSERT(p->verify_alg[data_size-1] == 0);
3403 p->verify_alg[data_size-1] = 0;
3405 } else /* apv >= 89 */ {
3406 /* we still expect NUL terminated strings */
3407 /* but just in case someone tries to be evil */
3408 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3409 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
3410 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3411 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3414 if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
3415 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3416 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
3417 old_net_conf->verify_alg, p->verify_alg);
3420 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
3421 p->verify_alg, "verify-alg");
3422 if (IS_ERR(verify_tfm)) {
3428 if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
3429 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3430 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
3431 old_net_conf->csums_alg, p->csums_alg);
3434 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
3435 p->csums_alg, "csums-alg");
3436 if (IS_ERR(csums_tfm)) {
3442 if (apv > 94 && new_disk_conf) {
3443 new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3444 new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
3445 new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
3446 new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
3448 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
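/* A sketch of the intent, assuming SLEEP_TIME is HZ/10 and c_plan_ahead
 * is given in tenths of a second: this works out to one fifo slot per
 * 100 ms controller step across the whole plan-ahead window, i.e.
 * fifo_size == c_plan_ahead. */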
3449 if (fifo_size != mdev->rs_plan_s->size) {
3450 new_plan = fifo_alloc(fifo_size);
3452 dev_err(DEV, "kmalloc of fifo_buffer failed");
3459 if (verify_tfm || csums_tfm) {
3460 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
3461 if (!new_net_conf) {
3462 dev_err(DEV, "Allocation of new net_conf failed\n");
3466 *new_net_conf = *old_net_conf;
3469 strcpy(new_net_conf->verify_alg, p->verify_alg);
3470 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
3471 crypto_free_hash(mdev->tconn->verify_tfm);
3472 mdev->tconn->verify_tfm = verify_tfm;
3473 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
3476 strcpy(new_net_conf->csums_alg, p->csums_alg);
3477 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
3478 crypto_free_hash(mdev->tconn->csums_tfm);
3479 mdev->tconn->csums_tfm = csums_tfm;
3480 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
3482 rcu_assign_pointer(tconn->net_conf, new_net_conf);
3486 if (new_disk_conf) {
3487 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3492 old_plan = mdev->rs_plan_s;
3493 rcu_assign_pointer(mdev->rs_plan_s, new_plan);
3496 mutex_unlock(&mdev->tconn->conf_update);
3499 kfree(old_net_conf);
3500 kfree(old_disk_conf);
3506 if (new_disk_conf) {
3508 kfree(new_disk_conf);
3510 mutex_unlock(&mdev->tconn->conf_update);
3515 if (new_disk_conf) {
3517 kfree(new_disk_conf);
3519 mutex_unlock(&mdev->tconn->conf_update);
3520 /* just for completeness: actually not needed,
3521 * as this is not reached if csums_tfm was ok. */
3522 crypto_free_hash(csums_tfm);
3523 /* but free the verify_tfm again, if csums_tfm did not work out */
3524 crypto_free_hash(verify_tfm);
3525 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3529 /* warn if the arguments differ by more than 12.5% */
3530 static void warn_if_differ_considerably(struct drbd_conf *mdev,
3531 const char *s, sector_t a, sector_t b)
3534 if (a == 0 || b == 0)
3536 d = (a > b) ? (a - b) : (b - a);
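/* a>>3 is a/8, so this fires once the delta exceeds 12.5% of either value */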
3537 if (d > (a>>3) || d > (b>>3))
3538 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3539 (unsigned long long)a, (unsigned long long)b);
3542 static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
3544 struct drbd_conf *mdev;
3545 struct p_sizes *p = pi->data;
3546 enum determine_dev_size dd = unchanged;
3547 sector_t p_size, p_usize, my_usize;
3548 int ldsc = 0; /* local disk size changed */
3549 enum dds_flags ddsf;
3551 mdev = vnr_to_mdev(tconn, pi->vnr);
3553 return config_unknown_volume(tconn, pi);
3555 p_size = be64_to_cpu(p->d_size);
3556 p_usize = be64_to_cpu(p->u_size);
3558 /* just store the peer's disk size for now.
3559 * we still need to figure out whether we accept that. */
3560 mdev->p_size = p_size;
3562 if (get_ldev(mdev)) {
3564 my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
3567 warn_if_differ_considerably(mdev, "lower level device sizes",
3568 p_size, drbd_get_max_capacity(mdev->ldev));
3569 warn_if_differ_considerably(mdev, "user requested size",
3572 /* if this is the first connect, or an otherwise expected
3573 * param exchange, choose the minimum */
3574 if (mdev->state.conn == C_WF_REPORT_PARAMS)
3575 p_usize = min_not_zero(my_usize, p_usize);
3577 /* Never shrink a device with usable data during connect.
3578 But allow online shrinking if we are connected. */
3579 if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
3580 drbd_get_capacity(mdev->this_bdev) &&
3581 mdev->state.disk >= D_OUTDATED &&
3582 mdev->state.conn < C_CONNECTED) {
3583 dev_err(DEV, "The peer's disk size is too small!\n");
3584 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3589 if (my_usize != p_usize) {
3590 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
3592 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3593 if (!new_disk_conf) {
3594 dev_err(DEV, "Allocation of new disk_conf failed\n");
3599 mutex_lock(&mdev->tconn->conf_update);
3600 old_disk_conf = mdev->ldev->disk_conf;
3601 *new_disk_conf = *old_disk_conf;
3602 new_disk_conf->disk_size = p_usize;
3604 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3605 mutex_unlock(&mdev->tconn->conf_update);
3607 kfree(old_disk_conf);
3609 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3610 (unsigned long)p_usize);
3616 ddsf = be16_to_cpu(p->dds_flags);
3617 if (get_ldev(mdev)) {
3618 dd = drbd_determine_dev_size(mdev, ddsf);
3620 if (dd == dev_size_error)
3624 /* I am diskless, need to accept the peer's size. */
3625 drbd_set_my_capacity(mdev, p_size);
3628 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3629 drbd_reconsider_max_bio_size(mdev);
3631 if (get_ldev(mdev)) {
3632 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3633 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3640 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3641 if (be64_to_cpu(p->c_size) !=
3642 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3643 /* we have different sizes, probably peer
3644 * needs to know my new size... */
3645 drbd_send_sizes(mdev, 0, ddsf);
3647 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3648 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3649 if (mdev->state.pdsk >= D_INCONSISTENT &&
3650 mdev->state.disk >= D_INCONSISTENT) {
3651 if (ddsf & DDSF_NO_RESYNC)
3652 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3654 resync_after_online_grow(mdev);
3656 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3663 static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
3665 struct drbd_conf *mdev;
3666 struct p_uuids *p = pi->data;
3668 int i, updated_uuids = 0;
3670 mdev = vnr_to_mdev(tconn, pi->vnr);
3672 return config_unknown_volume(tconn, pi);
3674 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3676 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3677 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3679 kfree(mdev->p_uuid);
3680 mdev->p_uuid = p_uuid;
3682 if (mdev->state.conn < C_CONNECTED &&
3683 mdev->state.disk < D_INCONSISTENT &&
3684 mdev->state.role == R_PRIMARY &&
3685 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3686 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3687 (unsigned long long)mdev->ed_uuid);
3688 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3692 if (get_ldev(mdev)) {
3693 int skip_initial_sync =
3694 mdev->state.conn == C_CONNECTED &&
3695 mdev->tconn->agreed_pro_version >= 90 &&
3696 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3697 (p_uuid[UI_FLAGS] & 8);
3698 if (skip_initial_sync) {
3699 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3700 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3701 "clear_n_write from receive_uuids",
3702 BM_LOCKED_TEST_ALLOWED);
3703 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3704 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3705 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3711 } else if (mdev->state.disk < D_INCONSISTENT &&
3712 mdev->state.role == R_PRIMARY) {
3713 /* I am a diskless primary, the peer just created a new current UUID
3714 for me. */
3715 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3718 /* Before we test for the disk state, we should wait until any possibly
3719 ongoing cluster-wide state change has finished. That is important if
3720 we are primary and are detaching from our disk. We need to see the
3721 new disk state... */
3722 mutex_lock(mdev->state_mutex);
3723 mutex_unlock(mdev->state_mutex);
3724 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3725 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3728 drbd_print_uuids(mdev, "receiver updated UUIDs to");
3734 * convert_state() - Converts the peer's view of the cluster state to our point of view
3735 * @ps: The state as seen by the peer.
3737 static union drbd_state convert_state(union drbd_state ps)
3739 union drbd_state ms;
3741 static enum drbd_conns c_tab[] = {
3742 [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
3743 [C_CONNECTED] = C_CONNECTED,
3745 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3746 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3747 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3748 [C_VERIFY_S] = C_VERIFY_T,
3754 ms.conn = c_tab[ps.conn];
3759 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
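/* Directional states are mirrored: the peer's SyncSource is our SyncTarget,
 * its VerifyS our VerifyT (see c_tab above), and the remaining asymmetric
 * fields (role/peer, disk/pdsk) swap sides the same way. */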
3764 static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
3766 struct drbd_conf *mdev;
3767 struct p_req_state *p = pi->data;
3768 union drbd_state mask, val;
3769 enum drbd_state_rv rv;
3771 mdev = vnr_to_mdev(tconn, pi->vnr);
3775 mask.i = be32_to_cpu(p->mask);
3776 val.i = be32_to_cpu(p->val);
3778 if (test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags) &&
3779 mutex_is_locked(mdev->state_mutex)) {
3780 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3784 mask = convert_state(mask);
3785 val = convert_state(val);
3787 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3788 drbd_send_sr_reply(mdev, rv);
3795 static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
3797 struct p_req_state *p = pi->data;
3798 union drbd_state mask, val;
3799 enum drbd_state_rv rv;
3801 mask.i = be32_to_cpu(p->mask);
3802 val.i = be32_to_cpu(p->val);
3804 if (test_bit(DISCARD_CONCURRENT, &tconn->flags) &&
3805 mutex_is_locked(&tconn->cstate_mutex)) {
3806 conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
3810 mask = convert_state(mask);
3811 val = convert_state(val);
3813 rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
3814 conn_send_sr_reply(tconn, rv);
3819 static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
3821 struct drbd_conf *mdev;
3822 struct p_state *p = pi->data;
3823 union drbd_state os, ns, peer_state;
3824 enum drbd_disk_state real_peer_disk;
3825 enum chg_state_flags cs_flags;
3828 mdev = vnr_to_mdev(tconn, pi->vnr);
3830 return config_unknown_volume(tconn, pi);
3832 peer_state.i = be32_to_cpu(p->state);
3834 real_peer_disk = peer_state.disk;
3835 if (peer_state.disk == D_NEGOTIATING) {
3836 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3837 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3840 spin_lock_irq(&mdev->tconn->req_lock);
3842 os = ns = drbd_read_state(mdev);
3843 spin_unlock_irq(&mdev->tconn->req_lock);
3845 /* If some other part of the code (asender thread, timeout)
3846 * already decided to close the connection again,
3847 * we must not "re-establish" it here. */
3848 if (os.conn <= C_TEAR_DOWN)
3851 /* If this is the "end of sync" confirmation, usually the peer disk
3852 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
3853 * set) resync started in PausedSyncT, or if the timing of pause-/
3854 * unpause-sync events has been "just right", the peer disk may
3855 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
3857 if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
3858 real_peer_disk == D_UP_TO_DATE &&
3859 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3860 /* If we are (becoming) SyncSource, but peer is still in sync
3861 * preparation, ignore its uptodate-ness to avoid flapping, it
3862 * will change to inconsistent once the peer reaches active
3863 * syncing states.
3864 * It may have changed syncer-paused flags, however, so we
3865 * cannot ignore this completely. */
3866 if (peer_state.conn > C_CONNECTED &&
3867 peer_state.conn < C_SYNC_SOURCE)
3868 real_peer_disk = D_INCONSISTENT;
3870 /* if peer_state changes to connected at the same time,
3871 * it explicitly notifies us that it finished resync.
3872 * Maybe we should finish it up, too? */
3873 else if (os.conn >= C_SYNC_SOURCE &&
3874 peer_state.conn == C_CONNECTED) {
3875 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3876 drbd_resync_finished(mdev);
3881 /* explicit verify finished notification, stop sector reached. */
3882 if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
3883 peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
3884 ov_out_of_sync_print(mdev);
3885 drbd_resync_finished(mdev);
3889 /* peer says his disk is inconsistent, while we think it is uptodate,
3890 * and this happens while the peer still thinks we have a sync going on,
3891 * but we think we are already done with the sync.
3892 * We ignore this to avoid flapping pdsk.
3893 * This should not happen, if the peer is a recent version of drbd. */
3894 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3895 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3896 real_peer_disk = D_UP_TO_DATE;
3898 if (ns.conn == C_WF_REPORT_PARAMS)
3899 ns.conn = C_CONNECTED;
3901 if (peer_state.conn == C_AHEAD)
3904 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3905 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3906 int cr; /* consider resync */
3908 /* if we established a new connection */
3909 cr = (os.conn < C_CONNECTED);
3910 /* if we had an established connection
3911 * and one of the nodes newly attaches a disk */
3912 cr |= (os.conn == C_CONNECTED &&
3913 (peer_state.disk == D_NEGOTIATING ||
3914 os.disk == D_NEGOTIATING));
3915 /* if we have both been inconsistent, and the peer has been
3916 * forced to be UpToDate with --overwrite-data */
3917 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3918 /* if we had been plain connected, and the admin requested to
3919 * start a sync by "invalidate" or "invalidate-remote" */
3920 cr |= (os.conn == C_CONNECTED &&
3921 (peer_state.conn >= C_STARTING_SYNC_S &&
3922 peer_state.conn <= C_WF_BITMAP_T));
3925 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3928 if (ns.conn == C_MASK) {
3929 ns.conn = C_CONNECTED;
3930 if (mdev->state.disk == D_NEGOTIATING) {
3931 drbd_force_state(mdev, NS(disk, D_FAILED));
3932 } else if (peer_state.disk == D_NEGOTIATING) {
3933 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3934 peer_state.disk = D_DISKLESS;
3935 real_peer_disk = D_DISKLESS;
3937 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
3939 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
3940 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3946 spin_lock_irq(&mdev->tconn->req_lock);
3947 if (os.i != drbd_read_state(mdev).i)
3949 clear_bit(CONSIDER_RESYNC, &mdev->flags);
3950 ns.peer = peer_state.role;
3951 ns.pdsk = real_peer_disk;
3952 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3953 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3954 ns.disk = mdev->new_state_tmp.disk;
3955 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3956 if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
3957 test_bit(NEW_CUR_UUID, &mdev->flags)) {
3958 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
3959 for temporary network outages! */
3960 spin_unlock_irq(&mdev->tconn->req_lock);
3961 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3962 tl_clear(mdev->tconn);
3963 drbd_uuid_new_current(mdev);
3964 clear_bit(NEW_CUR_UUID, &mdev->flags);
3965 conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
3968 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
3969 ns = drbd_read_state(mdev);
3970 spin_unlock_irq(&mdev->tconn->req_lock);
3972 if (rv < SS_SUCCESS) {
3973 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3977 if (os.conn > C_WF_REPORT_PARAMS) {
3978 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3979 peer_state.disk != D_NEGOTIATING ) {
3980 /* we want resync, peer has not yet decided to sync... */
3981 /* Nowadays only used when forcing a node into primary role and
3982 setting its disk to UpToDate with that */
3983 drbd_send_uuids(mdev);
3984 drbd_send_current_state(mdev);
3988 clear_bit(DISCARD_MY_DATA, &mdev->flags);
3990 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3995 static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
3997 struct drbd_conf *mdev;
3998 struct p_rs_uuid *p = pi->data;
4000 mdev = vnr_to_mdev(tconn, pi->vnr);
4004 wait_event(mdev->misc_wait,
4005 mdev->state.conn == C_WF_SYNC_UUID ||
4006 mdev->state.conn == C_BEHIND ||
4007 mdev->state.conn < C_CONNECTED ||
4008 mdev->state.disk < D_NEGOTIATING);
4010 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
4012 /* Here the _drbd_uuid_ functions are right, current should
4013 _not_ be rotated into the history */
4014 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
4015 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
4016 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
4018 drbd_print_uuids(mdev, "updated sync uuid");
4019 drbd_start_resync(mdev, C_SYNC_TARGET);
4023 dev_err(DEV, "Ignoring SyncUUID packet!\n");
4029 * receive_bitmap_plain
4031 * Return 0 when done, 1 when another iteration is needed, and a negative error
4032 * code upon failure.
4035 receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
4036 unsigned long *p, struct bm_xfer_ctx *c)
4038 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
4039 drbd_header_size(mdev->tconn);
4040 unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
4041 c->bm_words - c->word_offset);
4042 unsigned int want = num_words * sizeof(*p);
4046 dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size);
4051 err = drbd_recv_all(mdev->tconn, p, want);
4055 drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);
4057 c->word_offset += num_words;
4058 c->bit_offset = c->word_offset * BITS_PER_LONG;
4059 if (c->bit_offset > c->bm_bits)
4060 c->bit_offset = c->bm_bits;
4065 static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
4067 return (enum drbd_bitmap_code)(p->encoding & 0x0f);
4070 static int dcbp_get_start(struct p_compressed_bm *p)
4072 return (p->encoding & 0x80) != 0;
4075 static int dcbp_get_pad_bits(struct p_compressed_bm *p)
4077 return (p->encoding >> 4) & 0x7;
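/* Layout of p->encoding, as decoded by the three helpers above:
 *   bit 7      whether the first decoded run consists of set bits
 *   bits 4-6   number of pad bits at the end of the bit stream
 *   bits 0-3   the drbd_bitmap_code, e.g. RLE_VLI_Bits
 */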
4083 * Return 0 when done, 1 when another iteration is needed, and a negative error
4084 * code upon failure.
4087 recv_bm_rle_bits(struct drbd_conf *mdev,
4088 struct p_compressed_bm *p,
4089 struct bm_xfer_ctx *c,
4092 struct bitstream bs;
4096 unsigned long s = c->bit_offset;
4098 int toggle = dcbp_get_start(p);
4102 bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
4104 bits = bitstream_get_bits(&bs, &look_ahead, 64);
4108 for (have = bits; have > 0; s += rl, toggle = !toggle) {
4109 bits = vli_decode_bits(&rl, look_ahead);
4115 if (e >= c->bm_bits) {
4116 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
4119 _drbd_bm_set_bits(mdev, s, e);
4123 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
4124 have, bits, look_ahead,
4125 (unsigned int)(bs.cur.b - p->code),
4126 (unsigned int)bs.buf_len);
4129 look_ahead >>= bits;
4132 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
4135 look_ahead |= tmp << have;
4140 bm_xfer_ctx_bit_to_word_offset(c);
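/* If we stopped exactly at the last bit of the bitmap, the transfer is
 * complete and we report 0 (done); otherwise report 1 so the caller
 * fetches the next compressed packet. */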
4142 return (s != c->bm_bits);
4148 * Return 0 when done, 1 when another iteration is needed, and a negative error
4149 * code upon failure.
4152 decode_bitmap_c(struct drbd_conf *mdev,
4153 struct p_compressed_bm *p,
4154 struct bm_xfer_ctx *c,
4157 if (dcbp_get_code(p) == RLE_VLI_Bits)
4158 return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));
4160 /* other variants had been implemented for evaluation,
4161 * but have been dropped as this one turned out to be "best"
4162 * during all our tests. */
4164 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
4165 conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4169 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
4170 const char *direction, struct bm_xfer_ctx *c)
4172 /* what would it take to transfer it "plaintext" */
4173 unsigned int header_size = drbd_header_size(mdev->tconn);
4174 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
4175 unsigned int plain =
4176 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
4177 c->bm_words * sizeof(unsigned long);
4178 unsigned int total = c->bytes[0] + c->bytes[1];
4181 /* total cannot be zero. but just in case: */
4185 /* don't report if not compressed */
4189 /* total < plain. check for overflow, still */
4190 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
4191 : (1000 * total / plain);
4193 if (r > 1000)
4194 r = 1000;
4196 r = 1000 - r;
4197 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
4198 "total %u; compression: %u.%u%%\n",
4200 c->bytes[1], c->packets[1],
4201 c->bytes[0], c->packets[0],
4202 total, r/10, r % 10);
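/* Example, as a sketch: with plain = 100000 bytes and total = 5000 bytes
 * actually transferred, r = 1000 - 1000*5000/100000 = 950, reported as
 * "compression: 95.0%", i.e. the fraction of the plaintext size saved. */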
4205 /* Since we are processing the bitfield from lower addresses to higher,
4206 it does not matter whether we process it in 32 bit chunks or 64 bit
4207 chunks as long as it is little endian. (Understand it as a byte stream,
4208 beginning with the lowest byte...) If we used big endian
4209 we would need to process it from the highest address to the lowest,
4210 in order to be agnostic to the 32 vs 64 bit issue.
4212 returns 0 on success, or a negative error code otherwise. */
4213 static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
4215 struct drbd_conf *mdev;
4216 struct bm_xfer_ctx c;
4219 mdev = vnr_to_mdev(tconn, pi->vnr);
4223 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
4224 /* you are supposed to send additional out-of-sync information
4225 * if you actually set bits during this phase */
4227 c = (struct bm_xfer_ctx) {
4228 .bm_bits = drbd_bm_bits(mdev),
4229 .bm_words = drbd_bm_words(mdev),
4233 if (pi->cmd == P_BITMAP)
4234 err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
4235 else if (pi->cmd == P_COMPRESSED_BITMAP) {
4236 /* MAYBE: sanity check that we speak proto >= 90,
4237 * and the feature is enabled! */
4238 struct p_compressed_bm *p = pi->data;
4240 if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
4241 dev_err(DEV, "ReportCBitmap packet too large\n");
4245 if (pi->size <= sizeof(*p)) {
4246 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
4250 err = drbd_recv_all(mdev->tconn, p, pi->size);
4253 err = decode_bitmap_c(mdev, p, &c, pi->size);
4255 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)\n", pi->cmd);
4260 c.packets[pi->cmd == P_BITMAP]++;
4261 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;
4268 err = drbd_recv_header(mdev->tconn, pi);
4273 INFO_bm_xfer_stats(mdev, "receive", &c);
4275 if (mdev->state.conn == C_WF_BITMAP_T) {
4276 enum drbd_state_rv rv;
4278 err = drbd_send_bitmap(mdev);
4281 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
4282 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
4283 D_ASSERT(rv == SS_SUCCESS);
4284 } else if (mdev->state.conn != C_WF_BITMAP_S) {
4285 /* admin may have requested C_DISCONNECTING,
4286 * other threads may have noticed network errors */
4287 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
4288 drbd_conn_str(mdev->state.conn));
4293 drbd_bm_unlock(mdev);
4294 if (!err && mdev->state.conn == C_WF_BITMAP_S)
4295 drbd_start_resync(mdev, C_SYNC_SOURCE);
4299 static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
4301 conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
4304 return ignore_remaining_packet(tconn, pi);
4307 static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
4309 /* Make sure we've acked all the TCP data associated
4310 * with the data requests being unplugged */
4311 drbd_tcp_quickack(tconn->data.socket);
4316 static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
4318 struct drbd_conf *mdev;
4319 struct p_block_desc *p = pi->data;
4321 mdev = vnr_to_mdev(tconn, pi->vnr);
4325 switch (mdev->state.conn) {
4326 case C_WF_SYNC_UUID:
4331 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
4332 drbd_conn_str(mdev->state.conn));
4335 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
4343 int (*fn)(struct drbd_tconn *, struct packet_info *);
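/* Dispatch table for the receiver, used by drbdd() below: expect_payload
 * says whether data beyond the fixed sub header is acceptable, pkt_size is
 * the size of that sub header, which drbdd() receives in full before
 * invoking fn. */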
4346 static struct data_cmd drbd_cmd_handler[] = {
4347 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
4348 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
4349 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
4350 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
4351 [P_BITMAP] = { 1, 0, receive_bitmap } ,
4352 [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
4353 [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote },
4354 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4355 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4356 [P_SYNC_PARAM] = { 1, 0, receive_SyncParam },
4357 [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam },
4358 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
4359 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
4360 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
4361 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
4362 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
4363 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
4364 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4365 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4366 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4367 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
4368 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
4369 [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
4370 [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
4373 static void drbdd(struct drbd_tconn *tconn)
4375 struct packet_info pi;
4376 size_t shs; /* sub header size */
4379 while (get_t_state(&tconn->receiver) == RUNNING) {
4380 struct data_cmd *cmd;
4382 drbd_thread_current_set_cpu(&tconn->receiver);
4383 if (drbd_recv_header(tconn, &pi))
4386 cmd = &drbd_cmd_handler[pi.cmd];
4387 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
4388 conn_err(tconn, "Unexpected data packet %s (0x%04x)",
4389 cmdname(pi.cmd), pi.cmd);
4393 shs = cmd->pkt_size;
4394 if (pi.size > shs && !cmd->expect_payload) {
4395 conn_err(tconn, "No payload expected %s l:%d\n",
4396 cmdname(pi.cmd), pi.size);
4401 err = drbd_recv_all_warn(tconn, pi.data, shs);
4407 err = cmd->fn(tconn, &pi);
4409 conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
4410 cmdname(pi.cmd), err, pi.size);
4417 conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4420 void conn_flush_workqueue(struct drbd_tconn *tconn)
4422 struct drbd_wq_barrier barr;
4424 barr.w.cb = w_prev_work_done;
4425 barr.w.tconn = tconn;
4426 init_completion(&barr.done);
4427 drbd_queue_work(&tconn->sender_work, &barr.w);
4428 wait_for_completion(&barr.done);
4431 static void conn_disconnect(struct drbd_tconn *tconn)
4433 struct drbd_conf *mdev;
4437 if (tconn->cstate == C_STANDALONE)
4440 /* We are about to start the cleanup after connection loss.
4441 * Make sure drbd_make_request knows about that.
4442 * Usually we should be in some network failure state already,
4443 * but just in case we are not, we fix it up here.
4445 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
4447 /* asender does not clean up anything. it must not interfere, either */
4448 drbd_thread_stop(&tconn->asender);
4449 drbd_free_sock(tconn);
4452 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
4453 kref_get(&mdev->kref);
4455 drbd_disconnected(mdev);
4456 kref_put(&mdev->kref, &drbd_minor_destroy);
4461 if (!list_empty(&tconn->current_epoch->list))
4462 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
4463 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
4464 atomic_set(&tconn->current_epoch->epoch_size, 0);
4465 tconn->send.seen_any_write_yet = false;
4467 conn_info(tconn, "Connection closed\n");
4469 if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
4470 conn_try_outdate_peer_async(tconn);
4472 spin_lock_irq(&tconn->req_lock);
4474 if (oc >= C_UNCONNECTED)
4475 _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
4477 spin_unlock_irq(&tconn->req_lock);
4479 if (oc == C_DISCONNECTING)
4480 conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
static int drbd_disconnected(struct drbd_conf *mdev)
{
	unsigned int i;

	/* wait for current activity to cease. */
	spin_lock_irq(&mdev->tconn->req_lock);
	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	/* We do not have data structures that would allow us to
	 * get the rs_pending_cnt down to 0 again.
	 *  * On C_SYNC_TARGET we do not have any data structures describing
	 *    the pending RSDataRequest's we have sent.
	 *  * On C_SYNC_SOURCE there is no data structure that tracks
	 *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
	 *  And no, it is not the sum of the reference counts in the
	 *  resync_LRU. The resync_LRU tracks the whole operation including
	 *  the disk-IO, while the rs_pending_cnt only tracks the blocks
	 *  on the fly. */
	drbd_rs_cancel_all(mdev);
	mdev->rs_failed = 0;
	atomic_set(&mdev->rs_pending_cnt, 0);
	wake_up(&mdev->misc_wait);

	del_timer_sync(&mdev->resync_timer);
	resync_timer_fn((unsigned long)mdev);

	/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
	 * w_make_resync_request etc. which may still be on the worker queue
	 * to be "canceled" */
	drbd_flush_workqueue(mdev);

	drbd_finish_peer_reqs(mdev);

	/* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
	   might have queued new work items. The flush before drbd_finish_peer_reqs()
	   is necessary to reclaim net_ee inside drbd_finish_peer_reqs(). */
	drbd_flush_workqueue(mdev);

	kfree(mdev->p_uuid);
	mdev->p_uuid = NULL;

	if (!drbd_suspended(mdev))
		tl_clear(mdev->tconn);

	/* serialize with bitmap writeout triggered by the state change,
	 * if any. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	/* tcp_close and release of sendpage pages can be deferred.  I don't
	 * want to use SO_LINGER, because apparently it can be deferred for
	 * more than 20 seconds (longest time I checked).
	 *
	 * Actually we don't care for exactly when the network stack does its
	 * put_page(), but release our reference on these pages right here.
	 */
	i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
	if (i)
		dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
	i = atomic_read(&mdev->pp_in_use_by_net);
	if (i)
		dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
	i = atomic_read(&mdev->pp_in_use);
	if (i)
		dev_info(DEV, "pp_in_use = %d, expected 0\n", i);

	D_ASSERT(list_empty(&mdev->read_ee));
	D_ASSERT(list_empty(&mdev->active_ee));
	D_ASSERT(list_empty(&mdev->sync_ee));
	D_ASSERT(list_empty(&mdev->done_ee));

	return 0;
}
/*
 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
 * we can agree on is stored in agreed_pro_version.
 *
 * feature flags and the reserved array should be enough room for future
 * enhancements of the handshake protocol, and possible plugins...
 *
 * for now, they are expected to be zero, but ignored.
 */
static int drbd_send_features(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;
	struct p_connection_features *p;

	sock = &tconn->data;
	p = conn_prepare_command(tconn, sock);
	if (!p)
		return -EIO;
	memset(p, 0, sizeof(*p));
	p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
	p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
	return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
}
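
/*
 * Version negotiation in short: both sides advertise the closed range
 * [protocol_min, protocol_max]; the handshake succeeds iff the two
 * ranges overlap, and the agreed version is then
 *	min(PRO_VERSION_MAX, p->protocol_max)
 * as computed in drbd_do_features() below.
 */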
/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 */
static int drbd_do_features(struct drbd_tconn *tconn)
{
	/* ASSERT current == tconn->receiver ... */
	struct p_connection_features *p;
	const int expect = sizeof(struct p_connection_features);
	struct packet_info pi;
	int err;

	err = drbd_send_features(tconn);
	if (err)
		return 0;

	err = drbd_recv_header(tconn, &pi);
	if (err)
		return 0;

	if (pi.cmd != P_CONNECTION_FEATURES) {
		conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		return -1;
	}

	if (pi.size != expect) {
		conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
			 expect, pi.size);
		return -1;
	}

	p = pi.data;
	err = drbd_recv_all_warn(tconn, p, expect);
	if (err)
		return 0;

	p->protocol_min = be32_to_cpu(p->protocol_min);
	p->protocol_max = be32_to_cpu(p->protocol_max);
	if (p->protocol_max == 0)
		p->protocol_max = p->protocol_min;

	if (PRO_VERSION_MAX < p->protocol_min ||
	    PRO_VERSION_MIN > p->protocol_max)
		goto incompat;

	tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);

	conn_info(tconn, "Handshake successful: "
		  "Agreed network protocol version %d\n", tconn->agreed_pro_version);

	return 1;

 incompat:
	conn_err(tconn, "incompatible DRBD dialects: "
		 "I support %d-%d, peer supports %d-%d\n",
		 PRO_VERSION_MIN, PRO_VERSION_MAX,
		 p->protocol_min, p->protocol_max);
	return -1;
}
#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
static int drbd_do_auth(struct drbd_tconn *tconn)
{
	conn_err(tconn, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
	conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
	return -1;
}
#else
#define CHALLENGE_LEN 64

/* Return value:
	 1 - auth succeeded,
	 0 - failed, try again (network error),
	-1 - auth failed, don't try again.
*/
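
/*
 * Outline of the CRAM-HMAC exchange implemented below; both peers run
 * it symmetrically, and the shared secret itself never goes over the
 * wire:
 *
 *	send P_AUTH_CHALLENGE  (CHALLENGE_LEN random bytes)
 *	recv P_AUTH_CHALLENGE  (the peer's challenge, peers_ch)
 *	send P_AUTH_RESPONSE   (HMAC(secret, peers_ch))
 *	recv P_AUTH_RESPONSE   (the peer's response)
 *	verify: response == HMAC(secret, my_challenge)
 */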
static int drbd_do_auth(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;
	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
	struct scatterlist sg;
	char *response = NULL;
	char *right_response = NULL;
	char *peers_ch = NULL;
	unsigned int key_len;
	char secret[SHARED_SECRET_MAX]; /* 64 byte */
	unsigned int resp_size;
	struct hash_desc desc;
	struct packet_info pi;
	struct net_conf *nc;
	int err, rv;

	/* FIXME: Put the challenge/response into the preallocated socket buffer. */

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	key_len = strlen(nc->shared_secret);
	memcpy(secret, nc->shared_secret, key_len);
	rcu_read_unlock();

	desc.tfm = tconn->cram_hmac_tfm;
	desc.flags = 0;

	rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len);
	if (rv) {
		conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	get_random_bytes(my_challenge, CHALLENGE_LEN);

	sock = &tconn->data;
	if (!conn_prepare_command(tconn, sock)) {
		rv = 0;
		goto fail;
	}
	rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
				my_challenge, CHALLENGE_LEN);
	if (!rv)
		goto fail;

	err = drbd_recv_header(tconn, &pi);
	if (err) {
		rv = 0;
		goto fail;
	}
	if (pi.cmd != P_AUTH_CHALLENGE) {
		conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		rv = 0;
		goto fail;
	}
	if (pi.size > CHALLENGE_LEN * 2) {
		conn_err(tconn, "AuthChallenge payload too big\n");
		rv = -1;
		goto fail;
	}

	peers_ch = kmalloc(pi.size, GFP_NOIO);
	if (peers_ch == NULL) {
		conn_err(tconn, "kmalloc of peers_ch failed\n");
		rv = -1;
		goto fail;
	}
	err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
	if (err) {
		rv = 0;
		goto fail;
	}

	resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
	response = kmalloc(resp_size, GFP_NOIO);
	if (response == NULL) {
		conn_err(tconn, "kmalloc of response failed\n");
		rv = -1;
		goto fail;
	}

	sg_init_table(&sg, 1);
	sg_set_buf(&sg, peers_ch, pi.size);
	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
	if (rv) {
		conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	if (!conn_prepare_command(tconn, sock)) {
		rv = 0;
		goto fail;
	}
	rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
				response, resp_size);
	if (!rv)
		goto fail;

	err = drbd_recv_header(tconn, &pi);
	if (err) {
		rv = 0;
		goto fail;
	}
	if (pi.cmd != P_AUTH_RESPONSE) {
		conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		rv = 0;
		goto fail;
	}
	if (pi.size != resp_size) {
		conn_err(tconn, "AuthResponse payload of wrong size\n");
		rv = 0;
		goto fail;
	}

	right_response = kmalloc(resp_size, GFP_NOIO);
	if (right_response == NULL) {
		conn_err(tconn, "kmalloc of right_response failed\n");
		rv = -1;
		goto fail;
	}
	err = drbd_recv_all_warn(tconn, response, resp_size);
	if (err) {
		rv = 0;
		goto fail;
	}

	sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
	if (rv) {
		conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = !memcmp(response, right_response, resp_size);
	if (rv)
		conn_info(tconn, "Peer authenticated using %d bytes HMAC\n",
			  resp_size);
	else
		rv = -1;

 fail:
	kfree(peers_ch);
	kfree(response);
	kfree(right_response);

	return rv;
}
#endif
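
/*
 * Receiver thread entry point: loop trying to establish a connection,
 * run the drbdd() receive loop while it holds, then tear everything
 * down - giving up only if the handshake shows the peers can never
 * talk to each other.
 */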
int drbdd_init(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	int h;

	conn_info(tconn, "receiver (re)started\n");

	do {
		h = conn_connect(tconn);
		if (h == 0) {
			conn_disconnect(tconn);
			schedule_timeout_interruptible(HZ);
		}
		if (h == -1) {
			conn_warn(tconn, "Discarding network configuration.\n");
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	} while (h == 0);

	if (h > 0)
		drbdd(tconn);

	conn_disconnect(tconn);

	conn_info(tconn, "receiver terminated\n");
	return 0;
}
/* ********* acknowledge sender ******** */
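
/*
 * The handlers below run in the asender thread and process the small
 * acknowledgement packets that arrive on the meta socket.  They are
 * expected to finish quickly, since the same thread also has to send
 * pings and notice ping timeouts.
 */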
static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct p_req_state_reply *p = pi->data;
	int retcode = be32_to_cpu(p->retcode);

	if (retcode >= SS_SUCCESS) {
		set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
	} else {
		set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
		conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
			 drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&tconn->ping_wait);

	return 0;
}
static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_req_state_reply *p = pi->data;
	int retcode = be32_to_cpu(p->retcode);

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) {
		D_ASSERT(tconn->agreed_pro_version < 100);
		return got_conn_RqSReply(tconn, pi);
	}

	if (retcode >= SS_SUCCESS) {
		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
	} else {
		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
			drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&mdev->state_wait);

	return 0;
}
static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
{
	return drbd_send_ping_ack(tconn);
}
static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	/* restore idle timeout */
	tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
	if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
		wake_up(&tconn->ping_wait);

	return 0;
}
static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	D_ASSERT(mdev->tconn->agreed_pro_version >= 89);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, sector);
		drbd_set_in_sync(mdev, sector, blksize);
		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
		mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
		put_ldev(mdev);
	}
	dec_rs_pending(mdev);
	atomic_add(blksize >> 9, &mdev->rs_sect_in);

	return 0;
}
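
/*
 * Common helper for the ack handlers: look up the request by (id,
 * sector) in the given tree, apply the requested state transition under
 * req_lock, and complete the master bio outside the lock if that
 * transition finished the request.
 */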
static int
validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
			      struct rb_root *root, const char *func,
			      enum drbd_req_event what, bool missing_ok)
{
	struct drbd_request *req;
	struct bio_and_error m;

	spin_lock_irq(&mdev->tconn->req_lock);
	req = find_request(mdev, root, id, sector, missing_ok, func);
	if (unlikely(!req)) {
		spin_unlock_irq(&mdev->tconn->req_lock);
		return -EIO;
	}
	__req_mod(req, what, &m);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return 0;
}
static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);
	enum drbd_req_event what;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (p->block_id == ID_SYNCER) {
		drbd_set_in_sync(mdev, sector, blksize);
		dec_rs_pending(mdev);
		return 0;
	}
	switch (pi->cmd) {
	case P_RS_WRITE_ACK:
		what = WRITE_ACKED_BY_PEER_AND_SIS;
		break;
	case P_WRITE_ACK:
		what = WRITE_ACKED_BY_PEER;
		break;
	case P_RECV_ACK:
		what = RECV_ACKED_BY_PEER;
		break;
	case P_DISCARD_WRITE:
		what = DISCARD_WRITE;
		break;
	case P_RETRY_WRITE:
		what = POSTPONE_WRITE;
		break;
	default:
		BUG();
	}

	return validate_req_change_req_state(mdev, p->block_id, sector,
					     &mdev->write_requests, __func__,
					     what, false);
}
static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);
	int size = be32_to_cpu(p->blksize);
	int err;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (p->block_id == ID_SYNCER) {
		dec_rs_pending(mdev);
		drbd_rs_failed_io(mdev, sector, size);
		return 0;
	}

	err = validate_req_change_req_state(mdev, p->block_id, sector,
					    &mdev->write_requests, __func__,
					    NEG_ACKED, true);
	if (err) {
		/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
		   The master bio might already be completed, therefore the
		   request is no longer in the collision hash. */
		/* In Protocol B we might already have got a P_RECV_ACK
		   but then get a P_NEG_ACK afterwards. */
		drbd_set_out_of_sync(mdev, sector, size);
	}
	return 0;
}
static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n",
		(unsigned long long)sector, be32_to_cpu(p->blksize));

	return validate_req_change_req_state(mdev, p->block_id, sector,
					     &mdev->read_requests, __func__,
					     NEG_ACKED, false);
}
static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	sector_t sector;
	int size;
	struct p_block_ack *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dec_rs_pending(mdev);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, sector);
		switch (pi->cmd) {
		case P_NEG_RS_DREPLY:
			drbd_rs_failed_io(mdev, sector, size);
			/* fall through */
		case P_RS_CANCEL:
			break;
		default:
			BUG();
		}
		put_ldev(mdev);
	}

	return 0;
}
static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct p_barrier_ack *p = pi->data;
	struct drbd_conf *mdev;
	int vnr;

	tl_release(tconn, p->barrier, be32_to_cpu(p->set_size));

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_AHEAD &&
		    atomic_read(&mdev->ap_in_flight) == 0 &&
		    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
			mdev->start_resync_timer.expires = jiffies + HZ;
			add_timer(&mdev->start_resync_timer);
		}
	}
	rcu_read_unlock();

	return 0;
}
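
/*
 * Online-verify answer from the peer: record blocks reported out of
 * sync, update the progress marks, and queue w_ov_finished once the
 * reply for the last block has come in.
 */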
static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	struct drbd_work *w;
	sector_t sector;
	int size;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_out_of_sync_found(mdev, sector, size);
	else
		ov_out_of_sync_print(mdev);

	if (!get_ldev(mdev))
		return 0;

	drbd_rs_complete_io(mdev, sector);
	dec_rs_pending(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		w = kmalloc(sizeof(*w), GFP_NOIO);
		if (w) {
			w->cb = w_ov_finished;
			w->mdev = mdev;
			drbd_queue_work(&mdev->tconn->sender_work, w);
		} else {
			dev_err(DEV, "kmalloc(w) failed.\n");
			ov_out_of_sync_print(mdev);
			drbd_resync_finished(mdev);
		}
	}
	put_ldev(mdev);
	return 0;
}
static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
{
	return 0;
}
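
/*
 * Drain the done_ee lists of all volumes.  SIGNAL_ASENDER is cleared
 * while the per-volume work runs so the flush is not interrupted; we
 * loop until no volume has pending entries left.
 */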
static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr, not_empty = 0;

	do {
		clear_bit(SIGNAL_ASENDER, &tconn->flags);
		flush_signals(current);

		rcu_read_lock();
		idr_for_each_entry(&tconn->volumes, mdev, vnr) {
			kref_get(&mdev->kref);
			rcu_read_unlock();
			if (drbd_finish_peer_reqs(mdev)) {
				kref_put(&mdev->kref, &drbd_minor_destroy);
				return 1;
			}
			kref_put(&mdev->kref, &drbd_minor_destroy);
			rcu_read_lock();
		}
		set_bit(SIGNAL_ASENDER, &tconn->flags);

		spin_lock_irq(&tconn->req_lock);
		idr_for_each_entry(&tconn->volumes, mdev, vnr) {
			not_empty = !list_empty(&mdev->done_ee);
			if (not_empty)
				break;
		}
		spin_unlock_irq(&tconn->req_lock);
		rcu_read_unlock();
	} while (not_empty);

	return 0;
}
struct asender_cmd {
	size_t pkt_size;
	int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
};
static struct asender_cmd asender_tbl[] = {
	[P_PING]	    = { 0, got_Ping },
	[P_PING_ACK]	    = { 0, got_PingAck },
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_WRITE]   = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]	    = { sizeof(struct p_delay_probe93), got_skip },
	[P_RS_CANCEL]	    = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), got_conn_RqSReply },
	[P_RETRY_WRITE]	    = { sizeof(struct p_block_ack), got_BlockAck },
};
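
/*
 * The asender alternates between two duties: on the way out it sends
 * pings and flushes done_ee via tconn_finish_peer_reqs(), on the way in
 * it reassembles exactly header_size + cmd->pkt_size bytes per packet
 * before dispatching to the handler table above.
 */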
int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	struct asender_cmd *cmd = NULL;
	struct packet_info pi;
	int rv;
	void *buf    = tconn->meta.rbuf;
	int received = 0;
	unsigned int header_size = drbd_header_size(tconn);
	int expect   = header_size;
	bool ping_timeout_active = false;
	struct net_conf *nc;
	int ping_timeo, tcp_cork, ping_int;

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */

	while (get_t_state(thi) == RUNNING) {
		drbd_thread_current_set_cpu(thi);

		rcu_read_lock();
		nc = rcu_dereference(tconn->net_conf);
		ping_timeo = nc->ping_timeo;
		tcp_cork = nc->tcp_cork;
		ping_int = nc->ping_int;
		rcu_read_unlock();

		if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
			if (drbd_send_ping(tconn)) {
				conn_err(tconn, "drbd_send_ping has failed\n");
				goto reconnect;
			}
			tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
			ping_timeout_active = true;
		}

		/* TODO: conditionally cork; it may hurt latency if we cork without
		   much to send */
		if (tcp_cork)
			drbd_tcp_cork(tconn->meta.socket);
		if (tconn_finish_peer_reqs(tconn)) {
			conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
			goto reconnect;
		}
		/* but unconditionally uncork unless disabled */
		if (tcp_cork)
			drbd_tcp_uncork(tconn->meta.socket);

		/* short circuit, recv_msg would return EINTR anyway. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &tconn->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS	 (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv  < expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			conn_err(tconn, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			/* If the data socket received something meanwhile,
			 * that is good enough: peer is still alive. */
			if (time_after(tconn->last_received,
				jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
				continue;
			if (ping_timeout_active) {
				conn_err(tconn, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &tconn->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			conn_err(tconn, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

		if (received == expect && cmd == NULL) {
			if (decode_header(tconn, tconn->meta.rbuf, &pi))
				goto reconnect;
			cmd = &asender_tbl[pi.cmd];
			if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
				conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n",
					 cmdname(pi.cmd), pi.cmd);
				goto disconnect;
			}
			expect = header_size + cmd->pkt_size;
			if (pi.size != expect - header_size) {
				conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
					 pi.cmd, pi.size);
				goto reconnect;
			}
		}
		if (received == expect) {
			int err;

			err = cmd->fn(tconn, &pi);
			if (err) {
				conn_err(tconn, "%pf failed\n", cmd->fn);
				goto reconnect;
			}

			tconn->last_received = jiffies;

			if (cmd == &asender_tbl[P_PING_ACK]) {
				/* restore idle timeout */
				tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
				ping_timeout_active = false;
			}

			buf	 = tconn->meta.rbuf;
			received = 0;
			expect	 = header_size;
			cmd	 = NULL;
		}
	}

	if (0) {
reconnect:
		conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
	}
	if (0) {
disconnect:
		conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
	}
	clear_bit(SIGNAL_ASENDER, &tconn->flags);

	conn_info(tconn, "asender terminated\n");

	return 0;
}