/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/module.h>

#include <asm/uaccess.h>

#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

static int drbd_do_features(struct drbd_tconn *tconn);
static int drbd_do_auth(struct drbd_tconn *tconn);
static int drbd_disconnected(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_work *, int);

#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
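/* Editorial note: GFP_TRY deliberately omits __GFP_WAIT, so these
 * allocations fail fast instead of entering reclaim; reclaim could
 * recurse into write-out through a stacked DRBD device (see the
 * "criss-cross" comment in __drbd_alloc_pages() below).  __GFP_NOWARN
 * suppresses the usual allocation-failure warning for this
 * allowed-to-fail path. */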
/*
 * some helper functions to deal with single linked page lists,
 * page->private being our "next" pointer.
 */
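/* Editorial sketch: roughly how the page-chain accessors used below
 * are defined elsewhere in DRBD (drbd_int.h in this era); shown here
 * for readability, not verbatim:
 *
 *	static inline struct page *page_chain_next(struct page *page)
 *	{
 *		return (struct page *)page_private(page);
 *	}
 *	#define page_chain_for_each(page) \
 *		for (; page; page = page_chain_next(page))
 */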
/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
		tmp = page_chain_next(page);
			break; /* found sufficient pages */
			/* insufficient pages, don't use any of them. */

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */

/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
	while ((tmp = page_chain_next(page)))

static int page_chain_free(struct page *page)
	page_chain_for_each_safe(page, tmp) {

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
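/* Editorial usage sketch (illustrative): splicing a private chain back
 * into the global pool, as drbd_free_pages() does further down:
 *
 *	int n;
 *	struct page *tail = page_chain_tail(chain, &n);
 *	spin_lock(&drbd_pp_lock);
 *	page_chain_add(&drbd_pp_pool, chain, tail);
 *	drbd_pp_vacant += n;
 *	spin_unlock(&drbd_pp_lock);
 */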
static struct page *__drbd_alloc_pages(struct drbd_conf *mdev,
	struct page *page = NULL;
	struct page *tmp = NULL;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		set_page_private(tmp, (unsigned long)page);

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_alloc_pages will retry this
	 * function "soon". */
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		spin_unlock(&drbd_pp_lock);

static void reclaim_finished_net_peer_reqs(struct drbd_conf *mdev,
					   struct list_head *to_be_freed)
	struct drbd_peer_request *peer_req;
	struct list_head *le, *tle;
	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first unfinished one, we can
	   stop examining the list... */
	list_for_each_safe(le, tle, &mdev->net_ee) {
		peer_req = list_entry(le, struct drbd_peer_request, w.list);
		if (drbd_peer_req_has_active_page(peer_req))
		list_move(le, to_be_freed);

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_finished_net_peer_reqs(mdev, &reclaimed);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(mdev, peer_req);

/**
 * drbd_alloc_pages() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry, if not enough pages are available right now
 *
 * Tries to allocate number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
struct page *drbd_alloc_pages(struct drbd_conf *mdev, unsigned int number,
	struct page *page = NULL;

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyways. */
	nc = rcu_dereference(mdev->tconn->net_conf);
	mxb = nc ? nc->max_buffers : 1000000;

	if (atomic_read(&mdev->pp_in_use) < mxb)
		page = __drbd_alloc_pages(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mxb) {
			page = __drbd_alloc_pages(mdev, number);

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_alloc_pages interrupted!\n");

	finish_wait(&drbd_pp_wait, &wait);

	atomic_add(number, &mdev->pp_in_use);
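/* Editorial usage sketch (illustrative): the receive path further down
 * pairs these calls roughly as
 *
 *	struct page *chain = drbd_alloc_pages(mdev, nr_pages, true);
 *	if (!chain)
 *		return NULL;	// interrupted, or !retry and pool exhausted
 *	... receive into the chain via page_chain_for_each() ...
 *	drbd_free_pages(mdev, chain, 0);
 *
 * pp_in_use is incremented here and decremented again in
 * drbd_free_pages() below. */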
/* Must not be used from irq, as that may deadlock: see drbd_alloc_pages.
 * Is also used from inside another spin_lock_irq(&mdev->tconn->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_free_pages(struct drbd_conf *mdev, struct page *page, int is_net)
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count)
		i = page_chain_free(page);
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		spin_unlock(&drbd_pp_lock);
	i = atomic_sub_return(i, a);
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);

/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_alloc_peer_req()
 drbd_free_peer_reqs()
 drbd_finish_peer_reqs()
 drbd_wait_ee_list_empty()
*/
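/* Editorial sketch: the locking discipline the list above describes,
 * as implemented by drbd_wait_ee_list_empty() further down:
 *
 *	spin_lock_irq(&mdev->tconn->req_lock);
 *	_drbd_wait_ee_list_empty(mdev, head);	// leading "_": lock held
 *	spin_unlock_irq(&mdev->tconn->req_lock);
 *
 * whereas drbd_alloc_peer_req() and friends may sleep or take the
 * req_lock themselves, and so must be called without it. */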
struct drbd_peer_request *
drbd_alloc_peer_req(struct drbd_conf *mdev, u64 id, sector_t sector,
		    unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
	struct drbd_peer_request *peer_req;
	struct page *page = NULL;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))

	peer_req = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "%s: allocation failed\n", __func__);

		page = drbd_alloc_pages(mdev, nr_pages, (gfp_mask & __GFP_WAIT));

	drbd_clear_interval(&peer_req->i);
	peer_req->i.size = data_size;
	peer_req->i.sector = sector;
	peer_req->i.local = false;
	peer_req->i.waiting = false;

	peer_req->epoch = NULL;
	peer_req->w.mdev = mdev;
	peer_req->pages = page;
	atomic_set(&peer_req->pending_bios, 0);
	/*
	 * The block_id is opaque to the receiver. It is not endianness
	 * converted, and sent back to the sender unchanged.
	 */
	peer_req->block_id = id;

	mempool_free(peer_req, drbd_ee_mempool);

void __drbd_free_peer_req(struct drbd_conf *mdev, struct drbd_peer_request *peer_req,
	if (peer_req->flags & EE_HAS_DIGEST)
		kfree(peer_req->digest);
	drbd_free_pages(mdev, peer_req->pages, is_net);
	D_ASSERT(atomic_read(&peer_req->pending_bios) == 0);
	D_ASSERT(drbd_interval_empty(&peer_req->i));
	mempool_free(peer_req, drbd_ee_mempool);

int drbd_free_peer_reqs(struct drbd_conf *mdev, struct list_head *list)
	LIST_HEAD(work_list);
	struct drbd_peer_request *peer_req, *t;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		__drbd_free_peer_req(mdev, peer_req, is_net);

/*
 * See also comments in _req_mod(,BARRIER_ACKED) and receive_Barrier.
 */
static int drbd_finish_peer_reqs(struct drbd_conf *mdev)
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_peer_request *peer_req, *t;

	spin_lock_irq(&mdev->tconn->req_lock);
	reclaim_finished_net_peer_reqs(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	list_for_each_entry_safe(peer_req, t, &reclaimed, w.list)
		drbd_free_net_peer_req(mdev, peer_req);

	/* possible callbacks here:
	 * e_end_block, and e_end_resync_block, e_send_superseded.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(peer_req, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		err2 = peer_req->w.cb(&peer_req->w, !!err);
		drbd_free_peer_req(mdev, peer_req);
	wake_up(&mdev->ee_wait);

static void _drbd_wait_ee_list_empty(struct drbd_conf *mdev,
				     struct list_head *head)
	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->tconn->req_lock);
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->tconn->req_lock);

static void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
				    struct list_head *head)
	spin_lock_irq(&mdev->tconn->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->tconn->req_lock);

static int drbd_recv_short(struct socket *sock, void *buf, size_t size, int flags)
	struct msghdr msg = {
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};

	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
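/* Editorial note: when callers pass flags == 0, the MSG_WAITALL
 * default above makes sock_recvmsg() return only once the full @size
 * has arrived, on error, on EOF, or on a signal; callers therefore
 * treat any return value != size as a failed read (see
 * receive_first_packet() below). */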
static int drbd_recv(struct drbd_tconn *tconn, void *buf, size_t size)
	struct msghdr msg = {
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};

	rv = sock_recvmsg(tconn->data.socket, &msg, size, msg.msg_flags);

	/*
	 * ECONNRESET	other side closed the connection
	 * ERESTARTSYS	(on sock) we got a signal
	 */
		if (rv == -ECONNRESET)
			conn_info(tconn, "sock was reset by peer\n");
		else if (rv != -ERESTARTSYS)
			conn_err(tconn, "sock_recvmsg returned %d\n", rv);
	} else if (rv == 0) {
		conn_info(tconn, "sock was shut down by peer\n");

	/* signal came in, or peer/link went down,
	 * after we read a partial message
	 */
	/* D_ASSERT(signal_pending(current)); */

	conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);

static int drbd_recv_all(struct drbd_tconn *tconn, void *buf, size_t size)
	err = drbd_recv(tconn, buf, size);

static int drbd_recv_all_warn(struct drbd_tconn *tconn, void *buf, size_t size)
	err = drbd_recv_all(tconn, buf, size);
	if (err && !signal_pending(current))
		conn_warn(tconn, "short read (expected size %d)\n", (int)size);

/*
 * On individual connections, the socket buffer size must be set prior to the
 * listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
	/* open coded SO_SNDBUF, SO_RCVBUF */
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
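/* Editorial note: the userspace equivalent of the above would be
 *
 *	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &snd, sizeof(snd));
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, sizeof(rcv));
 *
 * and setting SOCK_SNDBUF_LOCK / SOCK_RCVBUF_LOCK mirrors what
 * setsockopt() does internally: it pins the sizes against kernel
 * auto-tuning. */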
static struct socket *drbd_try_connect(struct drbd_tconn *tconn)
	struct sockaddr_in6 src_in6;
	struct sockaddr_in6 peer_in6;
	int err, peer_addr_len, my_addr_len;
	int sndbuf_size, rcvbuf_size, connect_int;
	int disconnect_on_error = 1;

	nc = rcu_dereference(tconn->net_conf);
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;
	connect_int = nc->connect_int;

	my_addr_len = min_t(int, tconn->my_addr_len, sizeof(src_in6));
	memcpy(&src_in6, &tconn->my_addr, my_addr_len);

	if (((struct sockaddr *)&tconn->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	peer_addr_len = min_t(int, tconn->peer_addr_len, sizeof(src_in6));
	memcpy(&peer_in6, &tconn->peer_addr, peer_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)&src_in6)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo = connect_int * HZ;
	drbd_setbufsize(sock, sndbuf_size, rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so linux selects
	 * a free one dynamically.
	 */
	what = "bind before connect";
	err = sock->ops->bind(sock, (struct sockaddr *) &src_in6, my_addr_len);

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	err = sock->ops->connect(sock, (struct sockaddr *) &peer_in6, peer_addr_len, 0);

			/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
			/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN: case EHOSTUNREACH:
			disconnect_on_error = 0;
		conn_err(tconn, "%s failed, err = %d\n", what, err);
		if (disconnect_on_error)
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);

struct accept_wait_data {
	struct drbd_tconn *tconn;
	struct socket *s_listen;
	struct completion door_bell;
	void (*original_sk_state_change)(struct sock *sk);

static void drbd_incoming_connection(struct sock *sk)
	struct accept_wait_data *ad = sk->sk_user_data;
	void (*state_change)(struct sock *sk);

	state_change = ad->original_sk_state_change;
	if (sk->sk_state == TCP_ESTABLISHED)
		complete(&ad->door_bell);

static int prepare_listen_socket(struct drbd_tconn *tconn, struct accept_wait_data *ad)
	int err, sndbuf_size, rcvbuf_size, my_addr_len;
	struct sockaddr_in6 my_addr;
	struct socket *s_listen;

	nc = rcu_dereference(tconn->net_conf);
	sndbuf_size = nc->sndbuf_size;
	rcvbuf_size = nc->rcvbuf_size;

	my_addr_len = min_t(int, tconn->my_addr_len, sizeof(struct sockaddr_in6));
	memcpy(&my_addr, &tconn->my_addr, my_addr_len);

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)&my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &s_listen);

	s_listen->sk->sk_reuse = 1; /* SO_REUSEADDR */
	drbd_setbufsize(s_listen, sndbuf_size, rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen, (struct sockaddr *)&my_addr, my_addr_len);

	ad->s_listen = s_listen;
	write_lock_bh(&s_listen->sk->sk_callback_lock);
	ad->original_sk_state_change = s_listen->sk->sk_state_change;
	s_listen->sk->sk_state_change = drbd_incoming_connection;
	s_listen->sk->sk_user_data = ad;
	write_unlock_bh(&s_listen->sk->sk_callback_lock);

	err = s_listen->ops->listen(s_listen, 5);

		sock_release(s_listen);
	if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
		conn_err(tconn, "%s failed, err = %d\n", what, err);
		conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);

static void unregister_state_change(struct sock *sk, struct accept_wait_data *ad)
	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_state_change = ad->original_sk_state_change;
	sk->sk_user_data = NULL;
	write_unlock_bh(&sk->sk_callback_lock);

static struct socket *drbd_wait_for_connect(struct drbd_tconn *tconn, struct accept_wait_data *ad)
	int timeo, connect_int, err = 0;
	struct socket *s_estab = NULL;

	nc = rcu_dereference(tconn->net_conf);
	connect_int = nc->connect_int;

	timeo = connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% random jitter */

	err = wait_for_completion_interruptible_timeout(&ad->door_bell, timeo);

	err = kernel_accept(ad->s_listen, &s_estab, 0);
		if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
			conn_err(tconn, "accept failed, err = %d\n", err);
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);

	unregister_state_change(s_estab->sk, ad);

static int decode_header(struct drbd_tconn *, void *, struct packet_info *);

static int send_first_packet(struct drbd_tconn *tconn, struct drbd_socket *sock,
			     enum drbd_packet cmd)
	if (!conn_prepare_command(tconn, sock))
	return conn_send_command(tconn, sock, cmd, 0, NULL, 0);

static int receive_first_packet(struct drbd_tconn *tconn, struct socket *sock)
	unsigned int header_size = drbd_header_size(tconn);
	struct packet_info pi;

	err = drbd_recv_short(sock, tconn->data.rbuf, header_size, 0);
	if (err != header_size) {
	err = decode_header(tconn, tconn->data.rbuf, &pi);

/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct socket **sock)
	rr = drbd_recv_short(*sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {

/* Gets called if a connection is established, or if a new minor gets created
   in a connection */
int drbd_connected(struct drbd_conf *mdev)
	atomic_set(&mdev->packet_seq, 0);

	mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
		&mdev->tconn->cstate_mutex :
		&mdev->own_state_mutex;

	err = drbd_send_sync_param(mdev);
		err = drbd_send_sizes(mdev, 0, 0);
		err = drbd_send_uuids(mdev);
		err = drbd_send_current_state(mdev);
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	clear_bit(RESIZE_PENDING, &mdev->flags);
	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */

/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int conn_connect(struct drbd_tconn *tconn)
	struct drbd_socket sock, msock;
	struct drbd_conf *mdev;
	int vnr, timeout, h, ok;
	bool discard_my_data;
	enum drbd_state_rv rv;
	struct accept_wait_data ad = {
		.door_bell = COMPLETION_INITIALIZER_ONSTACK(ad.door_bell),

	if (conn_request_state(tconn, NS(conn, C_WF_CONNECTION), CS_VERBOSE) < SS_SUCCESS)

	mutex_init(&sock.mutex);
	sock.sbuf = tconn->data.sbuf;
	sock.rbuf = tconn->data.rbuf;

	mutex_init(&msock.mutex);
	msock.sbuf = tconn->meta.sbuf;
	msock.rbuf = tconn->meta.rbuf;

	/* Assume that the peer only understands protocol 80 until we know better. */
	tconn->agreed_pro_version = 80;

	if (prepare_listen_socket(tconn, &ad))

		s = drbd_try_connect(tconn);
				send_first_packet(tconn, &sock, P_INITIAL_DATA);
			} else if (!msock.socket) {
				clear_bit(RESOLVE_CONFLICTS, &tconn->flags);
				send_first_packet(tconn, &msock, P_INITIAL_META);
				conn_err(tconn, "Logic error in conn_connect()\n");
				goto out_release_sockets;

		if (sock.socket && msock.socket) {
			nc = rcu_dereference(tconn->net_conf);
			timeout = nc->ping_timeo * HZ / 10;
			schedule_timeout_interruptible(timeout);
			ok = drbd_socket_okay(&sock.socket);
			ok = drbd_socket_okay(&msock.socket) && ok;

		s = drbd_wait_for_connect(tconn, &ad);
			int fp = receive_first_packet(tconn, s);
			drbd_socket_okay(&sock.socket);
			drbd_socket_okay(&msock.socket);
					conn_warn(tconn, "initial packet S crossed\n");
					sock_release(sock.socket);
				set_bit(RESOLVE_CONFLICTS, &tconn->flags);
					conn_warn(tconn, "initial packet M crossed\n");
					sock_release(msock.socket);
				conn_warn(tconn, "Error receiving initial packet\n");

		if (tconn->cstate <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			if (get_t_state(&tconn->receiver) == EXITING)
				goto out_release_sockets;

		ok = drbd_socket_okay(&sock.socket);
		ok = drbd_socket_okay(&msock.socket) && ok;

		sock_release(ad.s_listen);

	sock.socket->sk->sk_reuse = 1; /* SO_REUSEADDR */
	msock.socket->sk->sk_reuse = 1; /* SO_REUSEADDR */

	sock.socket->sk->sk_allocation = GFP_NOIO;
	msock.socket->sk->sk_allocation = GFP_NOIO;

	sock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock.socket->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/*
	 * sock.socket->sk->sk_sndtimeo = tconn->net_conf->timeout*HZ/10;
	 * sock.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_CONNECTION_FEATURES timeout,
	 * which we set to 4x the configured ping_timeout. */

	nc = rcu_dereference(tconn->net_conf);

	sock.socket->sk->sk_sndtimeo =
	sock.socket->sk->sk_rcvtimeo = nc->ping_timeo*4*HZ/10;

	msock.socket->sk->sk_rcvtimeo = nc->ping_int*HZ;
	timeout = nc->timeout * HZ / 10;
	discard_my_data = nc->discard_my_data;

	msock.socket->sk->sk_sndtimeo = timeout;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock.socket);
	drbd_tcp_nodelay(msock.socket);

	tconn->data.socket = sock.socket;
	tconn->meta.socket = msock.socket;
	tconn->last_received = jiffies;

	h = drbd_do_features(tconn);

	if (tconn->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(tconn)) {
			conn_err(tconn, "Authentication of peer failed\n");
			conn_err(tconn, "Authentication of peer failed, trying again.\n");

	tconn->data.socket->sk->sk_sndtimeo = timeout;
	tconn->data.socket->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	if (drbd_send_protocol(tconn) == -EOPNOTSUPP)

	set_bit(STATE_SENT, &tconn->flags);

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		kref_get(&mdev->kref);

		if (discard_my_data)
			set_bit(DISCARD_MY_DATA, &mdev->flags);
			clear_bit(DISCARD_MY_DATA, &mdev->flags);

		drbd_connected(mdev);
		kref_put(&mdev->kref, &drbd_minor_destroy);

	rv = conn_request_state(tconn, NS(conn, C_WF_REPORT_PARAMS), CS_VERBOSE);
	if (rv < SS_SUCCESS) {
		clear_bit(STATE_SENT, &tconn->flags);

	drbd_thread_start(&tconn->asender);

	mutex_lock(&tconn->conf_update);
	/* The discard_my_data flag is a single-shot modifier to the next
	 * connection attempt, the handshake of which is now well underway.
	 * No need for rcu style copying of the whole struct
	 * just to clear a single value. */
	tconn->net_conf->discard_my_data = 0;
	mutex_unlock(&tconn->conf_update);

out_release_sockets:
	sock_release(ad.s_listen);
	sock_release(sock.socket);
	sock_release(msock.socket);

static int decode_header(struct drbd_tconn *tconn, void *header, struct packet_info *pi)
	unsigned int header_size = drbd_header_size(tconn);

	if (header_size == sizeof(struct p_header100) &&
	    *(__be32 *)header == cpu_to_be32(DRBD_MAGIC_100)) {
		struct p_header100 *h = header;
			conn_err(tconn, "Header padding is not zero\n");
		pi->vnr = be16_to_cpu(h->volume);
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
	} else if (header_size == sizeof(struct p_header95) &&
		   *(__be16 *)header == cpu_to_be16(DRBD_MAGIC_BIG)) {
		struct p_header95 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be32_to_cpu(h->length);
	} else if (header_size == sizeof(struct p_header80) &&
		   *(__be32 *)header == cpu_to_be32(DRBD_MAGIC)) {
		struct p_header80 *h = header;
		pi->cmd = be16_to_cpu(h->command);
		pi->size = be16_to_cpu(h->length);
		conn_err(tconn, "Wrong magic value 0x%08x in protocol version %d\n",
			 be32_to_cpu(*(__be32 *)header),
			 tconn->agreed_pro_version);
	pi->data = header + header_size;
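/* Editorial sketch of the three on-wire header layouts decoded above,
 * as implied by the be16/be32 conversions (see the p_header structs in
 * drbd.h for the authoritative definitions):
 *
 *	h80:  be32 magic,  be16 command, be16 length
 *	h95:  be16 magic,  be16 command, be32 length
 *	h100: be32 magic,  be16 volume,  be16 command, be32 length, pad
 */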
static int drbd_recv_header(struct drbd_tconn *tconn, struct packet_info *pi)
	void *buffer = tconn->data.rbuf;

	err = drbd_recv_all_warn(tconn, buffer, drbd_header_size(tconn));

	err = decode_header(tconn, buffer, pi);
	tconn->last_received = jiffies;

static void drbd_flush(struct drbd_tconn *tconn)
	struct drbd_conf *mdev;

	if (tconn->write_ordering >= WO_bdev_flush) {
		idr_for_each_entry(&tconn->volumes, mdev, vnr) {
			if (!get_ldev(mdev))
			kref_get(&mdev->kref);

			rv = blkdev_issue_flush(mdev->ldev->backing_bdev,
				dev_info(DEV, "local disk flush failed with status %d\n", rv);
				/* would rather check on EOPNOTSUPP, but that is not reliable.
				 * don't try again for ANY return value != 0
				 * if (rv == -EOPNOTSUPP) */
				drbd_bump_write_ordering(tconn, WO_drain_io);
			kref_put(&mdev->kref, &drbd_minor_destroy);

/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, eventually finishes it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&tconn->epoch_lock);
		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
			atomic_dec(&epoch->active);
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
		case EV_BECAME_LAST:

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&tconn->epoch_lock);
				drbd_send_b_ack(epoch->tconn, epoch->barrier_nr, epoch_size);
				spin_lock(&tconn->epoch_lock);

			/* FIXME: dec unacked on connection, once we have
			 * something to count pending connection packets in. */
			if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))
				dec_unacked(epoch->tconn);

			if (tconn->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);
				if (rv == FE_STILL_LIVE)
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)

	spin_unlock(&tconn->epoch_lock);
/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @tconn:	DRBD connection.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo)
	struct disk_conf *dc;
	struct drbd_conf *mdev;
	enum write_ordering_e pwo;

	static char *write_ordering_str[] = {
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",

	pwo = tconn->write_ordering;
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (!get_ldev_if_state(mdev, D_ATTACHING))
		dc = rcu_dereference(mdev->ldev->disk_conf);

		if (wo == WO_bdev_flush && !dc->disk_flushes)
		if (wo == WO_drain_io && !dc->disk_drain)

	tconn->write_ordering = wo;
	if (pwo != tconn->write_ordering || wo == WO_bdev_flush)
		conn_info(tconn, "Method to ensure write ordering: %s\n", write_ordering_str[tconn->write_ordering]);
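/* Editorial note (inferred from the code above): the effective
 * fallback direction is flush -> drain -> none.  drbd_flush() above
 * downgrades the whole connection to WO_drain_io when a backing
 * device fails a flush, and the per-volume disk_flushes/disk_drain
 * settings cap the chosen method for every attached volume in the
 * loop. */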
/**
 * drbd_submit_peer_request()
 * @mdev:	DRBD device.
 * @peer_req:	peer request
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_peer_request(struct drbd_conf *mdev,
			     struct drbd_peer_request *peer_req,
			     const unsigned rw, const int fault_type)
	struct bio *bios = NULL;

	struct page *page = peer_req->pages;
	sector_t sector = peer_req->i.sector;
	unsigned ds = peer_req->i.size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* In most cases, we will only need one bio. But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio.
	 *
	 * Plain bio_alloc is good enough here, this is no DRBD internally
	 * generated bio, but a bio allocated on behalf of the peer.
	 */
	bio = bio_alloc(GFP_NOIO, nr_pages);
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
	/* > peer_req->i.sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_private = peer_req;
	bio->bi_end_io = drbd_peer_request_endio;

	bio->bi_next = bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyways,
			 * we deal with it, and complain (below). */
			if (bio->bi_vcnt == 0) {
				dev_err(DEV,
					"bio_add_page failed for len=%u, "
					"bi_vcnt=0 (bi_sector=%llu)\n",
					len, (unsigned long long)bio->bi_sector);

	D_ASSERT(page == NULL);

	atomic_set(&peer_req->pending_bios, n_bios);
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);

		bios = bios->bi_next;

static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
					     struct drbd_peer_request *peer_req)
	struct drbd_interval *i = &peer_req->i;

	drbd_remove_interval(&mdev->write_requests, i);
	drbd_clear_interval(i);

	/* Wake up any processes waiting for this peer request to complete. */
		wake_up(&mdev->misc_wait);

void conn_wait_active_ee_empty(struct drbd_tconn *tconn)
	struct drbd_conf *mdev;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		kref_get(&mdev->kref);
		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
		kref_put(&mdev->kref, &drbd_minor_destroy);

static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
	struct p_barrier *p = pi->data;
	struct drbd_epoch *epoch;

	/* FIXME these are unacked on connection,
	 * not a specific (peer)device.
	 */
	tconn->current_epoch->barrier_nr = p->barrier;
	tconn->current_epoch->tconn = tconn;
	rv = drbd_may_finish_epoch(tconn, tconn->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (tconn->write_ordering) {
		if (rv == FE_RECYCLED)

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
			conn_warn(tconn, "Allocation of an epoch failed, slowing down\n");

		conn_wait_active_ee_empty(tconn);

		if (atomic_read(&tconn->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);

		conn_err(tconn, "Strangeness in tconn->write_ordering %d\n", tconn->write_ordering);

	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&tconn->epoch_lock);
	if (atomic_read(&tconn->current_epoch->epoch_size)) {
		list_add(&epoch->list, &tconn->current_epoch->list);
		tconn->current_epoch = epoch;
		/* The current_epoch got recycled while we allocated this one... */
	spin_unlock(&tconn->epoch_lock);

/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_peer_request *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
	      int data_size) __must_hold(local)
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_peer_request *peer_req;

	void *dig_in = mdev->tconn->int_dig_in;
	void *dig_vv = mdev->tconn->int_dig_vv;
	unsigned long *data;

	if (mdev->tconn->peer_integrity_tfm) {
		dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
		/*
		 * FIXME: Receive the incoming digest into the receive buffer
		 *	  here, together with its struct p_data?
		 */
		err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);

	if (!expect(IS_ALIGNED(data_size, 512)))
	if (!expect(data_size <= DRBD_MAX_BIO_SIZE))
	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "request from peer beyond end of local disk: "
			"capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	peer_req = drbd_alloc_peer_req(mdev, id, sector, data_size, GFP_NOIO);

	page = peer_req->pages;
	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		err = drbd_recv_all_warn(mdev->tconn, data, len);
		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
			drbd_free_peer_req(mdev, peer_req);

		drbd_csum_ee(mdev, mdev->tconn->peer_integrity_tfm, peer_req, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_free_peer_req(mdev, peer_req);

	mdev->recv_cnt += data_size>>9;

/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)

	page = drbd_alloc_pages(mdev, 1, 1);

		unsigned int len = min_t(int, data_size, PAGE_SIZE);
		err = drbd_recv_all_warn(mdev->tconn, data, len);

	drbd_free_pages(mdev, page, 0);

static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
	struct bio_vec *bvec;

	int dgs, err, i, expect;
	void *dig_in = mdev->tconn->int_dig_in;
	void *dig_vv = mdev->tconn->int_dig_vv;

	if (mdev->tconn->peer_integrity_tfm) {
		dgs = crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
		err = drbd_recv_all_warn(mdev->tconn, dig_in, dgs);

	/* optimistically update recv_cnt.  if receiving fails below,
	 * we disconnect anyways, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
		expect = min_t(int, data_size, bvec->bv_len);
		err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
		kunmap(bvec->bv_page);
		data_size -= expect;

		drbd_csum_bio(mdev, mdev->tconn->peer_integrity_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");

	D_ASSERT(data_size == 0);

/*
 * e_end_resync_block() is called in asender context via
 * drbd_finish_peer_reqs().
 */
static int e_end_resync_block(struct drbd_work *w, int unused)
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	struct drbd_conf *mdev = w->mdev;
	sector_t sector = peer_req->i.sector;

	D_ASSERT(drbd_interval_empty(&peer_req->i));

	if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, peer_req->i.size);
		err = drbd_send_ack(mdev, P_RS_WRITE_ACK, peer_req);
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, peer_req->i.size);

		err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);

static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
	struct drbd_peer_request *peer_req;

	peer_req = read_in_block(mdev, ID_SYNCER, sector, data_size);

	dec_rs_pending(mdev);

	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */

	peer_req->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_add(&peer_req->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_peer_request(mdev, peer_req, WRITE, DRBD_FAULT_RS_WR) == 0)

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&peer_req->w.list);
	spin_unlock_irq(&mdev->tconn->req_lock);

	drbd_free_peer_req(mdev, peer_req);

static struct drbd_request *
find_request(struct drbd_conf *mdev, struct rb_root *root, u64 id,
	     sector_t sector, bool missing_ok, const char *func)
	struct drbd_request *req;

	/* Request object according to our peer */
	req = (struct drbd_request *)(unsigned long)id;
	if (drbd_contains_interval(root, sector, &req->i) && req->i.local)

	dev_err(DEV, "%s: failed to find request 0x%lx, sector %llus\n", func,
		(unsigned long)id, (unsigned long long)sector);

static int receive_DataReply(struct drbd_tconn *tconn, struct packet_info *pi)
	struct drbd_conf *mdev;
	struct drbd_request *req;
	struct p_data *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->tconn->req_lock);
	req = find_request(mdev, &mdev->read_requests, p->block_id, sector, false, __func__);
	spin_unlock_irq(&mdev->tconn->req_lock);

	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	err = recv_dless_read(mdev, req, sector, pi->size);
		req_mod(req, DATA_RECEIVED);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */

static int receive_RSDataReply(struct drbd_tconn *tconn, struct packet_info *pi)
	struct drbd_conf *mdev;
	struct p_data *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_peer_request_endio. */
		err = recv_resync_read(mdev, sector, pi->size);
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		err = drbd_drain_block(mdev, pi->size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);

	atomic_add(pi->size >> 9, &mdev->rs_sect_in);

static void restart_conflicting_writes(struct drbd_conf *mdev,
				       sector_t sector, int size)
	struct drbd_interval *i;
	struct drbd_request *req;

	drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
		req = container_of(i, struct drbd_request, i);
		if (req->rq_state & RQ_LOCAL_PENDING ||
		    !(req->rq_state & RQ_POSTPONED))
		/* as it is RQ_POSTPONED, this will cause it to
		 * be queued on the retry workqueue. */
		__req_mod(req, CONFLICT_RESOLVED, NULL);

/*
 * e_end_block() is called in asender context via drbd_finish_peer_reqs().
 */
static int e_end_block(struct drbd_work *w, int cancel)
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);
	struct drbd_conf *mdev = w->mdev;
	sector_t sector = peer_req->i.sector;

	if (peer_req->flags & EE_SEND_WRITE_ACK) {
		if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				peer_req->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			err = drbd_send_ack(mdev, pcmd, peer_req);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, peer_req->i.size);
			err = drbd_send_ack(mdev, P_NEG_ACK, peer_req);
			/* we expect it to be marked out of sync anyways...
			 * maybe assert this? */

	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (peer_req->flags & EE_IN_INTERVAL_TREE) {
		spin_lock_irq(&mdev->tconn->req_lock);
		D_ASSERT(!drbd_interval_empty(&peer_req->i));
		drbd_remove_epoch_entry_interval(mdev, peer_req);
		if (peer_req->flags & EE_RESTART_REQUESTS)
			restart_conflicting_writes(mdev, sector, peer_req->i.size);
		spin_unlock_irq(&mdev->tconn->req_lock);
		D_ASSERT(drbd_interval_empty(&peer_req->i));

	drbd_may_finish_epoch(mdev->tconn, peer_req->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));

static int e_send_ack(struct drbd_work *w, enum drbd_packet ack)
	struct drbd_conf *mdev = w->mdev;
	struct drbd_peer_request *peer_req =
		container_of(w, struct drbd_peer_request, w);

	err = drbd_send_ack(mdev, ack, peer_req);

static int e_send_superseded(struct drbd_work *w, int unused)
	return e_send_ack(w, P_SUPERSEDED);

static int e_send_retry_write(struct drbd_work *w, int unused)
	struct drbd_tconn *tconn = w->mdev->tconn;

	return e_send_ack(w, tconn->agreed_pro_version >= 100 ?
			     P_RETRY_WRITE : P_SUPERSEDED);

static bool seq_greater(u32 a, u32 b)
	/*
	 * We assume 32-bit wrap-around here.
	 * For 24-bit wrap-around, we would have to shift:
	 *  a <<= 8; b <<= 8;
	 */
	return (s32)a - (s32)b > 0;
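/* Editorial example: the signed difference makes the comparison safe
 * across 32-bit wrap-around:
 *
 *	seq_greater(1, 0xffffffff) == true
 *
 * because (s32)1 - (s32)0xffffffff == 1 - (-1) == 2 > 0, i.e.
 * sequence number 1 comes "after" 0xffffffff once the counter wraps. */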
static u32 seq_max(u32 a, u32 b)
	return seq_greater(a, b) ? a : b;

static bool need_peer_seq(struct drbd_conf *mdev)
	struct drbd_tconn *tconn = mdev->tconn;

	/*
	 * We only need to keep track of the last packet_seq number of our peer
	 * if we are in dual-primary mode and we have the resolve-conflicts flag set; see
	 * handle_write_conflicts().
	 */
	tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;

	return tp && test_bit(RESOLVE_CONFLICTS, &tconn->flags);

static void update_peer_seq(struct drbd_conf *mdev, unsigned int peer_seq)
	unsigned int newest_peer_seq;

	if (need_peer_seq(mdev)) {
		spin_lock(&mdev->peer_seq_lock);
		newest_peer_seq = seq_max(mdev->peer_seq, peer_seq);
		mdev->peer_seq = newest_peer_seq;
		spin_unlock(&mdev->peer_seq_lock);
		/* wake up only if we actually changed mdev->peer_seq */
		if (peer_seq == newest_peer_seq)
			wake_up(&mdev->seq_wait);

static inline int overlaps(sector_t s1, int l1, sector_t s2, int l2)
	return !((s1 + (l1>>9) <= s2) || (s1 >= s2 + (l2>>9)));
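/* Editorial example: l1/l2 are byte counts, s1/s2 are 512-byte
 * sectors, so two adjacent 4 KiB writes do not overlap:
 *
 *	overlaps(0, 4096, 8, 4096) == 0
 *
 * since 0 + (4096 >> 9) == 8 <= 8 (the end of the first is exactly
 * the start of the second). */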
/* maybe change sync_ee into interval trees as well? */
static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_peer_request *peer_req)
	struct drbd_peer_request *rs_req;

	spin_lock_irq(&mdev->tconn->req_lock);
	list_for_each_entry(rs_req, &mdev->sync_ee, w.list) {
		if (overlaps(peer_req->i.sector, peer_req->i.size,
			     rs_req->i.sector, rs_req->i.size)) {
	spin_unlock_irq(&mdev->tconn->req_lock);

/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq number, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<9 == 512 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
static int wait_for_and_update_peer_seq(struct drbd_conf *mdev, const u32 peer_seq)

	if (!need_peer_seq(mdev))

	spin_lock(&mdev->peer_seq_lock);
		if (!seq_greater(peer_seq - 1, mdev->peer_seq)) {
			mdev->peer_seq = seq_max(mdev->peer_seq, peer_seq);

		if (signal_pending(current)) {

		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
		spin_unlock(&mdev->peer_seq_lock);
		timeout = rcu_dereference(mdev->tconn->net_conf)->ping_timeo*HZ/10;
		timeout = schedule_timeout(timeout);
		spin_lock(&mdev->peer_seq_lock);
			dev_err(DEV, "Timed out waiting for missing ack packets; disconnecting\n");
	spin_unlock(&mdev->peer_seq_lock);
	finish_wait(&mdev->seq_wait, &wait);

/* see also bio_flags_to_wire()
 * DRBD_REQ_*, because we need to semantically map the flags to data packet
 * flags and back. We may replicate to other kernel versions. */
static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
	return  (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
		(dpf & DP_FUA ? REQ_FUA : 0) |
		(dpf & DP_FLUSH ? REQ_FLUSH : 0) |
		(dpf & DP_DISCARD ? REQ_DISCARD : 0);

static void fail_postponed_requests(struct drbd_conf *mdev, sector_t sector,
	struct drbd_interval *i;

	drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
		struct drbd_request *req;
		struct bio_and_error m;

		req = container_of(i, struct drbd_request, i);
		if (!(req->rq_state & RQ_POSTPONED))
		req->rq_state &= ~RQ_POSTPONED;
		__req_mod(req, NEG_ACKED, &m);
		spin_unlock_irq(&mdev->tconn->req_lock);
			complete_master_bio(mdev, &m);
		spin_lock_irq(&mdev->tconn->req_lock);

static int handle_write_conflicts(struct drbd_conf *mdev,
				  struct drbd_peer_request *peer_req)
	struct drbd_tconn *tconn = mdev->tconn;
	bool resolve_conflicts = test_bit(RESOLVE_CONFLICTS, &tconn->flags);
	sector_t sector = peer_req->i.sector;
	const unsigned int size = peer_req->i.size;
	struct drbd_interval *i;

	/*
	 * Inserting the peer request into the write_requests tree will prevent
	 * new conflicting local requests from being added.
	 */
	drbd_insert_interval(&mdev->write_requests, &peer_req->i);

	drbd_for_each_overlap(i, &mdev->write_requests, sector, size) {
		if (i == &peer_req->i)

			/*
			 * Our peer has sent a conflicting remote request; this
			 * should not happen in a two-node setup. Wait for the
			 * earlier peer request to complete.
			 */
			err = drbd_wait_misc(mdev, i);

		equal = i->sector == sector && i->size == size;
		if (resolve_conflicts) {
			/*
			 * If the peer request is fully contained within the
			 * overlapping request, it can be considered overwritten
			 * and thus superseded; otherwise, it will be retried
			 * once all overlapping requests have completed.
			 */
			bool superseded = i->sector <= sector && i->sector +
				       (i->size >> 9) >= sector + (size >> 9);

				dev_alert(DEV, "Concurrent writes detected: "
					       "local=%llus +%u, remote=%llus +%u, "
					       "assuming %s came first\n",
					  (unsigned long long)i->sector, i->size,
					  (unsigned long long)sector, size,
					  superseded ? "local" : "remote");

			peer_req->w.cb = superseded ? e_send_superseded :
			list_add_tail(&peer_req->w.list, &mdev->done_ee);
			wake_asender(mdev->tconn);

			struct drbd_request *req =
				container_of(i, struct drbd_request, i);

				dev_alert(DEV, "Concurrent writes detected: "
					       "local=%llus +%u, remote=%llus +%u\n",
					  (unsigned long long)i->sector, i->size,
					  (unsigned long long)sector, size);

			if (req->rq_state & RQ_LOCAL_PENDING ||
			    !(req->rq_state & RQ_POSTPONED)) {
				/*
				 * Wait for the node with the discard flag to
				 * decide if this request has been superseded
				 * or needs to be retried.
				 * Requests that have been superseded will
				 * disappear from the write_requests tree.
				 *
				 * In addition, wait for the conflicting
				 * request to finish locally before submitting
				 * the conflicting peer request.
				 */
				err = drbd_wait_misc(mdev, &req->i);
					_conn_request_state(mdev->tconn,
							    NS(conn, C_TIMEOUT),
					fail_postponed_requests(mdev, sector, size);

			/*
			 * Remember to restart the conflicting requests after
			 * the new peer request has completed.
			 */
			peer_req->flags |= EE_RESTART_REQUESTS;

	drbd_remove_epoch_entry_interval(mdev, peer_req);

/* mirrored write */
static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
	struct drbd_conf *mdev;

	struct drbd_peer_request *peer_req;
	struct p_data *p = pi->data;
	u32 peer_seq = be32_to_cpu(p->seq_num);

	mdev = vnr_to_mdev(tconn, pi->vnr);

	if (!get_ldev(mdev)) {
		err = wait_for_and_update_peer_seq(mdev, peer_seq);
		drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
		atomic_inc(&tconn->current_epoch->epoch_size);
		err2 = drbd_drain_block(mdev, pi->size);

	/*
	 * Corresponding put_ldev done either below (on various errors), or in
	 * drbd_peer_request_endio, if we successfully submit the data at the
	 * end of this function.
	 */

	sector = be64_to_cpu(p->sector);
	peer_req = read_in_block(mdev, p->block_id, sector, pi->size);

	peer_req->w.cb = e_end_block;

	dp_flags = be32_to_cpu(p->dp_flags);
	rw |= wire_flags_to_bio(mdev, dp_flags);
	if (peer_req->pages == NULL) {
		D_ASSERT(peer_req->i.size == 0);
		D_ASSERT(dp_flags & DP_FLUSH);

	if (dp_flags & DP_MAY_SET_IN_SYNC)
		peer_req->flags |= EE_MAY_SET_IN_SYNC;

	spin_lock(&tconn->epoch_lock);
	peer_req->epoch = tconn->current_epoch;
	atomic_inc(&peer_req->epoch->epoch_size);
	atomic_inc(&peer_req->epoch->active);
	spin_unlock(&tconn->epoch_lock);

	tp = rcu_dereference(mdev->tconn->net_conf)->two_primaries;
		peer_req->flags |= EE_IN_INTERVAL_TREE;
		err = wait_for_and_update_peer_seq(mdev, peer_seq);
			goto out_interrupted;
		spin_lock_irq(&mdev->tconn->req_lock);
		err = handle_write_conflicts(mdev, peer_req);
			spin_unlock_irq(&mdev->tconn->req_lock);
			if (err == -ENOENT) {
			goto out_interrupted;
		spin_lock_irq(&mdev->tconn->req_lock);
	list_add(&peer_req->w.list, &mdev->active_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (mdev->state.conn == C_SYNC_TARGET)
		wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, peer_req));

	if (mdev->tconn->agreed_pro_version < 100) {
		switch (rcu_dereference(mdev->tconn->net_conf)->wire_protocol) {
			dp_flags |= DP_SEND_WRITE_ACK;
			dp_flags |= DP_SEND_RECEIVE_ACK;

	if (dp_flags & DP_SEND_WRITE_ACK) {
		peer_req->flags |= EE_SEND_WRITE_ACK;
		/* corresponding dec_unacked() in e_end_block()
		 * respective _drbd_clear_done_ee */

	if (dp_flags & DP_SEND_RECEIVE_ACK) {
		/* I really don't like it that the receiver thread
		 * sends on the msock, but anyways */
		drbd_send_ack(mdev, P_RECV_ACK, peer_req);

	if (mdev->state.pdsk < D_INCONSISTENT) {
		/* In case we have the only disk of the cluster, */
		drbd_set_out_of_sync(mdev, peer_req->i.sector, peer_req->i.size);
		peer_req->flags |= EE_CALL_AL_COMPLETE_IO;
		peer_req->flags &= ~EE_MAY_SET_IN_SYNC;
		drbd_al_begin_io(mdev, &peer_req->i);

	err = drbd_submit_peer_request(mdev, peer_req, rw, DRBD_FAULT_DT_WR);

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->tconn->req_lock);
	list_del(&peer_req->w.list);
	drbd_remove_epoch_entry_interval(mdev, peer_req);
	spin_unlock_irq(&mdev->tconn->req_lock);
	if (peer_req->flags & EE_CALL_AL_COMPLETE_IO)
		drbd_al_complete_io(mdev, &peer_req->i);

	drbd_may_finish_epoch(tconn, peer_req->epoch, EV_PUT + EV_CLEANUP);
	drbd_free_peer_req(mdev, peer_req);

/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
 * (more than 64 sectors) of activity we cannot account for with our own resync
 * activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 */
int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
	struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
	unsigned long db, dt, dbdt;
	struct lc_element *tmp;
	unsigned int c_min_rate;

	c_min_rate = rcu_dereference(mdev->ldev->disk_conf)->c_min_rate;

	/* feature disabled? */
	if (c_min_rate == 0)

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
			spin_unlock_irq(&mdev->al_lock);
		/* Do not slow down if app IO is already waiting for this extent */
	spin_unlock_irq(&mdev->al_lock);

	curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
		      (int)part_stat_read(&disk->part0, sectors[1]) -
		      atomic_read(&mdev->rs_sect_ev);

	if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
		unsigned long rs_left;

		mdev->rs_last_events = curr_events;

		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP */
		i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;

		if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
			rs_left = mdev->ov_left;
			rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;

		dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;

		db = mdev->rs_mark_left[i] - rs_left;
		dbdt = Bit2KB(db/dt);

		if (dbdt > c_min_rate)
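/* Editorial worked example (illustrative numbers): rs_mark_left[] is
 * in bitmap bits, each covering 4 KiB.  If db = 3000 bits were cleared
 * in dt = 4 seconds, dbdt = Bit2KB(3000/4) = 3000 KiB/s; if that
 * exceeds the configured c-min-rate, the caller is told to throttle
 * this resync request (see receive_DataRequest() below). */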
2372 static int receive_DataRequest(struct drbd_tconn *tconn, struct packet_info *pi)
2374 struct drbd_conf *mdev;
2377 struct drbd_peer_request *peer_req;
2378 struct digest_info *di = NULL;
2380 unsigned int fault_type;
2381 struct p_block_req *p = pi->data;
2383 mdev = vnr_to_mdev(tconn, pi->vnr);
2386 capacity = drbd_get_capacity(mdev->this_bdev);
2388 sector = be64_to_cpu(p->sector);
2389 size = be32_to_cpu(p->blksize);
2391 if (size <= 0 || !IS_ALIGNED(size, 512) || size > DRBD_MAX_BIO_SIZE) {
2392 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2393 (unsigned long long)sector, size);
2396 if (sector + (size>>9) > capacity) {
2397 dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
2398 (unsigned long long)sector, size);
2402 if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
2405 case P_DATA_REQUEST:
2406 drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
2408 case P_RS_DATA_REQUEST:
2409 case P_CSUM_RS_REQUEST:
2411 drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY , p);
2415 dec_rs_pending(mdev);
2416 drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
2421 if (verb && __ratelimit(&drbd_ratelimit_state))
2422 dev_err(DEV, "Can not satisfy peer's read request, "
2423 "no local data.\n");
2425 /* drain the possibly present payload */
2426 return drbd_drain_block(mdev, pi->size);
2429 /* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
2430 * "criss-cross" setup, that might cause write-out on some other DRBD,
2431 * which in turn might block on the other node at this very place. */
2432 peer_req = drbd_alloc_peer_req(mdev, p->block_id, sector, size, GFP_NOIO);
2439 case P_DATA_REQUEST:
2440 peer_req->w.cb = w_e_end_data_req;
2441 fault_type = DRBD_FAULT_DT_RD;
2442 /* application IO, don't drbd_rs_begin_io */
2445 case P_RS_DATA_REQUEST:
2446 peer_req->w.cb = w_e_end_rsdata_req;
2447 fault_type = DRBD_FAULT_RS_RD;
2448 /* used in the sector offset progress display */
2449 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
2453 case P_CSUM_RS_REQUEST:
2454 fault_type = DRBD_FAULT_RS_RD;
2455 di = kmalloc(sizeof(*di) + pi->size, GFP_NOIO);
2459 di->digest_size = pi->size;
2460 di->digest = (((char *)di)+sizeof(struct digest_info));
2462 peer_req->digest = di;
2463 peer_req->flags |= EE_HAS_DIGEST;
2465 if (drbd_recv_all(mdev->tconn, di->digest, pi->size))
2468 if (pi->cmd == P_CSUM_RS_REQUEST) {
2469 D_ASSERT(mdev->tconn->agreed_pro_version >= 89);
2470 peer_req->w.cb = w_e_end_csum_rs_req;
2471 /* used in the sector offset progress display */
2472 mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
2473 } else if (pi->cmd == P_OV_REPLY) {
2474 /* track progress, we may need to throttle */
2475 atomic_add(size >> 9, &mdev->rs_sect_in);
2476 peer_req->w.cb = w_e_end_ov_reply;
2477 dec_rs_pending(mdev);
2478 /* drbd_rs_begin_io done when we sent this request,
2479 * but accounting still needs to be done. */
2480 goto submit_for_resync;
2485 if (mdev->ov_start_sector == ~(sector_t)0 &&
2486 mdev->tconn->agreed_pro_version >= 90) {
2487 unsigned long now = jiffies;
2489 mdev->ov_start_sector = sector;
2490 mdev->ov_position = sector;
2491 mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
2492 mdev->rs_total = mdev->ov_left;
2493 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2494 mdev->rs_mark_left[i] = mdev->ov_left;
2495 mdev->rs_mark_time[i] = now;
2497 dev_info(DEV, "Online Verify start sector: %llu\n",
2498 (unsigned long long)sector);
2500 peer_req->w.cb = w_e_end_ov_req;
2501 fault_type = DRBD_FAULT_RS_RD;
2508 /* Throttle, drbd_rs_begin_io and submit should become asynchronous
2509 * wrt the receiver, but it is not as straightforward as it may seem.
2510 * Various places in the resync start and stop logic assume resync
2511 * requests are processed in order, requeuing this on the worker thread
2512 * introduces a bunch of new code for synchronization between threads.
2514 * Unlimited throttling before drbd_rs_begin_io may stall the resync
2515 * "forever", throttling after drbd_rs_begin_io will lock that extent
2516 * for application writes for the same time. For now, just throttle
2517 * here, where the rest of the code expects the receiver to sleep for
2518 * a while, anyways. */
2521 /* Throttle before drbd_rs_begin_io, as that locks out application IO;
2522 * this defers syncer requests for some time, before letting at least
2523 * one request through. The resync controller on the receiving side
2524 * will adapt to the incoming rate accordingly.
2526 * We cannot throttle here if remote is Primary/SyncTarget:
2527 * we would also throttle its application reads.
2528 * In that case, throttling is done on the SyncTarget only.
2530 if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
2531 schedule_timeout_uninterruptible(HZ/10);
2532 if (drbd_rs_begin_io(mdev, sector))
2536 atomic_add(size >> 9, &mdev->rs_sect_ev);
2540 spin_lock_irq(&mdev->tconn->req_lock);
2541 list_add_tail(&peer_req->w.list, &mdev->read_ee);
2542 spin_unlock_irq(&mdev->tconn->req_lock);
2544 if (drbd_submit_peer_request(mdev, peer_req, READ, fault_type) == 0)
2547 /* don't care for the reason here */
2548 dev_err(DEV, "submit failed, triggering re-connect\n");
2549 spin_lock_irq(&mdev->tconn->req_lock);
2550 list_del(&peer_req->w.list);
2551 spin_unlock_irq(&mdev->tconn->req_lock);
2552 /* no drbd_rs_complete_io(), we are dropping the connection anyways */
2556 drbd_free_peer_req(mdev, peer_req);
2560 static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
2562 int self, peer, rv = -100;
2563 unsigned long ch_self, ch_peer;
2564 enum drbd_after_sb_p after_sb_0p;
2566 self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
2567 peer = mdev->p_uuid[UI_BITMAP] & 1;
2569 ch_peer = mdev->p_uuid[UI_SIZE];
2570 ch_self = mdev->comm_bm_set;
2573 after_sb_0p = rcu_dereference(mdev->tconn->net_conf)->after_sb_0p;
2575 switch (after_sb_0p) {
2577 case ASB_DISCARD_SECONDARY:
2578 case ASB_CALL_HELPER:
2580 dev_err(DEV, "Configuration error.\n");
2582 case ASB_DISCONNECT:
2584 case ASB_DISCARD_YOUNGER_PRI:
2585 if (self == 0 && peer == 1) {
2589 if (self == 1 && peer == 0) {
2593 /* Else fall through to one of the other strategies... */
2594 case ASB_DISCARD_OLDER_PRI:
2595 if (self == 0 && peer == 1) {
2599 if (self == 1 && peer == 0) {
2603 /* Else fall through to one of the other strategies... */
2604 dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
2605 "Using discard-least-changes instead\n");
2606 case ASB_DISCARD_ZERO_CHG:
2607 if (ch_peer == 0 && ch_self == 0) {
2608 rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
2612 if (ch_peer == 0) { rv = 1; break; }
2613 if (ch_self == 0) { rv = -1; break; }
2615 if (after_sb_0p == ASB_DISCARD_ZERO_CHG)
2617 case ASB_DISCARD_LEAST_CHG:
2618 if (ch_self < ch_peer)
2620 else if (ch_self > ch_peer)
2622 else /* ( ch_self == ch_peer ) */
2623 /* Well, then use something else. */
2624 rv = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags)
2627 case ASB_DISCARD_LOCAL:
2630 case ASB_DISCARD_REMOTE:
2637 static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
2640 enum drbd_after_sb_p after_sb_1p;
2643 after_sb_1p = rcu_dereference(mdev->tconn->net_conf)->after_sb_1p;
2645 switch (after_sb_1p) {
2646 case ASB_DISCARD_YOUNGER_PRI:
2647 case ASB_DISCARD_OLDER_PRI:
2648 case ASB_DISCARD_LEAST_CHG:
2649 case ASB_DISCARD_LOCAL:
2650 case ASB_DISCARD_REMOTE:
2651 case ASB_DISCARD_ZERO_CHG:
2652 dev_err(DEV, "Configuration error.\n");
2654 case ASB_DISCONNECT:
2657 hg = drbd_asb_recover_0p(mdev);
2658 if (hg == -1 && mdev->state.role == R_SECONDARY)
2660 if (hg == 1 && mdev->state.role == R_PRIMARY)
2664 rv = drbd_asb_recover_0p(mdev);
2666 case ASB_DISCARD_SECONDARY:
2667 return mdev->state.role == R_PRIMARY ? 1 : -1;
2668 case ASB_CALL_HELPER:
2669 hg = drbd_asb_recover_0p(mdev);
2670 if (hg == -1 && mdev->state.role == R_PRIMARY) {
2671 enum drbd_state_rv rv2;
2673 drbd_set_role(mdev, R_SECONDARY, 0);
2674 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2675 * we might be here in C_WF_REPORT_PARAMS which is transient.
2676 * we do not need to wait for the after state change work either. */
2677 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2678 if (rv2 != SS_SUCCESS) {
2679 drbd_khelper(mdev, "pri-lost-after-sb");
2681 dev_warn(DEV, "Successfully gave up primary role.\n");
2691 static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
2694 enum drbd_after_sb_p after_sb_2p;
2697 after_sb_2p = rcu_dereference(mdev->tconn->net_conf)->after_sb_2p;
2699 switch (after_sb_2p) {
2700 case ASB_DISCARD_YOUNGER_PRI:
2701 case ASB_DISCARD_OLDER_PRI:
2702 case ASB_DISCARD_LEAST_CHG:
2703 case ASB_DISCARD_LOCAL:
2704 case ASB_DISCARD_REMOTE:
2706 case ASB_DISCARD_SECONDARY:
2707 case ASB_DISCARD_ZERO_CHG:
2708 dev_err(DEV, "Configuration error.\n");
2711 rv = drbd_asb_recover_0p(mdev);
2713 case ASB_DISCONNECT:
2715 case ASB_CALL_HELPER:
2716 hg = drbd_asb_recover_0p(mdev);
2718 enum drbd_state_rv rv2;
2720 /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
2721 * we might be here in C_WF_REPORT_PARAMS which is transient.
2722 * we do not need to wait for the after state change work either. */
2723 rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
2724 if (rv2 != SS_SUCCESS) {
2725 drbd_khelper(mdev, "pri-lost-after-sb");
2727 dev_warn(DEV, "Successfully gave up primary role.\n");
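
/* Illustrative sketch, not part of the driver: the recover_{0,1,2}p helpers
 * above share one return convention -- positive means "keep our data, the
 * peer syncs from us", negative means the opposite, and -100 means the
 * policy could not decide.  The hypothetical helper below restates just the
 * discard-least-changes rule in isolation.
 */
static int __maybe_unused example_discard_least_changes(
	unsigned long ch_self, unsigned long ch_peer, bool resolve_conflicts)
{
	if (ch_self < ch_peer)
		return -1;	/* we changed less: discard our changes */
	if (ch_self > ch_peer)
		return 1;	/* peer changed less: peer discards */
	/* equal change counts: fall back to the arbitrary tie-breaker bit */
	return resolve_conflicts ? -1 : 1;
}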
2737 static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
2738 u64 bits, u64 flags)
2741 dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
2744 dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
2746 (unsigned long long)uuid[UI_CURRENT],
2747 (unsigned long long)uuid[UI_BITMAP],
2748 (unsigned long long)uuid[UI_HISTORY_START],
2749 (unsigned long long)uuid[UI_HISTORY_END],
2750 (unsigned long long)bits,
2751 (unsigned long long)flags);
2755 100 after split brain try auto recover
2756 2 C_SYNC_SOURCE set BitMap
2757 1 C_SYNC_SOURCE use BitMap
2758 0 no Sync
2759 -1 C_SYNC_TARGET use BitMap
2760 -2 C_SYNC_TARGET set BitMap
2761 -100 after split brain, disconnect
2762 -1000 unrelated data
2763 -1091 requires proto 91
2764 -1096 requires proto 96
2766 static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
2771 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2772 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2775 if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
2779 if ((self == UUID_JUST_CREATED || self == (u64)0) &&
2780 peer != UUID_JUST_CREATED)
2784 if (self != UUID_JUST_CREATED &&
2785 (peer == UUID_JUST_CREATED || peer == (u64)0))
2789 int rct, dc; /* roles at crash time */
2791 if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {
2793 if (mdev->tconn->agreed_pro_version < 91)
2796 if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
2797 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
2798 dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
2799 drbd_uuid_set_bm(mdev, 0UL);
2801 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2802 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2805 dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");
2812 if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {
2814 if (mdev->tconn->agreed_pro_version < 91)
2817 if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
2818 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
2819 dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");
2821 mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
2822 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
2823 mdev->p_uuid[UI_BITMAP] = 0UL;
2825 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2828 dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");
2835 /* Common power [off|failure] */
2836 rct = (test_bit(CRASHED_PRIMARY, &mdev->flags) ? 1 : 0) +
2837 (mdev->p_uuid[UI_FLAGS] & 2);
2838 /* lowest bit is set when we were primary,
2839 * next bit (weight 2) is set when peer was primary */
2843 case 0: /* !self_pri && !peer_pri */ return 0;
2844 case 1: /* self_pri && !peer_pri */ return 1;
2845 case 2: /* !self_pri && peer_pri */ return -1;
2846 case 3: /* self_pri && peer_pri */
2847 dc = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
2853 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2858 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2860 if (mdev->tconn->agreed_pro_version < 96 ?
2861 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2862 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2863 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
2864 /* The last P_SYNC_UUID did not get through. Undo the
2865    peer's UUID modifications from its last start of resync as sync source. */
2867 if (mdev->tconn->agreed_pro_version < 91)
2870 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2871 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2873 dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
2874 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2881 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2882 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2883 peer = mdev->p_uuid[i] & ~((u64)1);
2889 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2890 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2895 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2897 if (mdev->tconn->agreed_pro_version < 96 ?
2898 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2899 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2900 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
2901 /* The last P_SYNC_UUID did not get through. Undo our
2902    own UUID modifications from our last start of resync as sync source. */
2904 if (mdev->tconn->agreed_pro_version < 91)
2907 _drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2908 _drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2910 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
2911 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2912 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2920 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2921 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2922 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2928 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2929 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2930 if (self == peer && self != ((u64)0))
2934 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2935 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2936 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2937 peer = mdev->p_uuid[j] & ~((u64)1);
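
/* Illustrative sketch, not part of the driver: every comparison above masks
 * the lowest UUID bit with ~((u64)1), because that bit is used as a role
 * flag and must not make two otherwise identical data generations look
 * different.  Hypothetical helper:
 */
static bool __maybe_unused example_same_data_generation(u64 self_uuid, u64 peer_uuid)
{
	return (self_uuid & ~((u64)1)) == (peer_uuid & ~((u64)1));
}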
2946 /* drbd_sync_handshake() returns the new conn state on success, or
2947    C_MASK (-1) on failure.
2949 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2950 enum drbd_disk_state peer_disk) __must_hold(local)
2952 enum drbd_conns rv = C_MASK;
2953 enum drbd_disk_state mydisk;
2954 struct net_conf *nc;
2955 int hg, rule_nr, rr_conflict, tentative;
2957 mydisk = mdev->state.disk;
2958 if (mydisk == D_NEGOTIATING)
2959 mydisk = mdev->new_state_tmp.disk;
2961 dev_info(DEV, "drbd_sync_handshake:\n");
2962 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2963 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2964 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2966 hg = drbd_uuid_compare(mdev, &rule_nr);
2968 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2971 dev_alert(DEV, "Unrelated data, aborting!\n");
2975 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
2979 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2980 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2981 int f = (hg == -100) || abs(hg) == 2;
2982 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2985 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2986 hg > 0 ? "source" : "target");
2990 drbd_khelper(mdev, "initial-split-brain");
2993 nc = rcu_dereference(mdev->tconn->net_conf);
2995 if (hg == 100 || (hg == -100 && nc->always_asbp)) {
2996 int pcount = (mdev->state.role == R_PRIMARY)
2997 + (peer_role == R_PRIMARY);
2998 int forced = (hg == -100);
3002 hg = drbd_asb_recover_0p(mdev);
3005 hg = drbd_asb_recover_1p(mdev);
3008 hg = drbd_asb_recover_2p(mdev);
3011 if (abs(hg) < 100) {
3012 dev_warn(DEV, "Split-Brain detected, %d primaries, "
3013 "automatically solved. Sync from %s node\n",
3014 pcount, (hg < 0) ? "peer" : "this");
3016 dev_warn(DEV, "Doing a full sync, since"
3017 " UUIDs were ambiguous.\n");
3024 if (test_bit(DISCARD_MY_DATA, &mdev->flags) && !(mdev->p_uuid[UI_FLAGS]&1))
3026 if (!test_bit(DISCARD_MY_DATA, &mdev->flags) && (mdev->p_uuid[UI_FLAGS]&1))
3030 dev_warn(DEV, "Split-Brain detected, manually solved. "
3031 "Sync from %s node\n",
3032 (hg < 0) ? "peer" : "this");
3034 rr_conflict = nc->rr_conflict;
3035 tentative = nc->tentative;
3039 /* FIXME this log message is not correct if we end up here
3040 * after an attempted attach on a diskless node.
3041 * We just refuse to attach -- well, we drop the "connection"
3042 * to that disk, in a way... */
3043 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
3044 drbd_khelper(mdev, "split-brain");
3048 if (hg > 0 && mydisk <= D_INCONSISTENT) {
3049 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
3053 if (hg < 0 && /* by intention we do not use mydisk here. */
3054 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
3055 switch (rr_conflict) {
3056 case ASB_CALL_HELPER:
3057 drbd_khelper(mdev, "pri-lost");
3059 case ASB_DISCONNECT:
3060 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
3063 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
3064 "-assumption\n");
3068 if (tentative || test_bit(CONN_DRY_RUN, &mdev->tconn->flags)) {
3070 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
3072 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.\n",
3073 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
3074 abs(hg) >= 2 ? "full" : "bit-map based");
3079 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
3080 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
3081 BM_LOCKED_SET_ALLOWED))
3085 if (hg > 0) { /* become sync source. */
3087 } else if (hg < 0) { /* become sync target */
3091 if (drbd_bm_total_weight(mdev)) {
3092 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
3093 drbd_bm_total_weight(mdev));
3100 static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
3102 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
3103 if (peer == ASB_DISCARD_REMOTE)
3104 return ASB_DISCARD_LOCAL;
3106 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
3107 if (peer == ASB_DISCARD_LOCAL)
3108 return ASB_DISCARD_REMOTE;
3110 /* everything else is valid if they are equal on both sides. */
3114 static int receive_protocol(struct drbd_tconn *tconn, struct packet_info *pi)
3116 struct p_protocol *p = pi->data;
3117 enum drbd_after_sb_p p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
3118 int p_proto, p_discard_my_data, p_two_primaries, cf;
3119 struct net_conf *nc, *old_net_conf, *new_net_conf = NULL;
3120 char integrity_alg[SHARED_SECRET_MAX] = "";
3121 struct crypto_hash *peer_integrity_tfm = NULL;
3122 void *int_dig_in = NULL, *int_dig_vv = NULL;
3124 p_proto = be32_to_cpu(p->protocol);
3125 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
3126 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
3127 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
3128 p_two_primaries = be32_to_cpu(p->two_primaries);
3129 cf = be32_to_cpu(p->conn_flags);
3130 p_discard_my_data = cf & CF_DISCARD_MY_DATA;
3132 if (tconn->agreed_pro_version >= 87) {
3135 if (pi->size > sizeof(integrity_alg))
3137 err = drbd_recv_all(tconn, integrity_alg, pi->size);
3140 integrity_alg[SHARED_SECRET_MAX - 1] = 0;
3143 if (pi->cmd != P_PROTOCOL_UPDATE) {
3144 clear_bit(CONN_DRY_RUN, &tconn->flags);
3146 if (cf & CF_DRY_RUN)
3147 set_bit(CONN_DRY_RUN, &tconn->flags);
3150 nc = rcu_dereference(tconn->net_conf);
3152 if (p_proto != nc->wire_protocol) {
3153 conn_err(tconn, "incompatible %s settings\n", "protocol");
3154 goto disconnect_rcu_unlock;
3157 if (convert_after_sb(p_after_sb_0p) != nc->after_sb_0p) {
3158 conn_err(tconn, "incompatible %s settings\n", "after-sb-0pri");
3159 goto disconnect_rcu_unlock;
3162 if (convert_after_sb(p_after_sb_1p) != nc->after_sb_1p) {
3163 conn_err(tconn, "incompatible %s settings\n", "after-sb-1pri");
3164 goto disconnect_rcu_unlock;
3167 if (convert_after_sb(p_after_sb_2p) != nc->after_sb_2p) {
3168 conn_err(tconn, "incompatible %s settings\n", "after-sb-2pri");
3169 goto disconnect_rcu_unlock;
3172 if (p_discard_my_data && nc->discard_my_data) {
3173 conn_err(tconn, "incompatible %s settings\n", "discard-my-data");
3174 goto disconnect_rcu_unlock;
3177 if (p_two_primaries != nc->two_primaries) {
3178 conn_err(tconn, "incompatible %s settings\n", "allow-two-primaries");
3179 goto disconnect_rcu_unlock;
3182 if (strcmp(integrity_alg, nc->integrity_alg)) {
3183 conn_err(tconn, "incompatible %s settings\n", "data-integrity-alg");
3184 goto disconnect_rcu_unlock;
3190 if (integrity_alg[0]) {
3194 * We can only change the peer data integrity algorithm
3195 * here. Changing our own data integrity algorithm
3196 * requires that we send a P_PROTOCOL_UPDATE packet at
3197 * the same time; otherwise, the peer has no way to
3198 * tell between which packets the algorithm should change.
3202 peer_integrity_tfm = crypto_alloc_hash(integrity_alg, 0, CRYPTO_ALG_ASYNC);
3203 if (!peer_integrity_tfm) {
3204 conn_err(tconn, "peer data-integrity-alg %s not supported\n",
3209 hash_size = crypto_hash_digestsize(peer_integrity_tfm);
3210 int_dig_in = kmalloc(hash_size, GFP_KERNEL);
3211 int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
3212 if (!(int_dig_in && int_dig_vv)) {
3213 conn_err(tconn, "Allocation of buffers for data integrity checking failed\n");
3218 new_net_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
3219 if (!new_net_conf) {
3220 conn_err(tconn, "Allocation of new net_conf failed\n");
3224 mutex_lock(&tconn->data.mutex);
3225 mutex_lock(&tconn->conf_update);
3226 old_net_conf = tconn->net_conf;
3227 *new_net_conf = *old_net_conf;
3229 new_net_conf->wire_protocol = p_proto;
3230 new_net_conf->after_sb_0p = convert_after_sb(p_after_sb_0p);
3231 new_net_conf->after_sb_1p = convert_after_sb(p_after_sb_1p);
3232 new_net_conf->after_sb_2p = convert_after_sb(p_after_sb_2p);
3233 new_net_conf->two_primaries = p_two_primaries;
3235 rcu_assign_pointer(tconn->net_conf, new_net_conf);
3236 mutex_unlock(&tconn->conf_update);
3237 mutex_unlock(&tconn->data.mutex);
3239 crypto_free_hash(tconn->peer_integrity_tfm);
3240 kfree(tconn->int_dig_in);
3241 kfree(tconn->int_dig_vv);
3242 tconn->peer_integrity_tfm = peer_integrity_tfm;
3243 tconn->int_dig_in = int_dig_in;
3244 tconn->int_dig_vv = int_dig_vv;
3246 if (strcmp(old_net_conf->integrity_alg, integrity_alg))
3247 conn_info(tconn, "peer data-integrity-alg: %s\n",
3248 integrity_alg[0] ? integrity_alg : "(none)");
3251 kfree(old_net_conf);
3254 disconnect_rcu_unlock:
3257 crypto_free_hash(peer_integrity_tfm);
3260 conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3265 * input: alg name, feature name
3266 * return: NULL (alg name was "")
3267 * ERR_PTR(error) if something goes wrong
3268 * or the crypto hash ptr, if it worked out ok. */
3269 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
3270 const char *alg, const char *name)
3272 struct crypto_hash *tfm;
3277 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
3279 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
3280 alg, name, PTR_ERR(tfm));
3286 static int ignore_remaining_packet(struct drbd_tconn *tconn, struct packet_info *pi)
3288 void *buffer = tconn->data.rbuf;
3289 int size = pi->size;
3292 int s = min_t(int, size, DRBD_SOCKET_BUFFER_SIZE);
3293 s = drbd_recv(tconn, buffer, s);
3307 * config_unknown_volume - device configuration command for unknown volume
3309 * When a device is added to an existing connection, the node on which the
3310 * device is added first will send configuration commands to its peer but the
3311 * peer will not know about the device yet. It will warn and ignore these
3312 * commands. Once the device is added on the second node, the second node will
3313 * send the same device configuration commands, but in the other direction.
3315 * (We can also end up here if drbd is misconfigured.)
3317 static int config_unknown_volume(struct drbd_tconn *tconn, struct packet_info *pi)
3319 conn_warn(tconn, "%s packet received for volume %u, which is not configured locally\n",
3320 cmdname(pi->cmd), pi->vnr);
3321 return ignore_remaining_packet(tconn, pi);
3324 static int receive_SyncParam(struct drbd_tconn *tconn, struct packet_info *pi)
3326 struct drbd_conf *mdev;
3327 struct p_rs_param_95 *p;
3328 unsigned int header_size, data_size, exp_max_sz;
3329 struct crypto_hash *verify_tfm = NULL;
3330 struct crypto_hash *csums_tfm = NULL;
3331 struct net_conf *old_net_conf, *new_net_conf = NULL;
3332 struct disk_conf *old_disk_conf = NULL, *new_disk_conf = NULL;
3333 const int apv = tconn->agreed_pro_version;
3334 struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
3338 mdev = vnr_to_mdev(tconn, pi->vnr);
3340 return config_unknown_volume(tconn, pi);
3342 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
3343 : apv == 88 ? sizeof(struct p_rs_param)
3345 : apv <= 94 ? sizeof(struct p_rs_param_89)
3346 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
3348 if (pi->size > exp_max_sz) {
3349 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
3350 pi->size, exp_max_sz);
3355 header_size = sizeof(struct p_rs_param);
3356 data_size = pi->size - header_size;
3357 } else if (apv <= 94) {
3358 header_size = sizeof(struct p_rs_param_89);
3359 data_size = pi->size - header_size;
3360 D_ASSERT(data_size == 0);
3362 header_size = sizeof(struct p_rs_param_95);
3363 data_size = pi->size - header_size;
3364 D_ASSERT(data_size == 0);
3367 /* initialize verify_alg and csums_alg */
3369 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
3371 err = drbd_recv_all(mdev->tconn, p, header_size);
3375 mutex_lock(&mdev->tconn->conf_update);
3376 old_net_conf = mdev->tconn->net_conf;
3377 if (get_ldev(mdev)) {
3378 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3379 if (!new_disk_conf) {
3381 mutex_unlock(&mdev->tconn->conf_update);
3382 dev_err(DEV, "Allocation of new disk_conf failed\n");
3386 old_disk_conf = mdev->ldev->disk_conf;
3387 *new_disk_conf = *old_disk_conf;
3389 new_disk_conf->resync_rate = be32_to_cpu(p->resync_rate);
3394 if (data_size > SHARED_SECRET_MAX || data_size == 0) {
3395 dev_err(DEV, "verify-alg of wrong size, "
3396 "peer wants %u, accepting only up to %u byte\n",
3397 data_size, SHARED_SECRET_MAX);
3402 err = drbd_recv_all(mdev->tconn, p->verify_alg, data_size);
3405 /* we expect NUL terminated string */
3406 /* but just in case someone tries to be evil */
3407 D_ASSERT(p->verify_alg[data_size-1] == 0);
3408 p->verify_alg[data_size-1] = 0;
3410 } else /* apv >= 89 */ {
3411 /* we still expect NUL terminated strings */
3412 /* but just in case someone tries to be evil */
3413 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
3414 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
3415 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
3416 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
3419 if (strcmp(old_net_conf->verify_alg, p->verify_alg)) {
3420 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3421 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
3422 old_net_conf->verify_alg, p->verify_alg);
3425 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
3426 p->verify_alg, "verify-alg");
3427 if (IS_ERR(verify_tfm)) {
3433 if (apv >= 89 && strcmp(old_net_conf->csums_alg, p->csums_alg)) {
3434 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
3435 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
3436 old_net_conf->csums_alg, p->csums_alg);
3439 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
3440 p->csums_alg, "csums-alg");
3441 if (IS_ERR(csums_tfm)) {
3447 if (apv > 94 && new_disk_conf) {
3448 new_disk_conf->c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
3449 new_disk_conf->c_delay_target = be32_to_cpu(p->c_delay_target);
3450 new_disk_conf->c_fill_target = be32_to_cpu(p->c_fill_target);
3451 new_disk_conf->c_max_rate = be32_to_cpu(p->c_max_rate);
3453 fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
3454 if (fifo_size != mdev->rs_plan_s->size) {
3455 new_plan = fifo_alloc(fifo_size);
3457 dev_err(DEV, "kmalloc of fifo_buffer failed\n");
3464 if (verify_tfm || csums_tfm) {
3465 new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
3466 if (!new_net_conf) {
3467 dev_err(DEV, "Allocation of new net_conf failed\n");
3471 *new_net_conf = *old_net_conf;
3474 strcpy(new_net_conf->verify_alg, p->verify_alg);
3475 new_net_conf->verify_alg_len = strlen(p->verify_alg) + 1;
3476 crypto_free_hash(mdev->tconn->verify_tfm);
3477 mdev->tconn->verify_tfm = verify_tfm;
3478 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
3481 strcpy(new_net_conf->csums_alg, p->csums_alg);
3482 new_net_conf->csums_alg_len = strlen(p->csums_alg) + 1;
3483 crypto_free_hash(mdev->tconn->csums_tfm);
3484 mdev->tconn->csums_tfm = csums_tfm;
3485 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
3487 rcu_assign_pointer(tconn->net_conf, new_net_conf);
3491 if (new_disk_conf) {
3492 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3497 old_plan = mdev->rs_plan_s;
3498 rcu_assign_pointer(mdev->rs_plan_s, new_plan);
3501 mutex_unlock(&mdev->tconn->conf_update);
3504 kfree(old_net_conf);
3505 kfree(old_disk_conf);
3511 if (new_disk_conf) {
3513 kfree(new_disk_conf);
3515 mutex_unlock(&mdev->tconn->conf_update);
3520 if (new_disk_conf) {
3522 kfree(new_disk_conf);
3524 mutex_unlock(&mdev->tconn->conf_update);
3525 /* just for completeness: actually not needed,
3526 * as this is not reached if csums_tfm was ok. */
3527 crypto_free_hash(csums_tfm);
3528 /* but free the verify_tfm again, if csums_tfm did not work out */
3529 crypto_free_hash(verify_tfm);
3530 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3534 /* warn if the arguments differ by more than 12.5% */
3535 static void warn_if_differ_considerably(struct drbd_conf *mdev,
3536 const char *s, sector_t a, sector_t b)
3539 if (a == 0 || b == 0)
3541 d = (a > b) ? (a - b) : (b - a);
3542 if (d > (a>>3) || d > (b>>3))
3543 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
3544 (unsigned long long)a, (unsigned long long)b);
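
/* Worked example: a = 1000 sectors and b = 1200 sectors give d = 200;
 * a>>3 = 125, so d > a>>3 and we warn.  The shift by three is exactly the
 * one-eighth (12.5%) threshold mentioned above. */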
3547 static int receive_sizes(struct drbd_tconn *tconn, struct packet_info *pi)
3549 struct drbd_conf *mdev;
3550 struct p_sizes *p = pi->data;
3551 enum determine_dev_size dd = unchanged;
3552 sector_t p_size, p_usize, my_usize;
3553 int ldsc = 0; /* local disk size changed */
3554 enum dds_flags ddsf;
3556 mdev = vnr_to_mdev(tconn, pi->vnr);
3558 return config_unknown_volume(tconn, pi);
3560 p_size = be64_to_cpu(p->d_size);
3561 p_usize = be64_to_cpu(p->u_size);
3563 /* just store the peer's disk size for now.
3564 * we still need to figure out whether we accept that. */
3565 mdev->p_size = p_size;
3567 if (get_ldev(mdev)) {
3569 my_usize = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
3572 warn_if_differ_considerably(mdev, "lower level device sizes",
3573 p_size, drbd_get_max_capacity(mdev->ldev));
3574 warn_if_differ_considerably(mdev, "user requested size",
3577 /* if this is the first connect, or an otherwise expected
3578 * param exchange, choose the minimum */
3579 if (mdev->state.conn == C_WF_REPORT_PARAMS)
3580 p_usize = min_not_zero(my_usize, p_usize);
3582 /* Never shrink a device with usable data during connect.
3583 But allow online shrinking if we are connected. */
3584 if (drbd_new_dev_size(mdev, mdev->ldev, p_usize, 0) <
3585 drbd_get_capacity(mdev->this_bdev) &&
3586 mdev->state.disk >= D_OUTDATED &&
3587 mdev->state.conn < C_CONNECTED) {
3588 dev_err(DEV, "The peer's disk size is too small!\n");
3589 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3594 if (my_usize != p_usize) {
3595 struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
3597 new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
3598 if (!new_disk_conf) {
3599 dev_err(DEV, "Allocation of new disk_conf failed\n");
3604 mutex_lock(&mdev->tconn->conf_update);
3605 old_disk_conf = mdev->ldev->disk_conf;
3606 *new_disk_conf = *old_disk_conf;
3607 new_disk_conf->disk_size = p_usize;
3609 rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
3610 mutex_unlock(&mdev->tconn->conf_update);
3612 kfree(old_disk_conf);
3614 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3615 (unsigned long)p_usize);
3621 ddsf = be16_to_cpu(p->dds_flags);
3622 if (get_ldev(mdev)) {
3623 dd = drbd_determine_dev_size(mdev, ddsf);
3625 if (dd == dev_size_error)
3629 /* I am diskless, need to accept the peer's size. */
3630 drbd_set_my_capacity(mdev, p_size);
3633 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3634 drbd_reconsider_max_bio_size(mdev);
3636 if (get_ldev(mdev)) {
3637 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3638 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3645 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3646 if (be64_to_cpu(p->c_size) !=
3647 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3648 /* we have different sizes, probably peer
3649 * needs to know my new size... */
3650 drbd_send_sizes(mdev, 0, ddsf);
3652 if (test_and_clear_bit(RESIZE_PENDING, &mdev->flags) ||
3653 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3654 if (mdev->state.pdsk >= D_INCONSISTENT &&
3655 mdev->state.disk >= D_INCONSISTENT) {
3656 if (ddsf & DDSF_NO_RESYNC)
3657 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3659 resync_after_online_grow(mdev);
3661 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
3668 static int receive_uuids(struct drbd_tconn *tconn, struct packet_info *pi)
3670 struct drbd_conf *mdev;
3671 struct p_uuids *p = pi->data;
3673 int i, updated_uuids = 0;
3675 mdev = vnr_to_mdev(tconn, pi->vnr);
3677 return config_unknown_volume(tconn, pi);
3679 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3681 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3682 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3684 kfree(mdev->p_uuid);
3685 mdev->p_uuid = p_uuid;
3687 if (mdev->state.conn < C_CONNECTED &&
3688 mdev->state.disk < D_INCONSISTENT &&
3689 mdev->state.role == R_PRIMARY &&
3690 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3691 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3692 (unsigned long long)mdev->ed_uuid);
3693 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3697 if (get_ldev(mdev)) {
3698 int skip_initial_sync =
3699 mdev->state.conn == C_CONNECTED &&
3700 mdev->tconn->agreed_pro_version >= 90 &&
3701 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3702 (p_uuid[UI_FLAGS] & 8);
3703 if (skip_initial_sync) {
3704 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3705 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3706 "clear_n_write from receive_uuids",
3707 BM_LOCKED_TEST_ALLOWED);
3708 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3709 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3710 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3716 } else if (mdev->state.disk < D_INCONSISTENT &&
3717 mdev->state.role == R_PRIMARY) {
3718 /* I am a diskless primary, the peer just created a new current UUID
3719    for me. */
3720 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3723 /* Before we test for the disk state, we should wait until a possibly
3724    ongoing cluster-wide state change has finished. That is important if
3725    we are primary and are detaching from our disk. We need to see the
3726    new disk state... */
3727 mutex_lock(mdev->state_mutex);
3728 mutex_unlock(mdev->state_mutex);
3729 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3730 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3733 drbd_print_uuids(mdev, "receiver updated UUIDs to");
3739 * convert_state() - Converts the peer's view of the cluster state to our point of view
3740 * @ps: The state as seen by the peer.
3742 static union drbd_state convert_state(union drbd_state ps)
3744 union drbd_state ms;
3746 static enum drbd_conns c_tab[] = {
3747 [C_WF_REPORT_PARAMS] = C_WF_REPORT_PARAMS,
3748 [C_CONNECTED] = C_CONNECTED,
3750 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3751 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3752 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3753 [C_VERIFY_S] = C_VERIFY_T,
3759 ms.conn = c_tab[ps.conn];
3764 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
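
/* Example: a peer reporting (role=Primary, conn=SyncSource, disk=UpToDate)
 * is seen from our point of view as (peer=Primary, conn=SyncTarget,
 * pdsk=UpToDate): every asymmetric field is mirrored, symmetric ones are
 * taken over unchanged. */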
3769 static int receive_req_state(struct drbd_tconn *tconn, struct packet_info *pi)
3771 struct drbd_conf *mdev;
3772 struct p_req_state *p = pi->data;
3773 union drbd_state mask, val;
3774 enum drbd_state_rv rv;
3776 mdev = vnr_to_mdev(tconn, pi->vnr);
3780 mask.i = be32_to_cpu(p->mask);
3781 val.i = be32_to_cpu(p->val);
3783 if (test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags) &&
3784 mutex_is_locked(mdev->state_mutex)) {
3785 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3789 mask = convert_state(mask);
3790 val = convert_state(val);
3792 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3793 drbd_send_sr_reply(mdev, rv);
3800 static int receive_req_conn_state(struct drbd_tconn *tconn, struct packet_info *pi)
3802 struct p_req_state *p = pi->data;
3803 union drbd_state mask, val;
3804 enum drbd_state_rv rv;
3806 mask.i = be32_to_cpu(p->mask);
3807 val.i = be32_to_cpu(p->val);
3809 if (test_bit(RESOLVE_CONFLICTS, &tconn->flags) &&
3810 mutex_is_locked(&tconn->cstate_mutex)) {
3811 conn_send_sr_reply(tconn, SS_CONCURRENT_ST_CHG);
3815 mask = convert_state(mask);
3816 val = convert_state(val);
3818 rv = conn_request_state(tconn, mask, val, CS_VERBOSE | CS_LOCAL_ONLY | CS_IGN_OUTD_FAIL);
3819 conn_send_sr_reply(tconn, rv);
3824 static int receive_state(struct drbd_tconn *tconn, struct packet_info *pi)
3826 struct drbd_conf *mdev;
3827 struct p_state *p = pi->data;
3828 union drbd_state os, ns, peer_state;
3829 enum drbd_disk_state real_peer_disk;
3830 enum chg_state_flags cs_flags;
3833 mdev = vnr_to_mdev(tconn, pi->vnr);
3835 return config_unknown_volume(tconn, pi);
3837 peer_state.i = be32_to_cpu(p->state);
3839 real_peer_disk = peer_state.disk;
3840 if (peer_state.disk == D_NEGOTIATING) {
3841 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3842 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3845 spin_lock_irq(&mdev->tconn->req_lock);
3847 os = ns = drbd_read_state(mdev);
3848 spin_unlock_irq(&mdev->tconn->req_lock);
3850 /* If some other part of the code (asender thread, timeout)
3851 * already decided to close the connection again,
3852 * we must not "re-establish" it here. */
3853 if (os.conn <= C_TEAR_DOWN)
3856 /* If this is the "end of sync" confirmation, usually the peer disk
3857 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
3858 * set) resync started in PausedSyncT, or if the timing of pause-/
3859 * unpause-sync events has been "just right", the peer disk may
3860 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
3862 if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
3863 real_peer_disk == D_UP_TO_DATE &&
3864 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3865 /* If we are (becoming) SyncSource, but peer is still in sync
3866 * preparation, ignore its uptodate-ness to avoid flapping, it
3867 * will change to inconsistent once the peer reaches active
3869 * It may have changed syncer-paused flags, however, so we
3870 * cannot ignore this completely. */
3871 if (peer_state.conn > C_CONNECTED &&
3872 peer_state.conn < C_SYNC_SOURCE)
3873 real_peer_disk = D_INCONSISTENT;
3875 /* if peer_state changes to connected at the same time,
3876 * it explicitly notifies us that it finished resync.
3877 * Maybe we should finish it up, too? */
3878 else if (os.conn >= C_SYNC_SOURCE &&
3879 peer_state.conn == C_CONNECTED) {
3880 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3881 drbd_resync_finished(mdev);
3886 /* explicit verify finished notification, stop sector reached. */
3887 if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
3888 peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
3889 ov_out_of_sync_print(mdev);
3890 drbd_resync_finished(mdev);
3894 /* peer says his disk is inconsistent, while we think it is uptodate,
3895 * and this happens while the peer still thinks we have a sync going on,
3896 * but we think we are already done with the sync.
3897 * We ignore this to avoid flapping pdsk.
3898 * This should not happen if the peer is a recent version of drbd. */
3899 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3900 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3901 real_peer_disk = D_UP_TO_DATE;
3903 if (ns.conn == C_WF_REPORT_PARAMS)
3904 ns.conn = C_CONNECTED;
3906 if (peer_state.conn == C_AHEAD)
3909 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3910 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3911 int cr; /* consider resync */
3913 /* if we established a new connection */
3914 cr = (os.conn < C_CONNECTED);
3915 /* if we had an established connection
3916 * and one of the nodes newly attaches a disk */
3917 cr |= (os.conn == C_CONNECTED &&
3918 (peer_state.disk == D_NEGOTIATING ||
3919 os.disk == D_NEGOTIATING));
3920 /* if we have both been inconsistent, and the peer has been
3921 * forced to be UpToDate with --overwrite-data */
3922 cr |= test_bit(CONSIDER_RESYNC, &mdev->flags);
3923 /* if we had been plain connected, and the admin requested to
3924 * start a sync by "invalidate" or "invalidate-remote" */
3925 cr |= (os.conn == C_CONNECTED &&
3926 (peer_state.conn >= C_STARTING_SYNC_S &&
3927 peer_state.conn <= C_WF_BITMAP_T));
3930 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3933 if (ns.conn == C_MASK) {
3934 ns.conn = C_CONNECTED;
3935 if (mdev->state.disk == D_NEGOTIATING) {
3936 drbd_force_state(mdev, NS(disk, D_FAILED));
3937 } else if (peer_state.disk == D_NEGOTIATING) {
3938 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3939 peer_state.disk = D_DISKLESS;
3940 real_peer_disk = D_DISKLESS;
3942 if (test_and_clear_bit(CONN_DRY_RUN, &mdev->tconn->flags))
3944 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
3945 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3951 spin_lock_irq(&mdev->tconn->req_lock);
3952 if (os.i != drbd_read_state(mdev).i)
3954 clear_bit(CONSIDER_RESYNC, &mdev->flags);
3955 ns.peer = peer_state.role;
3956 ns.pdsk = real_peer_disk;
3957 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3958 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3959 ns.disk = mdev->new_state_tmp.disk;
3960 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3961 if (ns.pdsk == D_CONSISTENT && drbd_suspended(mdev) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
3962 test_bit(NEW_CUR_UUID, &mdev->flags)) {
3963 /* Do not allow tl_restart(RESEND) for a rebooted peer. We can only allow this
3964    for temporary network outages! */
3965 spin_unlock_irq(&mdev->tconn->req_lock);
3966 dev_err(DEV, "Aborting Connect, can not thaw IO with an only Consistent peer\n");
3967 tl_clear(mdev->tconn);
3968 drbd_uuid_new_current(mdev);
3969 clear_bit(NEW_CUR_UUID, &mdev->flags);
3970 conn_request_state(mdev->tconn, NS2(conn, C_PROTOCOL_ERROR, susp, 0), CS_HARD);
3973 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
3974 ns = drbd_read_state(mdev);
3975 spin_unlock_irq(&mdev->tconn->req_lock);
3977 if (rv < SS_SUCCESS) {
3978 conn_request_state(mdev->tconn, NS(conn, C_DISCONNECTING), CS_HARD);
3982 if (os.conn > C_WF_REPORT_PARAMS) {
3983 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3984 peer_state.disk != D_NEGOTIATING ) {
3985 /* we want resync, peer has not yet decided to sync... */
3986 /* Nowadays only used when forcing a node into primary role and
3987 setting its disk to UpToDate with that */
3988 drbd_send_uuids(mdev);
3989 drbd_send_current_state(mdev);
3993 clear_bit(DISCARD_MY_DATA, &mdev->flags);
3995 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
4000 static int receive_sync_uuid(struct drbd_tconn *tconn, struct packet_info *pi)
4002 struct drbd_conf *mdev;
4003 struct p_rs_uuid *p = pi->data;
4005 mdev = vnr_to_mdev(tconn, pi->vnr);
4009 wait_event(mdev->misc_wait,
4010 mdev->state.conn == C_WF_SYNC_UUID ||
4011 mdev->state.conn == C_BEHIND ||
4012 mdev->state.conn < C_CONNECTED ||
4013 mdev->state.disk < D_NEGOTIATING);
4015 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
4017 /* Here the _drbd_uuid_ functions are right, current should
4018 _not_ be rotated into the history */
4019 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
4020 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
4021 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
4023 drbd_print_uuids(mdev, "updated sync uuid");
4024 drbd_start_resync(mdev, C_SYNC_TARGET);
4028 dev_err(DEV, "Ignoring SyncUUID packet!\n");
4034 * receive_bitmap_plain
4036 * Return 0 when done, 1 when another iteration is needed, and a negative error
4037 * code upon failure.
4040 receive_bitmap_plain(struct drbd_conf *mdev, unsigned int size,
4041 unsigned long *p, struct bm_xfer_ctx *c)
4043 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE -
4044 drbd_header_size(mdev->tconn);
4045 unsigned int num_words = min_t(size_t, data_size / sizeof(*p),
4046 c->bm_words - c->word_offset);
4047 unsigned int want = num_words * sizeof(*p);
4051 dev_err(DEV, "%s:want (%u) != size (%u)\n", __func__, want, size);
4056 err = drbd_recv_all(mdev->tconn, p, want);
4060 drbd_bm_merge_lel(mdev, c->word_offset, num_words, p);
4062 c->word_offset += num_words;
4063 c->bit_offset = c->word_offset * BITS_PER_LONG;
4064 if (c->bit_offset > c->bm_bits)
4065 c->bit_offset = c->bm_bits;
4070 static enum drbd_bitmap_code dcbp_get_code(struct p_compressed_bm *p)
4072 return (enum drbd_bitmap_code)(p->encoding & 0x0f);
4075 static int dcbp_get_start(struct p_compressed_bm *p)
4077 return (p->encoding & 0x80) != 0;
4080 static int dcbp_get_pad_bits(struct p_compressed_bm *p)
4082 return (p->encoding >> 4) & 0x7;
4088 * Return 0 when done, 1 when another iteration is needed, and a negative error
4089 * code upon failure.
4092 recv_bm_rle_bits(struct drbd_conf *mdev,
4093 struct p_compressed_bm *p,
4094 struct bm_xfer_ctx *c,
4097 struct bitstream bs;
4101 unsigned long s = c->bit_offset;
4103 int toggle = dcbp_get_start(p);
4107 bitstream_init(&bs, p->code, len, dcbp_get_pad_bits(p));
4109 bits = bitstream_get_bits(&bs, &look_ahead, 64);
4113 for (have = bits; have > 0; s += rl, toggle = !toggle) {
4114 bits = vli_decode_bits(&rl, look_ahead);
4120 if (e >= c->bm_bits) {
4121 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
4124 _drbd_bm_set_bits(mdev, s, e);
4128 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
4129 have, bits, look_ahead,
4130 (unsigned int)(bs.cur.b - p->code),
4131 (unsigned int)bs.buf_len);
4134 look_ahead >>= bits;
4137 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
4140 look_ahead |= tmp << have;
4145 bm_xfer_ctx_bit_to_word_offset(c);
4147 return (s != c->bm_bits);
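
/* Illustrative sketch, not part of the driver: stripped of the VLI
 * bitstream handling, the loop above is a plain toggling run-length
 * decoder -- runs alternately describe clear and set regions, starting
 * with the state given by the start flag.  Hypothetical, array-based
 * restatement:
 */
static void __maybe_unused example_rle_toggle_decode(
	const unsigned long *runs, int nr_runs, bool first_run_set,
	void (*set_range)(unsigned long start, unsigned long end))
{
	unsigned long s = 0;
	bool toggle = first_run_set;
	int i;

	for (i = 0; i < nr_runs; i++) {
		if (toggle)	/* inclusive range, as with _drbd_bm_set_bits() */
			set_range(s, s + runs[i] - 1);
		s += runs[i];
		toggle = !toggle;
	}
}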
4153 * Return 0 when done, 1 when another iteration is needed, and a negative error
4154 * code upon failure.
4157 decode_bitmap_c(struct drbd_conf *mdev,
4158 struct p_compressed_bm *p,
4159 struct bm_xfer_ctx *c,
4162 if (dcbp_get_code(p) == RLE_VLI_Bits)
4163 return recv_bm_rle_bits(mdev, p, c, len - sizeof(*p));
4165 /* other variants had been implemented for evaluation,
4166 * but have been dropped as this one turned out to be "best"
4167 * during all our tests. */
4169 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
4170 conn_request_state(mdev->tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
4174 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
4175 const char *direction, struct bm_xfer_ctx *c)
4177 /* what would it take to transfer it "plaintext" */
4178 unsigned int header_size = drbd_header_size(mdev->tconn);
4179 unsigned int data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
4180 unsigned int plain =
4181 header_size * (DIV_ROUND_UP(c->bm_words, data_size) + 1) +
4182 c->bm_words * sizeof(unsigned long);
4183 unsigned int total = c->bytes[0] + c->bytes[1];
4186 /* total cannot be zero, but just in case: */
4190 /* don't report if not compressed */
4194 /* total < plain. check for overflow, still */
4195 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
4196 : (1000 * total / plain);
4202 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
4203 "total %u; compression: %u.%u%%\n",
4205 c->bytes[1], c->packets[1],
4206 c->bytes[0], c->packets[0],
4207 total, r/10, r % 10);
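
/* Worked example (assuming r is inverted to 1000 - r before being printed
 * as the compression saving): with bm_words = 12500 and 8-byte words,
 * plain is roughly 100000 bytes plus headers; total = 1234 compressed
 * bytes gives r = 1000 * 1234 / 100000 = 12, reported as
 * "compression: 98.8%". */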
4210 /* Since we are processing the bitfield from lower addresses to higher,
4211    it does not matter whether we process it in 32 bit or 64 bit chunks,
4212    as long as it is little endian. (Understand it as a byte stream,
4213    beginning with the lowest byte...) If we used big endian, we would
4214    need to process it from the highest address to the lowest, in order
4215    to be agnostic to the 32 vs 64 bit issue; see the illustrative
4216    snippet after receive_bitmap below.
4217    Returns 0 on success, a negative error code otherwise. */
4218 static int receive_bitmap(struct drbd_tconn *tconn, struct packet_info *pi)
4220 struct drbd_conf *mdev;
4221 struct bm_xfer_ctx c;
4224 mdev = vnr_to_mdev(tconn, pi->vnr);
4228 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
4229 /* you are supposed to send additional out-of-sync information
4230 * if you actually set bits during this phase */
4232 c = (struct bm_xfer_ctx) {
4233 .bm_bits = drbd_bm_bits(mdev),
4234 .bm_words = drbd_bm_words(mdev),
4238 if (pi->cmd == P_BITMAP)
4239 err = receive_bitmap_plain(mdev, pi->size, pi->data, &c);
4240 else if (pi->cmd == P_COMPRESSED_BITMAP) {
4241 /* MAYBE: sanity check that we speak proto >= 90,
4242 * and the feature is enabled! */
4243 struct p_compressed_bm *p = pi->data;
4245 if (pi->size > DRBD_SOCKET_BUFFER_SIZE - drbd_header_size(tconn)) {
4246 dev_err(DEV, "ReportCBitmap packet too large\n");
4250 if (pi->size <= sizeof(*p)) {
4251 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", pi->size);
4255 err = drbd_recv_all(mdev->tconn, p, pi->size);
4258 err = decode_bitmap_c(mdev, p, &c, pi->size);
4260 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", pi->cmd);
4265 c.packets[pi->cmd == P_BITMAP]++;
4266 c.bytes[pi->cmd == P_BITMAP] += drbd_header_size(tconn) + pi->size;
4273 err = drbd_recv_header(mdev->tconn, pi);
4278 INFO_bm_xfer_stats(mdev, "receive", &c);
4280 if (mdev->state.conn == C_WF_BITMAP_T) {
4281 enum drbd_state_rv rv;
4283 err = drbd_send_bitmap(mdev);
4286 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
4287 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
4288 D_ASSERT(rv == SS_SUCCESS);
4289 } else if (mdev->state.conn != C_WF_BITMAP_S) {
4290 /* admin may have requested C_DISCONNECTING,
4291 * other threads may have noticed network errors */
4292 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
4293 drbd_conn_str(mdev->state.conn));
4298 drbd_bm_unlock(mdev);
4299 if (!err && mdev->state.conn == C_WF_BITMAP_S)
4300 drbd_start_resync(mdev, C_SYNC_SOURCE);
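
/* Illustrative sketch, not part of the driver (referenced by the
 * endianness comment above receive_bitmap): on a little-endian host the
 * first 32-bit chunk of a buffer holds the same bytes -- and thus the same
 * low-order bitmap bits -- as the low half of the 64-bit word, so the
 * chunk size used for merging does not matter.
 */
static bool __maybe_unused example_le_chunk_size_irrelevant(const u8 buf[8])
{
	u32 first_chunk;
	u64 whole_word;

	memcpy(&first_chunk, buf, sizeof(first_chunk));
	memcpy(&whole_word, buf, sizeof(whole_word));

	/* true on little endian; on big endian the two generally differ */
	return first_chunk == (u32)whole_word;
}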
4304 static int receive_skip(struct drbd_tconn *tconn, struct packet_info *pi)
4306 conn_warn(tconn, "skipping unknown optional packet type %d, l: %d!\n",
4309 return ignore_remaining_packet(tconn, pi);
4312 static int receive_UnplugRemote(struct drbd_tconn *tconn, struct packet_info *pi)
4314 /* Make sure we've acked all the TCP data associated
4315 * with the data requests being unplugged */
4316 drbd_tcp_quickack(tconn->data.socket);
4321 static int receive_out_of_sync(struct drbd_tconn *tconn, struct packet_info *pi)
4323 struct drbd_conf *mdev;
4324 struct p_block_desc *p = pi->data;
4326 mdev = vnr_to_mdev(tconn, pi->vnr);
4330 switch (mdev->state.conn) {
4331 case C_WF_SYNC_UUID:
4336 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
4337 drbd_conn_str(mdev->state.conn));
4340 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
4348 int (*fn)(struct drbd_tconn *, struct packet_info *);
4351 static struct data_cmd drbd_cmd_handler[] = {
4352 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
4353 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
4354 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
4355 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
4356 [P_BITMAP] = { 1, 0, receive_bitmap } ,
4357 [P_COMPRESSED_BITMAP] = { 1, 0, receive_bitmap } ,
4358 [P_UNPLUG_REMOTE] = { 0, 0, receive_UnplugRemote },
4359 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4360 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4361 [P_SYNC_PARAM] = { 1, 0, receive_SyncParam },
4362 [P_SYNC_PARAM89] = { 1, 0, receive_SyncParam },
4363 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
4364 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
4365 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
4366 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
4367 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
4368 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
4369 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
4370 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4371 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
4372 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
4373 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
4374 [P_CONN_ST_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_conn_state },
4375 [P_PROTOCOL_UPDATE] = { 1, sizeof(struct p_protocol), receive_protocol },
4378 static void drbdd(struct drbd_tconn *tconn)
4380 struct packet_info pi;
4381 size_t shs; /* sub header size */
4384 while (get_t_state(&tconn->receiver) == RUNNING) {
4385 struct data_cmd *cmd;
4387 drbd_thread_current_set_cpu(&tconn->receiver);
4388 if (drbd_recv_header(tconn, &pi))
4391 cmd = &drbd_cmd_handler[pi.cmd];
4392 if (unlikely(pi.cmd >= ARRAY_SIZE(drbd_cmd_handler) || !cmd->fn)) {
4393 conn_err(tconn, "Unexpected data packet %s (0x%04x)",
4394 cmdname(pi.cmd), pi.cmd);
4398 shs = cmd->pkt_size;
4399 if (pi.size > shs && !cmd->expect_payload) {
4400 conn_err(tconn, "No payload expected %s l:%d\n",
4401 cmdname(pi.cmd), pi.size);
4406 err = drbd_recv_all_warn(tconn, pi.data, shs);
4412 err = cmd->fn(tconn, &pi);
4414 conn_err(tconn, "error receiving %s, e: %d l: %d!\n",
4415 cmdname(pi.cmd), err, pi.size);
4422 conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
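
/* Note: the table-driven dispatch above centralizes validation -- drbdd()
 * bounds-checks the command index, receives the fixed-size sub header
 * (pkt_size) and rejects unexpected payload before calling cmd->fn, so the
 * individual handlers can trust pi->data and pi->size on entry. */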
4425 void conn_flush_workqueue(struct drbd_tconn *tconn)
4427 struct drbd_wq_barrier barr;
4429 barr.w.cb = w_prev_work_done;
4430 barr.w.tconn = tconn;
4431 init_completion(&barr.done);
4432 drbd_queue_work(&tconn->sender_work, &barr.w);
4433 wait_for_completion(&barr.done);
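
/* The flush above relies on the work queue being processed in order: once
 * the no-op w_prev_work_done item has completed, everything queued before
 * it has completed as well. */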
4436 static void conn_disconnect(struct drbd_tconn *tconn)
4438 struct drbd_conf *mdev;
4442 if (tconn->cstate == C_STANDALONE)
4445 /* We are about to start the cleanup after connection loss.
4446 * Make sure drbd_make_request knows about that.
4447 * Usually we should be in some network failure state already,
4448 * but just in case we are not, we fix it up here.
4450 conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
4452 /* asender does not clean up anything. it must not interfere, either */
4453 drbd_thread_stop(&tconn->asender);
4454 drbd_free_sock(tconn);
4457 idr_for_each_entry(&tconn->volumes, mdev, vnr) {
4458 kref_get(&mdev->kref);
4460 drbd_disconnected(mdev);
4461 kref_put(&mdev->kref, &drbd_minor_destroy);
4466 if (!list_empty(&tconn->current_epoch->list))
4467 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
4468 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
4469 atomic_set(&tconn->current_epoch->epoch_size, 0);
4470 tconn->send.seen_any_write_yet = false;
4472 conn_info(tconn, "Connection closed\n");
4474 if (conn_highest_role(tconn) == R_PRIMARY && conn_highest_pdsk(tconn) >= D_UNKNOWN)
4475 conn_try_outdate_peer_async(tconn);
4477 spin_lock_irq(&tconn->req_lock);
4479 if (oc >= C_UNCONNECTED)
4480 _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
4482 spin_unlock_irq(&tconn->req_lock);
4484 if (oc == C_DISCONNECTING)
4485 conn_request_state(tconn, NS(conn, C_STANDALONE), CS_VERBOSE | CS_HARD);
static int drbd_disconnected(struct drbd_conf *mdev)
{
	unsigned int i;

	/* wait for current activity to cease. */
	spin_lock_irq(&mdev->tconn->req_lock);
	_drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
	_drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
	spin_unlock_irq(&mdev->tconn->req_lock);

	/* We do not have data structures that would allow us to
	 * get the rs_pending_cnt down to 0 again.
	 *  * On C_SYNC_TARGET we do not have any data structures describing
	 *    the pending RSDataRequest's we have sent.
	 *  * On C_SYNC_SOURCE there is no data structure that tracks
	 *    the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
	 *  And no, it is not the sum of the reference counts in the
	 *  resync_LRU. The resync_LRU tracks the whole operation including
	 *  the disk-IO, while the rs_pending_cnt only tracks the blocks
	 *  on the fly. */
	drbd_rs_cancel_all(mdev);
	mdev->rs_total = 0;
	mdev->rs_failed = 0;
	atomic_set(&mdev->rs_pending_cnt, 0);
	wake_up(&mdev->misc_wait);

	del_timer_sync(&mdev->resync_timer);
	resync_timer_fn((unsigned long)mdev);

	/* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
	 * w_make_resync_request etc. which may still be on the worker queue
	 * to be "canceled" */
	drbd_flush_workqueue(mdev);

	drbd_finish_peer_reqs(mdev);

	/* This second workqueue flush is necessary, since drbd_finish_peer_reqs()
	   might have issued a work again. The one before drbd_finish_peer_reqs() is
	   necessary to reclaim net_ee in drbd_finish_peer_reqs(). */
	drbd_flush_workqueue(mdev);

	kfree(mdev->p_uuid);
	mdev->p_uuid = NULL;

	if (!drbd_suspended(mdev))
		tl_clear(mdev->tconn);

	drbd_md_sync(mdev);

	/* serialize with bitmap writeout triggered by the state change,
	 * if any. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	/* tcp_close and release of sendpage pages can be deferred.  I don't
	 * want to use SO_LINGER, because apparently it can be deferred for
	 * more than 20 seconds (longest time I checked).
	 *
	 * Actually we don't care for exactly when the network stack does its
	 * put_page(), but release our reference on these pages right here. */
	i = drbd_free_peer_reqs(mdev, &mdev->net_ee);
	if (i)
		dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
	i = atomic_read(&mdev->pp_in_use_by_net);
	if (i)
		dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
	i = atomic_read(&mdev->pp_in_use);
	if (i)
		dev_info(DEV, "pp_in_use = %d, expected 0\n", i);

	D_ASSERT(list_empty(&mdev->read_ee));
	D_ASSERT(list_empty(&mdev->active_ee));
	D_ASSERT(list_empty(&mdev->sync_ee));
	D_ASSERT(list_empty(&mdev->done_ee));

	return 0;
}
/*
 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
 * we can agree on is stored in agreed_pro_version.
 *
 * feature flags and the reserved array should be enough room for future
 * enhancements of the handshake protocol, and possible plugins...
 *
 * for now, they are expected to be zero, but ignored.
 */
static int drbd_send_features(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;
	struct p_connection_features *p;

	sock = &tconn->data;
	p = conn_prepare_command(tconn, sock);
	if (!p)
		return -EIO;
	memset(p, 0, sizeof(*p));
	p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
	p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
	return conn_send_command(tconn, sock, P_CONNECTION_FEATURES, sizeof(*p), NULL, 0);
}
/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 */
static int drbd_do_features(struct drbd_tconn *tconn)
{
	/* ASSERT current == tconn->receiver ... */
	struct p_connection_features *p;
	const int expect = sizeof(struct p_connection_features);
	struct packet_info pi;
	int err;

	err = drbd_send_features(tconn);
	if (err)
		return 0;

	err = drbd_recv_header(tconn, &pi);
	if (err)
		return 0;

	if (pi.cmd != P_CONNECTION_FEATURES) {
		conn_err(tconn, "expected ConnectionFeatures packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		return -1;
	}

	if (pi.size != expect) {
		conn_err(tconn, "expected ConnectionFeatures length: %u, received: %u\n",
			 expect, pi.size);
		return -1;
	}

	p = pi.data;
	err = drbd_recv_all_warn(tconn, p, expect);
	if (err)
		return 0;

	p->protocol_min = be32_to_cpu(p->protocol_min);
	p->protocol_max = be32_to_cpu(p->protocol_max);
	if (p->protocol_max == 0)
		p->protocol_max = p->protocol_min;

	if (PRO_VERSION_MAX < p->protocol_min ||
	    PRO_VERSION_MIN > p->protocol_max)
		goto incompat;

	tconn->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);

	conn_info(tconn, "Handshake successful: "
		  "Agreed network protocol version %d\n", tconn->agreed_pro_version);

	return 1;

 incompat:
	conn_err(tconn, "incompatible DRBD dialects: "
		 "I support %d-%d, peer supports %d-%d\n",
		 PRO_VERSION_MIN, PRO_VERSION_MAX,
		 p->protocol_min, p->protocol_max);
	return -1;
}
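/*
 * Worked example of the negotiation above, with illustrative numbers:
 * if we support protocol versions 86..101 and the peer advertises
 * 86..96, the ranges overlap and we agree on min(101, 96) = 96.
 * Compiled-out sketch (agree() is hypothetical, not a driver symbol):
 */
#if 0
static int agree(int my_min, int my_max, int peer_min, int peer_max)
{
	if (peer_max == 0)	/* very old peers advertise only one version */
		peer_max = peer_min;
	if (my_max < peer_min || my_min > peer_max)
		return -1;	/* no common dialect: go standalone */
	return my_max < peer_max ? my_max : peer_max;
}
#endif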
#if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
static int drbd_do_auth(struct drbd_tconn *tconn)
{
	conn_err(tconn, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
	conn_err(tconn, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
	return -1;
}
#else
#define CHALLENGE_LEN 64

/* Return value:
	1 - auth succeeded,
	0 - failed, try again (network error),
	-1 - auth failed, don't try again.
*/
static int drbd_do_auth(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;
	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
	struct scatterlist sg;
	char *response = NULL;
	char *right_response = NULL;
	char *peers_ch = NULL;
	unsigned int key_len;
	char secret[SHARED_SECRET_MAX]; /* 64 byte */
	unsigned int resp_size;
	struct hash_desc desc;
	struct packet_info pi;
	struct net_conf *nc;
	int err, rv;

	/* FIXME: Put the challenge/response into the preallocated socket buffer. */

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	key_len = strlen(nc->shared_secret);
	memcpy(secret, nc->shared_secret, key_len);
	rcu_read_unlock();

	desc.tfm = tconn->cram_hmac_tfm;
	desc.flags = 0;

	rv = crypto_hash_setkey(tconn->cram_hmac_tfm, (u8 *)secret, key_len);
	if (rv) {
		conn_err(tconn, "crypto_hash_setkey() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	get_random_bytes(my_challenge, CHALLENGE_LEN);

	sock = &tconn->data;
	if (!conn_prepare_command(tconn, sock)) { rv = 0; goto fail; }
	rv = !conn_send_command(tconn, sock, P_AUTH_CHALLENGE, 0,
				my_challenge, CHALLENGE_LEN);
	if (!rv)
		goto fail;

	err = drbd_recv_header(tconn, &pi);
	if (err) { rv = 0; goto fail; }

	if (pi.cmd != P_AUTH_CHALLENGE) {
		conn_err(tconn, "expected AuthChallenge packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		rv = 0;
		goto fail;
	}
	if (pi.size > CHALLENGE_LEN * 2) {
		conn_err(tconn, "expected AuthChallenge payload too big.\n");
		rv = -1;
		goto fail;
	}

	peers_ch = kmalloc(pi.size, GFP_NOIO);
	if (peers_ch == NULL) {
		conn_err(tconn, "kmalloc of peers_ch failed\n");
		rv = -1;
		goto fail;
	}

	err = drbd_recv_all_warn(tconn, peers_ch, pi.size);
	if (err) { rv = 0; goto fail; }

	resp_size = crypto_hash_digestsize(tconn->cram_hmac_tfm);
	response = kmalloc(resp_size, GFP_NOIO);
	if (response == NULL) {
		conn_err(tconn, "kmalloc of response failed\n");
		rv = -1;
		goto fail;
	}

	sg_init_table(&sg, 1);
	sg_set_buf(&sg, peers_ch, pi.size);

	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
	if (rv) {
		conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	if (!conn_prepare_command(tconn, sock)) { rv = 0; goto fail; }
	rv = !conn_send_command(tconn, sock, P_AUTH_RESPONSE, 0,
				response, resp_size);
	if (!rv)
		goto fail;

	err = drbd_recv_header(tconn, &pi);
	if (err) { rv = 0; goto fail; }

	if (pi.cmd != P_AUTH_RESPONSE) {
		conn_err(tconn, "expected AuthResponse packet, received: %s (0x%04x)\n",
			 cmdname(pi.cmd), pi.cmd);
		rv = 0;
		goto fail;
	}
	if (pi.size != resp_size) {
		conn_err(tconn, "expected AuthResponse payload of wrong size\n");
		rv = 0;
		goto fail;
	}

	err = drbd_recv_all_warn(tconn, response, resp_size);
	if (err) { rv = 0; goto fail; }

	right_response = kmalloc(resp_size, GFP_NOIO);
	if (right_response == NULL) {
		conn_err(tconn, "kmalloc of right_response failed\n");
		rv = -1;
		goto fail;
	}

	sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);

	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
	if (rv) {
		conn_err(tconn, "crypto_hash_digest() failed with %d\n", rv);
		rv = -1;
		goto fail;
	}

	rv = !memcmp(response, right_response, resp_size);
	if (rv)
		conn_info(tconn, "Peer authenticated using %d bytes HMAC\n",
			  resp_size);
	else
		rv = -1;

 fail:
	kfree(peers_ch);
	kfree(response);
	kfree(right_response);
	return rv;
}
#endif
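/*
 * The exchange above is a classic CRAM scheme: each side sends a random
 * challenge and proves knowledge of the shared secret by returning
 * HMAC(secret, peer's challenge); the secret itself never crosses the
 * wire.  Compiled-out sketch of that shape; send_pkt(), recv_pkt(),
 * fill_random() and hmac() are hypothetical stand-ins, not the kernel
 * APIs used above.
 */
#if 0
int send_pkt(int cmd, const void *buf, unsigned int len);
int recv_pkt(int *cmd, void *buf, unsigned int len);
void fill_random(void *buf, unsigned int len);
void hmac(const char *secret, const void *msg, unsigned int len, char *digest);

static int cram_auth(const char *secret)
{
	char my_ch[64], peers_ch[64], resp[20], expected[20];
	int cmd;

	fill_random(my_ch, sizeof(my_ch));
	send_pkt(P_AUTH_CHALLENGE, my_ch, sizeof(my_ch));
	recv_pkt(&cmd, peers_ch, sizeof(peers_ch));

	hmac(secret, peers_ch, sizeof(peers_ch), resp);	/* our proof */
	send_pkt(P_AUTH_RESPONSE, resp, sizeof(resp));

	recv_pkt(&cmd, resp, sizeof(resp));		/* peer's proof */
	hmac(secret, my_ch, sizeof(my_ch), expected);
	return memcmp(resp, expected, sizeof(expected)) == 0;
}
#endif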
int drbdd_init(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	int h;

	conn_info(tconn, "receiver (re)started\n");

	do {
		h = conn_connect(tconn);
		if (h == 0) {
			conn_disconnect(tconn);
			schedule_timeout_interruptible(HZ);
		}
		if (h == -1) {
			conn_warn(tconn, "Discarding network configuration.\n");
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		}
	} while (h == 0);

	if (h > 0)
		drbdd(tconn);
	conn_disconnect(tconn);

	conn_info(tconn, "receiver terminated\n");
	return 0;
}
/* ********* acknowledge sender ******** */

static int got_conn_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct p_req_state_reply *p = pi->data;
	int retcode = be32_to_cpu(p->retcode);

	if (retcode >= SS_SUCCESS) {
		set_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags);
	} else {
		set_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags);
		conn_err(tconn, "Requested state change failed by peer: %s (%d)\n",
			 drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&tconn->ping_wait);

	return 0;
}
static int got_RqSReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_req_state_reply *p = pi->data;
	int retcode = be32_to_cpu(p->retcode);

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	if (test_bit(CONN_WD_ST_CHG_REQ, &tconn->flags)) {
		D_ASSERT(tconn->agreed_pro_version < 100);
		return got_conn_RqSReply(tconn, pi);
	}

	if (retcode >= SS_SUCCESS) {
		set_bit(CL_ST_CHG_SUCCESS, &mdev->flags);
	} else {
		set_bit(CL_ST_CHG_FAIL, &mdev->flags);
		dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
			drbd_set_st_err_str(retcode), retcode);
	}
	wake_up(&mdev->state_wait);

	return 0;
}
static int got_Ping(struct drbd_tconn *tconn, struct packet_info *pi)
{
	return drbd_send_ping_ack(tconn);
}

static int got_PingAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	/* restore idle timeout */
	tconn->meta.socket->sk->sk_rcvtimeo = tconn->net_conf->ping_int*HZ;
	if (!test_and_set_bit(GOT_PING_ACK, &tconn->flags))
		wake_up(&tconn->ping_wait);

	return 0;
}
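/*
 * Keepalive in one paragraph: whoever sets SEND_PING makes the asender
 * loop below transmit P_PING and shorten the meta socket's receive
 * timeout to the configured ping-timeout (stored in tenths of a second,
 * hence the "ping_timeo * HZ / 10" in drbd_asender()).  When the
 * P_PING_ACK arrives, the handler above restores the long idle timeout
 * of ping_int seconds and wakes anyone waiting in ping_wait.
 */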
static int got_IsInSync(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	D_ASSERT(mdev->tconn->agreed_pro_version >= 89);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, sector);
		drbd_set_in_sync(mdev, sector, blksize);
		/* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
		mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
		put_ldev(mdev);
	}
	dec_rs_pending(mdev);
	atomic_add(blksize >> 9, &mdev->rs_sect_in);

	return 0;
}
static int
validate_req_change_req_state(struct drbd_conf *mdev, u64 id, sector_t sector,
			      struct rb_root *root, const char *func,
			      enum drbd_req_event what, bool missing_ok)
{
	struct drbd_request *req;
	struct bio_and_error m;

	spin_lock_irq(&mdev->tconn->req_lock);
	req = find_request(mdev, root, id, sector, missing_ok, func);
	if (unlikely(!req)) {
		spin_unlock_irq(&mdev->tconn->req_lock);
		return -EIO;
	}
	__req_mod(req, what, &m);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return 0;
}
static int got_BlockAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);
	int blksize = be32_to_cpu(p->blksize);
	enum drbd_req_event what;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (p->block_id == ID_SYNCER) {
		drbd_set_in_sync(mdev, sector, blksize);
		dec_rs_pending(mdev);
		return 0;
	}
	switch (pi->cmd) {
	case P_RS_WRITE_ACK:
		what = WRITE_ACKED_BY_PEER_AND_SIS;
		break;
	case P_WRITE_ACK:
		what = WRITE_ACKED_BY_PEER;
		break;
	case P_RECV_ACK:
		what = RECV_ACKED_BY_PEER;
		break;
	case P_SUPERSEDED:
		what = CONFLICT_RESOLVED;
		break;
	case P_RETRY_WRITE:
		what = POSTPONE_WRITE;
		break;
	default:
		BUG();
	}

	return validate_req_change_req_state(mdev, p->block_id, sector,
					     &mdev->write_requests, __func__,
					     what, false);
}
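/*
 * Which ack ends up here depends on the wire protocol of the
 * connection: a protocol B peer acknowledges receipt (P_RECV_ACK),
 * a protocol C peer acknowledges stable storage (P_WRITE_ACK).
 * P_RS_WRITE_ACK additionally marks the block in sync on the peer,
 * and P_SUPERSEDED/P_RETRY_WRITE communicate the outcome of
 * concurrent-write conflict resolution.
 */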
static int got_NegAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);
	int size = be32_to_cpu(p->blksize);
	int err;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (p->block_id == ID_SYNCER) {
		dec_rs_pending(mdev);
		drbd_rs_failed_io(mdev, sector, size);
		return 0;
	}

	err = validate_req_change_req_state(mdev, p->block_id, sector,
					    &mdev->write_requests, __func__,
					    NEG_ACKED, true);
	if (err) {
		/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
		   The master bio might already be completed, therefore the
		   request is no longer in the collision hash. */
		/* In Protocol B we might already have got a P_RECV_ACK
		   but then get a P_NEG_ACK afterwards. */
		drbd_set_out_of_sync(mdev, sector, size);
	}
	return 0;
}
static int got_NegDReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	sector_t sector = be64_to_cpu(p->sector);

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dev_err(DEV, "Got NegDReply; Sector %llus, len %u.\n",
		(unsigned long long)sector, be32_to_cpu(p->blksize));

	return validate_req_change_req_state(mdev, p->block_id, sector,
					     &mdev->read_requests, __func__,
					     NEG_ACKED, false);
}
static int got_NegRSDReply(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	sector_t sector;
	int size;
	struct p_block_ack *p = pi->data;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dec_rs_pending(mdev);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, sector);
		switch (pi->cmd) {
		case P_NEG_RS_DREPLY:
			drbd_rs_failed_io(mdev, sector, size);
		case P_RS_CANCEL:
			break;
		default:
			BUG();
		}
		put_ldev(mdev);
	}

	return 0;
}
static int got_BarrierAck(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct p_barrier_ack *p = pi->data;
	struct drbd_conf *mdev;
	int vnr;

	tl_release(tconn, p->barrier, be32_to_cpu(p->set_size));

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_AHEAD &&
		    atomic_read(&mdev->ap_in_flight) == 0 &&
		    !test_and_set_bit(AHEAD_TO_SYNC_SOURCE, &mdev->flags)) {
			mdev->start_resync_timer.expires = jiffies + HZ;
			add_timer(&mdev->start_resync_timer);
		}
	}
	rcu_read_unlock();

	return 0;
}
static int got_OVResult(struct drbd_tconn *tconn, struct packet_info *pi)
{
	struct drbd_conf *mdev;
	struct p_block_ack *p = pi->data;
	struct drbd_work *w;
	sector_t sector;
	int size;

	mdev = vnr_to_mdev(tconn, pi->vnr);
	if (!mdev)
		return -EIO;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_out_of_sync_found(mdev, sector, size);
	else
		ov_out_of_sync_print(mdev);

	if (!get_ldev(mdev))
		return 0;

	drbd_rs_complete_io(mdev, sector);
	dec_rs_pending(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		w = kmalloc(sizeof(*w), GFP_NOIO);
		if (w) {
			w->cb = w_ov_finished;
			w->mdev = mdev;
			drbd_queue_work(&mdev->tconn->sender_work, w);
		} else {
			dev_err(DEV, "kmalloc(w) failed.");
			ov_out_of_sync_print(mdev);
			drbd_resync_finished(mdev);
		}
	}
	put_ldev(mdev);
	return 0;
}
static int got_skip(struct drbd_tconn *tconn, struct packet_info *pi)
{
	return 0;
}
static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr, not_empty = 0;

	do {
		clear_bit(SIGNAL_ASENDER, &tconn->flags);
		flush_signals(current);

		rcu_read_lock();
		idr_for_each_entry(&tconn->volumes, mdev, vnr) {
			kref_get(&mdev->kref);
			rcu_read_unlock();
			if (drbd_finish_peer_reqs(mdev)) {
				kref_put(&mdev->kref, &drbd_minor_destroy);
				return 1;
			}
			kref_put(&mdev->kref, &drbd_minor_destroy);
			rcu_read_lock();
		}
		set_bit(SIGNAL_ASENDER, &tconn->flags);

		spin_lock_irq(&tconn->req_lock);
		idr_for_each_entry(&tconn->volumes, mdev, vnr) {
			not_empty = !list_empty(&mdev->done_ee);
			if (not_empty)
				break;
		}
		spin_unlock_irq(&tconn->req_lock);
		rcu_read_unlock();
	} while (not_empty);

	return 0;
}
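/*
 * Note the unlock-while-working pattern above (also used in
 * conn_disconnect()): the volumes idr is walked under rcu_read_lock(),
 * but before calling into code that may sleep we take a kref on the
 * mdev and drop the RCU read lock; the kref keeps the device alive
 * while unlocked, and the read lock is re-acquired before the walk
 * continues.
 */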
struct asender_cmd {
	size_t pkt_size;
	int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
};

static struct asender_cmd asender_tbl[] = {
	[P_PING]	    = { 0, got_Ping },
	[P_PING_ACK]	    = { 0, got_PingAck },
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_SUPERSEDED]      = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]     = { sizeof(struct p_delay_probe93), got_skip },
	[P_RS_CANCEL]       = { sizeof(struct p_block_ack), got_NegRSDReply },
	[P_CONN_ST_CHG_REPLY]={ sizeof(struct p_req_state_reply), got_conn_RqSReply },
	[P_RETRY_WRITE]	    = { sizeof(struct p_block_ack), got_BlockAck },
};
int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	struct asender_cmd *cmd = NULL;
	struct packet_info pi;
	int rv;
	void *buf    = tconn->meta.rbuf;
	int received = 0;
	unsigned int header_size = drbd_header_size(tconn);
	int expect   = header_size;
	bool ping_timeout_active = false;
	struct net_conf *nc;
	int ping_timeo, tcp_cork, ping_int;

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */

	while (get_t_state(thi) == RUNNING) {
		drbd_thread_current_set_cpu(thi);

		rcu_read_lock();
		nc = rcu_dereference(tconn->net_conf);
		ping_timeo = nc->ping_timeo;
		tcp_cork = nc->tcp_cork;
		ping_int = nc->ping_int;
		rcu_read_unlock();

		if (test_and_clear_bit(SEND_PING, &tconn->flags)) {
			if (drbd_send_ping(tconn)) {
				conn_err(tconn, "drbd_send_ping has failed\n");
				goto reconnect;
			}
			tconn->meta.socket->sk->sk_rcvtimeo = ping_timeo * HZ / 10;
			ping_timeout_active = true;
		}

		/* TODO: conditionally cork; it may hurt latency if we cork without
		   much to send */
		if (tcp_cork)
			drbd_tcp_cork(tconn->meta.socket);
		if (tconn_finish_peer_reqs(tconn)) {
			conn_err(tconn, "tconn_finish_peer_reqs() failed\n");
			goto reconnect;
		}
		/* but unconditionally uncork unless disabled */
		if (tcp_cork)
			drbd_tcp_uncork(tconn->meta.socket);

		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(tconn->meta.socket, buf, expect-received, 0);
		clear_bit(SIGNAL_ASENDER, &tconn->flags);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS  (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			conn_err(tconn, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			/* If the data socket received something meanwhile,
			 * that is good enough: peer is still alive. */
			if (time_after(tconn->last_received,
				jiffies - tconn->meta.socket->sk->sk_rcvtimeo))
				continue;
			if (ping_timeout_active) {
				conn_err(tconn, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			set_bit(SEND_PING, &tconn->flags);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			conn_err(tconn, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

		if (received == expect && cmd == NULL) {
			if (decode_header(tconn, tconn->meta.rbuf, &pi))
				goto reconnect;
			cmd = &asender_tbl[pi.cmd];
			if (pi.cmd >= ARRAY_SIZE(asender_tbl) || !cmd->fn) {
				conn_err(tconn, "Unexpected meta packet %s (0x%04x)\n",
					 cmdname(pi.cmd), pi.cmd);
				goto disconnect;
			}
			expect = header_size + cmd->pkt_size;
			if (pi.size != expect - header_size) {
				conn_err(tconn, "Wrong packet size on meta (c: %d, l: %d)\n",
					 pi.cmd, pi.size);
				goto reconnect;
			}
		}
		if (received == expect) {
			int err;

			err = cmd->fn(tconn, &pi);
			if (err) {
				conn_err(tconn, "%pf failed\n", cmd->fn);
				goto reconnect;
			}

			tconn->last_received = jiffies;

			if (cmd == &asender_tbl[P_PING_ACK]) {
				/* restore idle timeout */
				tconn->meta.socket->sk->sk_rcvtimeo = ping_int * HZ;
				ping_timeout_active = false;
			}

			buf	 = tconn->meta.rbuf;
			received = 0;
			expect	 = header_size;
			cmd	 = NULL;
		}
	}

	if (0) {
reconnect:
		conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD);
	}
	if (0) {
disconnect:
		conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
	}
	clear_bit(SIGNAL_ASENDER, &tconn->flags);

	conn_info(tconn, "asender terminated\n");

	return 0;
}
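/*
 * The receive bookkeeping in drbd_asender() accumulates bytes until a
 * complete unit is present: "expect" is the total currently wanted
 * (first just the header, then header plus the command's payload),
 * "received" is what has arrived so far.  Compiled-out sketch of the
 * same pattern; read() and payload_size() stand in for the socket and
 * header-decoding helpers and are hypothetical here.
 */
#if 0
static int read_packet(int fd, char *buf, int header_size)
{
	int expect = header_size, received = 0, rv;

	while (received < expect) {
		rv = read(fd, buf + received, expect - received);
		if (rv <= 0)
			return -1;	/* error, signal, or peer closed */
		received += rv;
		if (received == header_size)
			/* header complete: now also expect the payload */
			expect = header_size + payload_size(buf);
	}
	return expect;
}
#endif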