   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
#include <linux/module.h>

#include <asm/uaccess.h>
#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/pkt_sched.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>
static int drbd_do_handshake(struct drbd_conf *mdev);
static int drbd_do_auth(struct drbd_conf *mdev);

static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *, struct drbd_epoch *, enum epoch_event);
static int e_end_block(struct drbd_conf *, struct drbd_work *, int);

#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
/*
 * some helper functions to deal with singly linked page lists,
 * page->private being our "next" pointer.
 */
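/* The chain-walking helpers used below (page_chain_next and friends) are
 * assumed from DRBD's private headers; they are not part of this excerpt.
 * A minimal, hedged sketch of their intended semantics, not a verbatim copy:
 *
 *	#define page_chain_next(page) \
 *		((struct page *)page_private(page))
 *	#define page_chain_for_each(page) \
 *		for (; page; page = page_chain_next(page))
 *	#define page_chain_for_each_safe(page, n) \
 *		for (; page && ((n = page_chain_next(page)), 1); page = n)
 */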
/* If at least n pages are linked at head, get n pages off.
 * Otherwise, don't modify head, and return NULL.
 * Locking is the responsibility of the caller.
 */
static struct page *page_chain_del(struct page **head, int n)
		tmp = page_chain_next(page);
			break; /* found sufficient pages */
		/* insufficient pages, don't use any of them. */

	/* add end of list marker for the returned list */
	set_page_private(page, 0);
	/* actual return value, and adjustment of head */
/* may be used outside of locks to find the tail of a (usually short)
 * "private" page chain, before adding it back to a global chain head
 * with page_chain_add() under a spinlock. */
static struct page *page_chain_tail(struct page *page, int *len)
	while ((tmp = page_chain_next(page)))

static int page_chain_free(struct page *page)
	page_chain_for_each_safe(page, tmp) {

static void page_chain_add(struct page **head,
		struct page *chain_first, struct page *chain_last)
	tmp = page_chain_tail(chain_first, NULL);
	BUG_ON(tmp != chain_last);

	/* add chain to head */
	set_page_private(chain_last, (unsigned long)*head);
static struct page *drbd_pp_first_pages_or_try_alloc(struct drbd_conf *mdev, int number)
	struct page *page = NULL;
	struct page *tmp = NULL;

	/* Yes, testing drbd_pp_vacant outside the lock is racy.
	 * So what. It saves a spin_lock. */
	if (drbd_pp_vacant >= number) {
		spin_lock(&drbd_pp_lock);
		page = page_chain_del(&drbd_pp_pool, number);
			drbd_pp_vacant -= number;
		spin_unlock(&drbd_pp_lock);

	/* GFP_TRY, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	for (i = 0; i < number; i++) {
		tmp = alloc_page(GFP_TRY);
		set_page_private(tmp, (unsigned long)page);

	/* Not enough pages immediately available this time.
	 * No need to jump around here, drbd_pp_alloc will retry this
	 * function "soon". */
		tmp = page_chain_tail(page, NULL);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		spin_unlock(&drbd_pp_lock);
static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
	struct drbd_epoch_entry *e;
	struct list_head *le, *tle;

	/* The EEs are always appended to the end of the list. Since
	   they are sent in order over the wire, they have to finish
	   in order. As soon as we see the first unfinished one, we
	   can stop examining the list... */

	list_for_each_safe(le, tle, &mdev->net_ee) {
		e = list_entry(le, struct drbd_epoch_entry, w.list);
		if (drbd_ee_has_active_page(e))
		list_move(le, to_be_freed);

static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);
/**
 * drbd_pp_alloc() - Returns @number pages, retries forever (or until signalled)
 * @mdev:	DRBD device.
 * @number:	number of pages requested
 * @retry:	whether to retry if not enough pages are available right now
 *
 * Tries to allocate @number pages, first from our own page pool, then from
 * the kernel, unless this allocation would exceed the max_buffers setting.
 * Possibly retry until DRBD frees sufficient pages somewhere else.
 *
 * Returns a page chain linked via page->private.
 */
static struct page *drbd_pp_alloc(struct drbd_conf *mdev, unsigned number, bool retry)
	struct page *page = NULL;

	/* Yes, we may run up to @number over max_buffers. If we
	 * follow it strictly, the admin will get it wrong anyway. */
	if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers)
		page = drbd_pp_first_pages_or_try_alloc(mdev, number);

	while (page == NULL) {
		prepare_to_wait(&drbd_pp_wait, &wait, TASK_INTERRUPTIBLE);

		drbd_kick_lo_and_reclaim_net(mdev);

		if (atomic_read(&mdev->pp_in_use) < mdev->net_conf->max_buffers) {
			page = drbd_pp_first_pages_or_try_alloc(mdev, number);

		if (signal_pending(current)) {
			dev_warn(DEV, "drbd_pp_alloc interrupted!\n");

	finish_wait(&drbd_pp_wait, &wait);

	atomic_add(number, &mdev->pp_in_use);
/* Must not be used from irq, as that may deadlock: see drbd_pp_alloc.
 * Is also used from inside another spin_lock_irq(&mdev->req_lock);
 * Either links the page chain back to the global pool,
 * or returns all pages to the system. */
static void drbd_pp_free(struct drbd_conf *mdev, struct page *page, int is_net)
	atomic_t *a = is_net ? &mdev->pp_in_use_by_net : &mdev->pp_in_use;

	if (drbd_pp_vacant > (DRBD_MAX_BIO_SIZE/PAGE_SIZE)*minor_count)
		i = page_chain_free(page);
		tmp = page_chain_tail(page, &i);
		spin_lock(&drbd_pp_lock);
		page_chain_add(&drbd_pp_pool, page, tmp);
		spin_unlock(&drbd_pp_lock);

	i = atomic_sub_return(i, a);
		dev_warn(DEV, "ASSERTION FAILED: %s: %d < 0\n",
			is_net ? "pp_in_use_by_net" : "pp_in_use", i);
	wake_up(&drbd_pp_wait);
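/* Hedged usage sketch (not part of the original file, and not called from
 * anywhere): allocate a one-page chain for a 4KiB payload, then hand it
 * back. pp_in_use accounting happens inside the pool functions; passing
 * "false" instead of "true" would fail fast rather than block until pages
 * are reclaimed elsewhere. */
static void __maybe_unused drbd_pp_example(struct drbd_conf *mdev)
{
	struct page *page = drbd_pp_alloc(mdev, 1, true /* retry */);
	if (page)
		drbd_pp_free(mdev, page, 0 /* not net_ee pages */);
}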
/*
You need to hold the req_lock:
 _drbd_wait_ee_list_empty()

You must not have the req_lock:
 drbd_process_done_ee()
 drbd_wait_ee_list_empty()
*/
struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
				       unsigned int data_size,
				       gfp_t gfp_mask) __must_hold(local)
	struct drbd_epoch_entry *e;
	struct page *page = NULL;
	unsigned nr_pages = (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;

	if (drbd_insert_fault(mdev, DRBD_FAULT_AL_EE))

	e = mempool_alloc(drbd_ee_mempool, gfp_mask & ~__GFP_HIGHMEM);
		if (!(gfp_mask & __GFP_NOWARN))
			dev_err(DEV, "alloc_ee: Allocation of an EE failed\n");

		page = drbd_pp_alloc(mdev, nr_pages, (gfp_mask & __GFP_WAIT));

	INIT_HLIST_NODE(&e->collision);
	atomic_set(&e->pending_bios, 0);

	mempool_free(e, drbd_ee_mempool);
void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
	if (e->flags & EE_HAS_DIGEST)

	drbd_pp_free(mdev, e->pages, is_net);
	D_ASSERT(atomic_read(&e->pending_bios) == 0);
	D_ASSERT(hlist_unhashed(&e->collision));
	mempool_free(e, drbd_ee_mempool);

int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
	LIST_HEAD(work_list);
	struct drbd_epoch_entry *e, *t;
	int is_net = list == &mdev->net_ee;

	spin_lock_irq(&mdev->req_lock);
	list_splice_init(list, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &work_list, w.list) {
		drbd_free_some_ee(mdev, e, is_net);
/*
 * This function is called from _asender only_
 * but see also comments in _req_mod(,barrier_acked)
 * and receive_Barrier.
 *
 * Move entries from net_ee to done_ee, if ready.
 * Grab done_ee, call all callbacks, free the entries.
 * The callbacks typically send out ACKs.
 */
static int drbd_process_done_ee(struct drbd_conf *mdev)
	LIST_HEAD(work_list);
	LIST_HEAD(reclaimed);
	struct drbd_epoch_entry *e, *t;
	int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);

	spin_lock_irq(&mdev->req_lock);
	reclaim_net_ee(mdev, &reclaimed);
	list_splice_init(&mdev->done_ee, &work_list);
	spin_unlock_irq(&mdev->req_lock);

	list_for_each_entry_safe(e, t, &reclaimed, w.list)
		drbd_free_net_ee(mdev, e);

	/* possible callbacks here:
	 * e_end_block, e_end_resync_block, and e_send_discard_ack.
	 * all ignore the last argument.
	 */
	list_for_each_entry_safe(e, t, &work_list, w.list) {
		/* list_del not necessary, next/prev members not touched */
		ok = e->w.cb(mdev, &e->w, !ok) && ok;
		drbd_free_ee(mdev, e);

	wake_up(&mdev->ee_wait);
void _drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
	/* avoids spin_lock/unlock
	 * and calling prepare_to_wait in the fast path */
	while (!list_empty(head)) {
		prepare_to_wait(&mdev->ee_wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&mdev->req_lock);
		finish_wait(&mdev->ee_wait, &wait);
		spin_lock_irq(&mdev->req_lock);

void drbd_wait_ee_list_empty(struct drbd_conf *mdev, struct list_head *head)
	spin_lock_irq(&mdev->req_lock);
	_drbd_wait_ee_list_empty(mdev, head);
	spin_unlock_irq(&mdev->req_lock);
/* see also kernel_accept, which only exists since 2.6.18.
 * We also want to log exactly which part of it failed. */
static int drbd_accept(struct drbd_conf *mdev, const char **what,
		       struct socket *sock, struct socket **newsock)
	struct sock *sk = sock->sk;

	err = sock->ops->listen(sock, 5);

	*what = "sock_create_lite";
	err = sock_create_lite(sk->sk_family, sk->sk_type, sk->sk_protocol,
			       newsock);

	err = sock->ops->accept(sock, *newsock, 0);
		sock_release(*newsock);

	(*newsock)->ops = sock->ops;
	__module_get((*newsock)->ops->owner);
static int drbd_recv_short(struct drbd_conf *mdev, struct socket *sock,
		    void *buf, size_t size, int flags)
	struct msghdr msg = {
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = (flags ? flags : MSG_WAITALL | MSG_NOSIGNAL)
	};

	rv = sock_recvmsg(sock, &msg, size, msg.msg_flags);
static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
	struct msghdr msg = {
		.msg_iov = (struct iovec *)&iov,
		.msg_flags = MSG_WAITALL | MSG_NOSIGNAL
	};

	rv = sock_recvmsg(mdev->data.socket, &msg, size, msg.msg_flags);

	if (rv < 0) {
		if (rv == -ECONNRESET)
			dev_info(DEV, "sock was reset by peer\n");
		else if (rv != -ERESTARTSYS)
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
	} else if (rv == 0) {
		if (drbd_test_flag(mdev, DISCONNECT_SENT)) {
			long t; /* time_left */
			t = wait_event_timeout(mdev->state_wait, mdev->state.conn < C_CONNECTED,
					       mdev->net_conf->ping_timeo * HZ/10);

		dev_info(DEV, "sock was shut down by peer\n");

		drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
/* On individual connections, the socket buffer size must be set prior to the
 * listen(2) or connect(2) calls in order to have it take effect.
 * This is our wrapper to do so.
 */
static void drbd_setbufsize(struct socket *sock, unsigned int snd,
			    unsigned int rcv)
	/* open coded SO_SNDBUF, SO_RCVBUF */
		sock->sk->sk_sndbuf = snd;
		sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
		sock->sk->sk_rcvbuf = rcv;
		sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
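/* For orientation only (hedged; not from this file): the userspace
 * equivalent of the open coded assignments above would be
 *
 *	setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &snd, sizeof(snd));
 *	setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, sizeof(rcv));
 *
 * issued before connect()/listen(). Setting SOCK_SNDBUF_LOCK and
 * SOCK_RCVBUF_LOCK keeps the kernel's buffer auto-tuning from overriding
 * the explicitly configured sizes. */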
static struct socket *drbd_try_connect(struct drbd_conf *mdev)
	struct sockaddr_in6 src_in6;
	int disconnect_on_error = 1;

	if (!get_net_conf(mdev))

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &sock);

	sock->sk->sk_rcvtimeo =
	sock->sk->sk_sndtimeo =  mdev->net_conf->try_connect_int*HZ;
	drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	/* explicitly bind to the configured IP as source IP
	 * for the outgoing connections.
	 * This is needed for multihomed hosts and to be
	 * able to use lo: interfaces for drbd.
	 * Make sure to use 0 as port number, so Linux selects
	 * a free one dynamically.
	 */
	memcpy(&src_in6, mdev->net_conf->my_addr,
	       min_t(int, mdev->net_conf->my_addr_len, sizeof(src_in6)));
	if (((struct sockaddr *)mdev->net_conf->my_addr)->sa_family == AF_INET6)
		src_in6.sin6_port = 0;
	else
		((struct sockaddr_in *)&src_in6)->sin_port = 0; /* AF_INET & AF_SCI */

	what = "bind before connect";
	err = sock->ops->bind(sock,
			      (struct sockaddr *) &src_in6,
			      mdev->net_conf->my_addr_len);

	/* connect may fail, peer not yet available.
	 * stay C_WF_CONNECTION, don't go Disconnecting! */
	disconnect_on_error = 0;
	err = sock->ops->connect(sock,
				 (struct sockaddr *)mdev->net_conf->peer_addr,
				 mdev->net_conf->peer_addr_len, 0);

		/* timeout, busy, signal pending */
		case ETIMEDOUT: case EAGAIN: case EINPROGRESS:
		case EINTR: case ERESTARTSYS:
		/* peer not (yet) available, network problem */
		case ECONNREFUSED: case ENETUNREACH:
		case EHOSTDOWN: case EHOSTUNREACH:
			disconnect_on_error = 0;

		dev_err(DEV, "%s failed, err = %d\n", what, err);

	if (disconnect_on_error)
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
	struct socket *s_estab = NULL, *s_listen;

	if (!get_net_conf(mdev))

	what = "sock_create_kern";
	err = sock_create_kern(((struct sockaddr *)mdev->net_conf->my_addr)->sa_family,
			       SOCK_STREAM, IPPROTO_TCP, &s_listen);

	timeo = mdev->net_conf->try_connect_int * HZ;
	timeo += (random32() & 1) ? timeo / 7 : -timeo / 7; /* 28.5% peak-to-peak random jitter (+/- timeo/7) */
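	/* Hedged example with made-up numbers: try_connect_int = 10s and
	 * HZ = 250 give timeo = 2500 jiffies, jittered to 2500 +/- 357,
	 * i.e. roughly 8.6s .. 11.4s, so the two nodes don't keep retrying
	 * in lock step forever. */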
	s_listen->sk->sk_reuse    = SK_CAN_REUSE; /* SO_REUSEADDR */
	s_listen->sk->sk_rcvtimeo = timeo;
	s_listen->sk->sk_sndtimeo = timeo;
	drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
			mdev->net_conf->rcvbuf_size);

	what = "bind before listen";
	err = s_listen->ops->bind(s_listen,
				  (struct sockaddr *) mdev->net_conf->my_addr,
				  mdev->net_conf->my_addr_len);

	err = drbd_accept(mdev, &what, s_listen, &s_estab);

		sock_release(s_listen);

	if (err != -EAGAIN && err != -EINTR && err != -ERESTARTSYS) {
		dev_err(DEV, "%s failed, err = %d\n", what, err);
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
static int drbd_send_fp(struct drbd_conf *mdev,
			struct socket *sock, enum drbd_packets cmd)
	struct p_header80 *h = &mdev->data.sbuf.header.h80;

	return _drbd_send_cmd(mdev, sock, cmd, h, sizeof(*h), 0);

static enum drbd_packets drbd_recv_fp(struct drbd_conf *mdev, struct socket *sock)
	struct p_header80 *h = &mdev->data.rbuf.header.h80;

	rr = drbd_recv_short(mdev, sock, h, sizeof(*h), 0);

	if (rr == sizeof(*h) && h->magic == BE_DRBD_MAGIC)
		return be16_to_cpu(h->command);
/**
 * drbd_socket_okay() - Free the socket if its connection is not okay
 * @mdev:	DRBD device.
 * @sock:	pointer to the pointer to the socket.
 */
static int drbd_socket_okay(struct drbd_conf *mdev, struct socket **sock)
	rr = drbd_recv_short(mdev, *sock, tb, 4, MSG_DONTWAIT | MSG_PEEK);

	if (rr > 0 || rr == -EAGAIN) {
/*
 * return values:
 *   1 yes, we have a valid connection
 *   0 oops, did not work out, please try again
 *  -1 peer talks different language,
 *     no point in trying again, please go standalone.
 *  -2 We do not have a network config...
 */
static int drbd_connect(struct drbd_conf *mdev)
	struct socket *s, *sock, *msock;
	enum drbd_state_rv rv;

	D_ASSERT(!mdev->data.socket);

	drbd_clear_flag(mdev, DISCONNECT_SENT);
	if (drbd_request_state(mdev, NS(conn, C_WF_CONNECTION)) < SS_SUCCESS)

			/* 3 tries, this should take less than a second! */
			s = drbd_try_connect(mdev);
			/* give the other side time to call bind() & listen() */
			schedule_timeout_interruptible(HZ / 10);

				drbd_send_fp(mdev, s, P_HAND_SHAKE_S);

				drbd_clear_flag(mdev, DISCARD_CONCURRENT);
				drbd_send_fp(mdev, s, P_HAND_SHAKE_M);

				dev_err(DEV, "Logic error in drbd_connect()\n");
				goto out_release_sockets;

			schedule_timeout_interruptible(mdev->net_conf->ping_timeo*HZ/10);
			ok = drbd_socket_okay(mdev, &sock);
			ok = drbd_socket_okay(mdev, &msock) && ok;

		s = drbd_wait_for_connect(mdev);
			try = drbd_recv_fp(mdev, s);
			drbd_socket_okay(mdev, &sock);
			drbd_socket_okay(mdev, &msock);
				dev_warn(DEV, "initial packet S crossed\n");
				dev_warn(DEV, "initial packet M crossed\n");
				drbd_set_flag(mdev, DISCARD_CONCURRENT);
				dev_warn(DEV, "Error receiving initial packet\n");

		if (mdev->state.conn <= C_DISCONNECTING)
			goto out_release_sockets;
		if (signal_pending(current)) {
			flush_signals(current);
			if (get_t_state(&mdev->receiver) == Exiting)
				goto out_release_sockets;

		ok = drbd_socket_okay(mdev, &sock);
		ok = drbd_socket_okay(mdev, &msock) && ok;
	msock->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */
	sock->sk->sk_reuse = SK_CAN_REUSE; /* SO_REUSEADDR */

	sock->sk->sk_allocation = GFP_NOIO;
	msock->sk->sk_allocation = GFP_NOIO;

	sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
	msock->sk->sk_priority = TC_PRIO_INTERACTIVE;

	/*
	 * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	 * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
	 * first set it to the P_HAND_SHAKE timeout,
	 * which we set to 4x the configured ping_timeout. */
	sock->sk->sk_sndtimeo =
	sock->sk->sk_rcvtimeo = mdev->net_conf->ping_timeo*4*HZ/10;

	msock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	msock->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;

	/* we don't want delays.
	 * we use TCP_CORK where appropriate, though */
	drbd_tcp_nodelay(sock);
	drbd_tcp_nodelay(msock);

	mdev->data.socket = sock;
	mdev->meta.socket = msock;
	mdev->last_received = jiffies;

	D_ASSERT(mdev->asender.task == NULL);

	h = drbd_do_handshake(mdev);

	if (mdev->cram_hmac_tfm) {
		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
		switch (drbd_do_auth(mdev)) {
			dev_err(DEV, "Authentication of peer failed\n");

			dev_err(DEV, "Authentication of peer failed, trying again.\n");

	sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
	sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;

	atomic_set(&mdev->packet_seq, 0);

	if (drbd_send_protocol(mdev) == -1)

	drbd_set_flag(mdev, STATE_SENT);
	drbd_send_sync_param(mdev, &mdev->sync_conf);
	drbd_send_sizes(mdev, 0, 0);
	drbd_send_uuids(mdev);
	drbd_send_current_state(mdev);
	drbd_clear_flag(mdev, USE_DEGR_WFC_T);
	drbd_clear_flag(mdev, RESIZE_PENDING);

	spin_lock_irq(&mdev->req_lock);
	rv = _drbd_set_state(_NS(mdev, conn, C_WF_REPORT_PARAMS), CS_VERBOSE, NULL);
	if (mdev->state.conn != C_WF_REPORT_PARAMS)
		drbd_clear_flag(mdev, STATE_SENT);
	spin_unlock_irq(&mdev->req_lock);

	drbd_thread_start(&mdev->asender);
	mod_timer(&mdev->request_timer, jiffies + HZ); /* just start it here. */
static int drbd_recv_header(struct drbd_conf *mdev, enum drbd_packets *cmd, unsigned int *packet_size)
	union p_header *h = &mdev->data.rbuf.header;

	r = drbd_recv(mdev, h, sizeof(*h));
	if (unlikely(r != sizeof(*h))) {
		if (!signal_pending(current))
			dev_warn(DEV, "short read expecting header on sock: r=%d\n", r);

	if (likely(h->h80.magic == BE_DRBD_MAGIC)) {
		*cmd = be16_to_cpu(h->h80.command);
		*packet_size = be16_to_cpu(h->h80.length);
	} else if (h->h95.magic == BE_DRBD_MAGIC_BIG) {
		*cmd = be16_to_cpu(h->h95.command);
		*packet_size = be32_to_cpu(h->h95.length);
	} else {
		dev_err(DEV, "magic?? on data m: 0x%08x c: %d l: %d\n",
		    be32_to_cpu(h->h80.magic),
		    be16_to_cpu(h->h80.command),
		    be16_to_cpu(h->h80.length));
	}

	mdev->last_received = jiffies;
static void drbd_flush(struct drbd_conf *mdev)
	if (mdev->write_ordering >= WO_bdev_flush && get_ldev(mdev)) {
		rv = blkdev_issue_flush(mdev->ldev->backing_bdev, GFP_NOIO,
					NULL);
		if (rv) {
			dev_info(DEV, "local disk flush failed with status %d\n", rv);
			/* would rather check on EOPNOTSUPP, but that is not reliable.
			 * don't try again for ANY return value != 0
			 * if (rv == -EOPNOTSUPP) */
			drbd_bump_write_ordering(mdev, WO_drain_io);
/**
 * drbd_may_finish_epoch() - Applies an epoch_event to the epoch's state, possibly finishing it.
 * @mdev:	DRBD device.
 * @epoch:	Epoch object.
 * @ev:		Epoch event.
 */
static enum finish_epoch drbd_may_finish_epoch(struct drbd_conf *mdev,
					       struct drbd_epoch *epoch,
					       enum epoch_event ev)
	struct drbd_epoch *next_epoch;
	enum finish_epoch rv = FE_STILL_LIVE;

	spin_lock(&mdev->epoch_lock);
		epoch_size = atomic_read(&epoch->epoch_size);

		switch (ev & ~EV_CLEANUP) {
		case EV_PUT:
			atomic_dec(&epoch->active);
			break;
		case EV_GOT_BARRIER_NR:
			set_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags);
			break;
		case EV_BECAME_LAST:
			break;
		}

		if (epoch_size != 0 &&
		    atomic_read(&epoch->active) == 0 &&
		    (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags) || ev & EV_CLEANUP)) {
			if (!(ev & EV_CLEANUP)) {
				spin_unlock(&mdev->epoch_lock);
				drbd_send_b_ack(mdev, epoch->barrier_nr, epoch_size);
				spin_lock(&mdev->epoch_lock);
			}
			if (test_bit(DE_HAVE_BARRIER_NUMBER, &epoch->flags))

			if (mdev->current_epoch != epoch) {
				next_epoch = list_entry(epoch->list.next, struct drbd_epoch, list);
				list_del(&epoch->list);
				ev = EV_BECAME_LAST | (ev & EV_CLEANUP);

				if (rv == FE_STILL_LIVE)
			} else {
				atomic_set(&epoch->epoch_size, 0);
				/* atomic_set(&epoch->active, 0); is already zero */
				if (rv == FE_STILL_LIVE)
					rv = FE_RECYCLED;
				wake_up(&mdev->ee_wait);

	spin_unlock(&mdev->epoch_lock);
/**
 * drbd_bump_write_ordering() - Fall back to another write ordering method
 * @mdev:	DRBD device.
 * @wo:		Write ordering method to try.
 */
void drbd_bump_write_ordering(struct drbd_conf *mdev, enum write_ordering_e wo) __must_hold(local)
	enum write_ordering_e pwo;
	static char *write_ordering_str[] = {
		[WO_drain_io] = "drain",
		[WO_bdev_flush] = "flush",
	};

	pwo = mdev->write_ordering;
	if (wo == WO_bdev_flush && mdev->ldev->dc.no_disk_flush)
	if (wo == WO_drain_io && mdev->ldev->dc.no_disk_drain)
	mdev->write_ordering = wo;
	if (pwo != mdev->write_ordering || wo == WO_bdev_flush)
		dev_info(DEV, "Method to ensure write ordering: %s\n", write_ordering_str[mdev->write_ordering]);
/**
 * drbd_submit_ee()
 * @mdev:	DRBD device.
 * @e:		epoch entry
 * @rw:		flag field, see bio->bi_rw
 *
 * May spread the pages to multiple bios,
 * depending on bio_add_page restrictions.
 *
 * Returns 0 if all bios have been submitted,
 * -ENOMEM if we could not allocate enough bios,
 * -ENOSPC (any better suggestion?) if we have not been able to bio_add_page a
 *  single page to an empty bio (which should never happen and likely indicates
 *  that the lower level IO stack is in some way broken). This has been observed
 *  on certain Xen deployments.
 */
/* TODO allocate from our own bio_set. */
int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
		   const unsigned rw, const int fault_type)
	struct bio *bios = NULL;
	struct page *page = e->pages;
	sector_t sector = e->sector;
	unsigned ds = e->size;
	unsigned n_bios = 0;
	unsigned nr_pages = (ds + PAGE_SIZE - 1) >> PAGE_SHIFT;

	/* In most cases, we will only need one bio. But in case the lower
	 * level restrictions happen to be different at this offset on this
	 * side than those of the sending peer, we may need to submit the
	 * request in more than one bio.
	 *
	 * Plain bio_alloc is good enough here, this is no DRBD internally
	 * generated bio, but a bio allocated on behalf of the peer.
	 */
	bio = bio_alloc(GFP_NOIO, nr_pages);
		dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
	/* > e->sector, unless this is the first bio */
	bio->bi_sector = sector;
	bio->bi_bdev = mdev->ldev->backing_bdev;
	bio->bi_private = e;
	bio->bi_end_io = drbd_endio_sec;

	bio->bi_next = bios;

	page_chain_for_each(page) {
		unsigned len = min_t(unsigned, ds, PAGE_SIZE);
		if (!bio_add_page(bio, page, len, 0)) {
			/* A single page must always be possible!
			 * But in case it fails anyway,
			 * we deal with it, and complain (below). */
			if (bio->bi_vcnt == 0) {
				dev_err(DEV,
					"bio_add_page failed for len=%u, "
					"bi_vcnt=0 (bi_sector=%llu)\n",
					len, (unsigned long long)bio->bi_sector);

	D_ASSERT(page == NULL);

	atomic_set(&e->pending_bios, n_bios);
		bios = bios->bi_next;
		bio->bi_next = NULL;

		drbd_generic_make_request(mdev, fault_type, bio);

		bios = bios->bi_next;
static int receive_Barrier(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
	struct p_barrier *p = &mdev->data.rbuf.barrier;
	struct drbd_epoch *epoch;

	mdev->current_epoch->barrier_nr = p->barrier;
	rv = drbd_may_finish_epoch(mdev, mdev->current_epoch, EV_GOT_BARRIER_NR);

	/* P_BARRIER_ACK may imply that the corresponding extent is dropped from
	 * the activity log, which means it would not be resynced in case the
	 * R_PRIMARY crashes now.
	 * Therefore we must send the barrier_ack after the barrier request was
	 * completed. */
	switch (mdev->write_ordering) {
		if (rv == FE_RECYCLED)

		/* receiver context, in the writeout path of the other node.
		 * avoid potential distributed deadlock */
		epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);

		dev_warn(DEV, "Allocation of an epoch failed, slowing down\n");

		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);

		if (atomic_read(&mdev->current_epoch->epoch_size)) {
			epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);

		epoch = mdev->current_epoch;
		wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);

		D_ASSERT(atomic_read(&epoch->active) == 0);
		D_ASSERT(epoch->flags == 0);

		dev_err(DEV, "Strangeness in mdev->write_ordering %d\n", mdev->write_ordering);

	atomic_set(&epoch->epoch_size, 0);
	atomic_set(&epoch->active, 0);

	spin_lock(&mdev->epoch_lock);
	if (atomic_read(&mdev->current_epoch->epoch_size)) {
		list_add(&epoch->list, &mdev->current_epoch->list);
		mdev->current_epoch = epoch;
	} else {
		/* The current_epoch got recycled while we allocated this one... */
	spin_unlock(&mdev->epoch_lock);
/* used from receive_RSDataReply (recv_resync_read)
 * and from receive_Data */
static struct drbd_epoch_entry *
read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;
	unsigned long *data;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

		rr = drbd_recv(mdev, dig_in, dgs);
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data digest: read %d expected %d\n",
					rr, dgs);

	ERR_IF(data_size & 0x1ff) return NULL;
	ERR_IF(data_size > DRBD_MAX_BIO_SIZE) return NULL;

	/* even though we trust our peer,
	 * we sometimes have to double check. */
	if (sector + (data_size>>9) > capacity) {
		dev_err(DEV, "request from peer beyond end of local disk: "
			"capacity: %llus < sector: %llus + size: %u\n",
			(unsigned long long)capacity,
			(unsigned long long)sector, data_size);

	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, id, sector, data_size, GFP_NOIO);

	page_chain_for_each(page) {
		unsigned len = min_t(int, ds, PAGE_SIZE);
		rr = drbd_recv(mdev, data, len);
		if (drbd_insert_fault(mdev, DRBD_FAULT_RECEIVE)) {
			dev_err(DEV, "Fault injection: Corrupting data on receive\n");
			data[0] = data[0] ^ (unsigned long)-1;
		}
		if (rr != len) {
			drbd_free_ee(mdev, e);
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data: read %d expected %d\n",
					rr, len);

	if (dgs) {
		drbd_csum_ee(mdev, mdev->integrity_r_tfm, e, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED: %llus +%u\n",
				(unsigned long long)sector, data_size);
			drbd_bcast_ee(mdev, "digest failed",
					dgs, dig_in, dig_vv, e);
			drbd_free_ee(mdev, e);

	mdev->recv_cnt += data_size>>9;
/* drbd_drain_block() just takes a data block
 * out of the socket input buffer, and discards it.
 */
static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
	page = drbd_pp_alloc(mdev, 1, 1);

		rr = drbd_recv(mdev, data, min_t(int, data_size, PAGE_SIZE));
		if (rr != min_t(int, data_size, PAGE_SIZE)) {
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data: read %d expected %d\n",
					rr, min_t(int, data_size, PAGE_SIZE));

	drbd_pp_free(mdev, page, 0);
static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
			   sector_t sector, int data_size)
	struct bio_vec *bvec;
	int dgs, rr, i, expect;
	void *dig_in = mdev->int_dig_in;
	void *dig_vv = mdev->int_dig_vv;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;

		rr = drbd_recv(mdev, dig_in, dgs);
			if (!signal_pending(current))
				dev_warn(DEV,
					"short read receiving data reply digest: read %d expected %d\n",
					rr, dgs);

	/* optimistically update recv_cnt. if receiving fails below,
	 * we disconnect anyway, and counters will be reset. */
	mdev->recv_cnt += data_size>>9;

	bio = req->master_bio;
	D_ASSERT(sector == bio->bi_sector);

	bio_for_each_segment(bvec, bio, i) {
		expect = min_t(int, data_size, bvec->bv_len);
		rr = drbd_recv(mdev,
			       kmap(bvec->bv_page)+bvec->bv_offset,
			       expect);
		kunmap(bvec->bv_page);
			if (!signal_pending(current))
				dev_warn(DEV, "short read receiving data reply: "
					"read %d expected %d\n",
					rr, expect);

	if (dgs) {
		drbd_csum_bio(mdev, mdev->integrity_r_tfm, bio, dig_vv);
		if (memcmp(dig_in, dig_vv, dgs)) {
			dev_err(DEV, "Digest integrity check FAILED. Broken NICs?\n");

	D_ASSERT(data_size == 0);
/* e_end_resync_block() is called via
 * drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;

	D_ASSERT(hlist_unhashed(&e->collision));

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		drbd_set_in_sync(mdev, sector, e->size);
		ok = drbd_send_ack(mdev, P_RS_WRITE_ACK, e);
	} else {
		/* Record failure to sync */
		drbd_rs_failed_io(mdev, sector, e->size);

		ok = drbd_send_ack(mdev, P_NEG_ACK, e);
static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
	struct drbd_epoch_entry *e;

	e = read_in_block(mdev, ID_SYNCER, sector, data_size);

	dec_rs_pending(mdev);

	/* corresponding dec_unacked() in e_end_resync_block()
	 * respective _drbd_clear_done_ee */
	e->w.cb = e_end_resync_block;

	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->sync_ee);
	spin_unlock_irq(&mdev->req_lock);

	atomic_add(data_size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, e, WRITE, DRBD_FAULT_RS_WR) == 0)

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);

	drbd_free_ee(mdev, e);
static int receive_DataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
	struct drbd_request *req;
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);

	spin_lock_irq(&mdev->req_lock);
	req = _ar_id_to_req(mdev, p->block_id, sector);
	spin_unlock_irq(&mdev->req_lock);
	if (unlikely(!req)) {
		dev_err(DEV, "Got a corrupt block_id/sector pair(1).\n");

	/* hlist_del(&req->collision) is done in _req_may_be_done, to avoid
	 * special casing it there for the various failure cases.
	 * still no race with drbd_fail_pending_reads */
	ok = recv_dless_read(mdev, req, sector, data_size);

		req_mod(req, data_received);
	/* else: nothing. handled from drbd_disconnect...
	 * I don't think we may complete this just yet
	 * in case we are "on-disconnect: freeze" */
static int receive_RSDataReply(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
	struct p_data *p = &mdev->data.rbuf.data;

	sector = be64_to_cpu(p->sector);
	D_ASSERT(p->block_id == ID_SYNCER);

	if (get_ldev(mdev)) {
		/* data is submitted to disk within recv_resync_read.
		 * corresponding put_ldev done below on error,
		 * or in drbd_endio_write_sec. */
		ok = recv_resync_read(mdev, sector, data_size);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not write resync data to local disk.\n");

		ok = drbd_drain_block(mdev, data_size);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);

	atomic_add(data_size >> 9, &mdev->rs_sect_in);
/* e_end_block() is called via drbd_process_done_ee().
 * this means this function only runs in the asender thread
 */
static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
	sector_t sector = e->sector;

	if (mdev->net_conf->wire_protocol == DRBD_PROT_C) {
		if (likely((e->flags & EE_WAS_ERROR) == 0)) {
			pcmd = (mdev->state.conn >= C_SYNC_SOURCE &&
				mdev->state.conn <= C_PAUSED_SYNC_T &&
				e->flags & EE_MAY_SET_IN_SYNC) ?
				P_RS_WRITE_ACK : P_WRITE_ACK;
			ok &= drbd_send_ack(mdev, pcmd, e);
			if (pcmd == P_RS_WRITE_ACK)
				drbd_set_in_sync(mdev, sector, e->size);
		} else {
			ok = drbd_send_ack(mdev, P_NEG_ACK, e);
			/* we expect it to be marked out of sync anyway...
			 * maybe assert this? */

	/* we delete from the conflict detection hash _after_ we sent out the
	 * P_WRITE_ACK / P_NEG_ACK, to get the sequence number right. */
	if (mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
		D_ASSERT(!hlist_unhashed(&e->collision));
		hlist_del_init(&e->collision);
		spin_unlock_irq(&mdev->req_lock);
	} else {
		D_ASSERT(hlist_unhashed(&e->collision));

	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + (cancel ? EV_CLEANUP : 0));
static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
	struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;

	D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
	ok = drbd_send_ack(mdev, P_DISCARD_ACK, e);

	spin_lock_irq(&mdev->req_lock);
	D_ASSERT(!hlist_unhashed(&e->collision));
	hlist_del_init(&e->collision);
	spin_unlock_irq(&mdev->req_lock);
static bool overlapping_resync_write(struct drbd_conf *mdev, struct drbd_epoch_entry *data_e)
	struct drbd_epoch_entry *rs_e;

	spin_lock_irq(&mdev->req_lock);
	list_for_each_entry(rs_e, &mdev->sync_ee, w.list) {
		if (overlaps(data_e->sector, data_e->size, rs_e->sector, rs_e->size)) {
	spin_unlock_irq(&mdev->req_lock);
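/* overlaps() is assumed from DRBD's headers and is not shown in this
 * excerpt. A hedged sketch of the intended predicate, deriving sector
 * counts from the byte sizes (512-byte sectors):
 *
 *	#define overlaps(s1, l1, s2, l2) \
 *		((s1) + ((l1) >> 9) > (s2) && (s1) < (s2) + ((l2) >> 9))
 */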
/* Called from receive_Data.
 * Synchronize packets on sock with packets on msock.
 *
 * This is here so even when a P_DATA packet traveling via sock overtook an Ack
 * packet traveling on msock, they are still processed in the order they have
 * been sent.
 *
 * Note: we don't care for Ack packets overtaking P_DATA packets.
 *
 * In case packet_seq is larger than mdev->peer_seq number, there are
 * outstanding packets on the msock. We wait for them to arrive.
 * In case we are the logically next packet, we update mdev->peer_seq
 * ourselves. Correctly handles 32bit wrap around.
 *
 * Assume we have a 10 GBit connection, that is about 1<<30 byte per second,
 * about 1<<21 sectors per second. So "worst" case, we have 1<<3 == 8 seconds
 * for the 24bit wrap (historical atomic_t guarantee on some archs), and we have
 * 1<<11 == 2048 seconds aka ages for the 32bit wrap around...
 *
 * returns 0 if we may process the packet,
 * -ERESTARTSYS if we were interrupted (by disconnect signal). */
static int drbd_wait_peer_seq(struct drbd_conf *mdev, const u32 packet_seq)
	spin_lock(&mdev->peer_seq_lock);
		prepare_to_wait(&mdev->seq_wait, &wait, TASK_INTERRUPTIBLE);
		if (seq_le(packet_seq, mdev->peer_seq+1))
		if (signal_pending(current)) {
		p_seq = mdev->peer_seq;
		spin_unlock(&mdev->peer_seq_lock);
		timeout = schedule_timeout(30*HZ);
		spin_lock(&mdev->peer_seq_lock);
		if (timeout == 0 && p_seq == mdev->peer_seq) {
			dev_err(DEV, "ASSERT FAILED waited 30 seconds for sequence update, forcing reconnect\n");
	finish_wait(&mdev->seq_wait, &wait);
	if (mdev->peer_seq+1 == packet_seq)
	spin_unlock(&mdev->peer_seq_lock);
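/* seq_le() is defined earlier in the original file and is not part of this
 * excerpt. A hedged sketch of the usual wrap-safe comparison it names; the
 * signed difference trick stays correct across the 32bit wrap around
 * discussed above. Illustrative only, not wired into anything here. */
static inline int __maybe_unused seq_le_sketch(u32 a, u32 b)
{
	return (s32)a - (s32)b <= 0;
}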
/* see also bio_flags_to_wire()
 * DRBD_REQ_*, because we need to semantically map the flags to data packet
 * flags and back. We may replicate to other kernel versions. */
static unsigned long wire_flags_to_bio(struct drbd_conf *mdev, u32 dpf)
	return (dpf & DP_RW_SYNC ? REQ_SYNC : 0) |
		(dpf & DP_FUA ? REQ_FUA : 0) |
		(dpf & DP_FLUSH ? REQ_FLUSH : 0) |
		(dpf & DP_DISCARD ? REQ_DISCARD : 0);
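/* For symmetry, a hedged sketch of the inverse mapping referenced in the
 * comment above; the real bio_flags_to_wire() lives elsewhere in DRBD and
 * additionally checks the agreed protocol version, which is omitted here.
 * Illustrative only, not wired into anything. */
static u32 __maybe_unused bio_flags_to_wire_sketch(unsigned long bi_rw)
{
	return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
	       (bi_rw & REQ_FUA ? DP_FUA : 0) |
	       (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
	       (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
}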
/* mirrored write */
static int receive_Data(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
	struct drbd_epoch_entry *e;
	struct p_data *p = &mdev->data.rbuf.data;

	if (!get_ldev(mdev)) {
		spin_lock(&mdev->peer_seq_lock);
		if (mdev->peer_seq+1 == be32_to_cpu(p->seq_num))
		spin_unlock(&mdev->peer_seq_lock);

		drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
		atomic_inc(&mdev->current_epoch->epoch_size);
		return drbd_drain_block(mdev, data_size);

	/* get_ldev(mdev) successful.
	 * Corresponding put_ldev done either below (on various errors),
	 * or in drbd_endio_write_sec, if we successfully submit the data at
	 * the end of this function. */

	sector = be64_to_cpu(p->sector);
	e = read_in_block(mdev, p->block_id, sector, data_size);

	e->w.cb = e_end_block;

	dp_flags = be32_to_cpu(p->dp_flags);
	rw |= wire_flags_to_bio(mdev, dp_flags);
	if (e->pages == NULL) {
		D_ASSERT(e->size == 0);
		D_ASSERT(dp_flags & DP_FLUSH);

	if (dp_flags & DP_MAY_SET_IN_SYNC)
		e->flags |= EE_MAY_SET_IN_SYNC;

	spin_lock(&mdev->epoch_lock);
	e->epoch = mdev->current_epoch;
	atomic_inc(&e->epoch->epoch_size);
	atomic_inc(&e->epoch->active);
	spin_unlock(&mdev->epoch_lock);
	/* I'm the receiver, I do hold a net_cnt reference. */
	if (!mdev->net_conf->two_primaries) {
		spin_lock_irq(&mdev->req_lock);
	} else {
		/* don't get the req_lock yet,
		 * we may sleep in drbd_wait_peer_seq */
		const int size = e->size;
		const int discard = drbd_test_flag(mdev, DISCARD_CONCURRENT);
		struct drbd_request *i;
		struct hlist_node *n;
		struct hlist_head *slot;

		D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
		BUG_ON(mdev->ee_hash == NULL);
		BUG_ON(mdev->tl_hash == NULL);

		/* conflict detection and handling:
		 * 1. wait on the sequence number,
		 *    in case this data packet overtook ACK packets.
		 * 2. check our hash tables for conflicting requests.
		 *    we only need to walk the tl_hash, since an ee can not
		 *    have a conflict with another ee: on the submitting
		 *    node, the corresponding req had already been conflicting,
		 *    and a conflicting req is never sent.
		 *
		 * Note: for two_primaries, we are protocol C,
		 * so there cannot be any request that is DONE
		 * but still on the transfer log.
		 *
		 * unconditionally add to the ee_hash.
		 *
		 * if no conflicting request is found:
		 *    submit.
		 *
		 * if any conflicting request is found
		 * that has not yet been acked,
		 * AND I have the "discard concurrent writes" flag:
		 *    queue (via done_ee) the P_DISCARD_ACK; OUT.
		 *
		 * if any conflicting request is found:
		 *    block the receiver, waiting on misc_wait
		 *    until no more conflicting requests are there,
		 *    or we get interrupted (disconnect).
		 *
		 *    we do not just write after local io completion of those
		 *    requests, but only after req is done completely, i.e.
		 *    we wait for the P_DISCARD_ACK to arrive!
		 *
		 *    then proceed normally, i.e. submit.
		 */
		if (drbd_wait_peer_seq(mdev, be32_to_cpu(p->seq_num)))
			goto out_interrupted;

		spin_lock_irq(&mdev->req_lock);

		hlist_add_head(&e->collision, ee_hash_slot(mdev, sector));
#define OVERLAPS overlaps(i->sector, i->size, sector, size)
		slot = tl_hash_slot(mdev, sector);
			int have_unacked = 0;
			int have_conflict = 0;
			prepare_to_wait(&mdev->misc_wait, &wait,
				TASK_INTERRUPTIBLE);
			hlist_for_each_entry(i, n, slot, collision) {
					/* only ALERT on first iteration,
					 * we may be woken up early... */
					dev_alert(DEV, "%s[%u] Concurrent local write detected!"
					      "	new: %llus +%u; pending: %llus +%u\n",
					      current->comm, current->pid,
					      (unsigned long long)sector, size,
					      (unsigned long long)i->sector, i->size);
					if (i->rq_state & RQ_NET_PENDING)

			/* Discard Ack only for the _first_ iteration */
			if (first && discard && have_unacked) {
				dev_alert(DEV, "Concurrent write! [DISCARD BY FLAG] sec=%llus\n",
				     (unsigned long long)sector);

				e->w.cb = e_send_discard_ack;
				list_add_tail(&e->w.list, &mdev->done_ee);

				spin_unlock_irq(&mdev->req_lock);

				/* we could probably send that P_DISCARD_ACK ourselves,
				 * but I don't like the receiver using the msock */

				finish_wait(&mdev->misc_wait, &wait);

			if (signal_pending(current)) {
				hlist_del_init(&e->collision);

				spin_unlock_irq(&mdev->req_lock);

				finish_wait(&mdev->misc_wait, &wait);
				goto out_interrupted;

			spin_unlock_irq(&mdev->req_lock);
				dev_alert(DEV, "Concurrent write! [W AFTERWARDS] "
				     "sec=%llus\n", (unsigned long long)sector);
			} else if (discard) {
				/* we had none on the first iteration.
				 * there must be none now. */
				D_ASSERT(have_unacked == 0);

			spin_lock_irq(&mdev->req_lock);

		finish_wait(&mdev->misc_wait, &wait);

		list_add(&e->w.list, &mdev->active_ee);
	spin_unlock_irq(&mdev->req_lock);

	if (mdev->state.conn == C_SYNC_TARGET)
		wait_event(mdev->ee_wait, !overlapping_resync_write(mdev, e));
	switch (mdev->net_conf->wire_protocol) {
		/* corresponding dec_unacked() in e_end_block()
		 * respective _drbd_clear_done_ee */

		/* I really don't like it that the receiver thread
		 * sends on the msock, but anyway */
		drbd_send_ack(mdev, P_RECV_ACK, e);

	if (mdev->state.pdsk < D_INCONSISTENT) {
		/* In case we have the only disk of the cluster, */
		drbd_set_out_of_sync(mdev, e->sector, e->size);
		e->flags |= EE_CALL_AL_COMPLETE_IO;
		e->flags &= ~EE_MAY_SET_IN_SYNC;
		drbd_al_begin_io(mdev, e->sector);

	if (drbd_submit_ee(mdev, e, rw, DRBD_FAULT_DT_WR) == 0)

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	hlist_del_init(&e->collision);
	spin_unlock_irq(&mdev->req_lock);
	if (e->flags & EE_CALL_AL_COMPLETE_IO)
		drbd_al_complete_io(mdev, e->sector);

out_interrupted:
	drbd_may_finish_epoch(mdev, e->epoch, EV_PUT + EV_CLEANUP);
	drbd_free_ee(mdev, e);
/* We may throttle resync, if the lower device seems to be busy,
 * and current sync rate is above c_min_rate.
 *
 * To decide whether or not the lower device is busy, we use a scheme similar
 * to MD RAID is_mddev_idle(): if the partition stats reveal "significant"
 * activity (more than 64 sectors) that we cannot account for with our own
 * resync activity, it obviously is "busy".
 *
 * The current sync rate used here uses only the most recent two step marks,
 * to have a short time average so we can react faster.
 */
int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector)
	struct gendisk *disk = mdev->ldev->backing_bdev->bd_contains->bd_disk;
	unsigned long db, dt, dbdt;
	struct lc_element *tmp;

	/* feature disabled? */
	if (mdev->sync_conf.c_min_rate == 0)

	spin_lock_irq(&mdev->al_lock);
	tmp = lc_find(mdev->resync, BM_SECT_TO_EXT(sector));
	if (tmp) {
		struct bm_extent *bm_ext = lc_entry(tmp, struct bm_extent, lce);
		if (test_bit(BME_PRIORITY, &bm_ext->flags)) {
			spin_unlock_irq(&mdev->al_lock);
		/* Do not slow down if app IO is already waiting for this extent */
	spin_unlock_irq(&mdev->al_lock);

	curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
		      (int)part_stat_read(&disk->part0, sectors[1]) -
		      atomic_read(&mdev->rs_sect_ev);

	if (!mdev->rs_last_events || curr_events - mdev->rs_last_events > 64) {
		unsigned long rs_left;

		mdev->rs_last_events = curr_events;

		/* sync speed average over the last 2*DRBD_SYNC_MARK_STEP,
		 * approx. */
		i = (mdev->rs_last_mark + DRBD_SYNC_MARKS-1) % DRBD_SYNC_MARKS;

		if (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T)
			rs_left = mdev->ov_left;
		else
			rs_left = drbd_bm_total_weight(mdev) - mdev->rs_failed;

		dt = ((long)jiffies - (long)mdev->rs_mark_time[i]) / HZ;
		db = mdev->rs_mark_left[i] - rs_left;
		dbdt = Bit2KB(db/dt);

		if (dbdt > mdev->sync_conf.c_min_rate)
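		/* Worked example (hedged, made-up numbers): one bitmap bit
		 * covers 4KiB, so Bit2KB(x) == 4*x. If the chosen mark is
		 * dt = 6 seconds old and db = 6000 bits got cleared since,
		 * dbdt = Bit2KB(6000/6) = 4000 KB/s. With c_min_rate = 3000
		 * that is above the guaranteed minimum, so we may throttle;
		 * with c_min_rate = 5000 we would not. */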
static int receive_DataRequest(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int digest_size)
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	struct drbd_epoch_entry *e;
	struct digest_info *di = NULL;
	unsigned int fault_type;
	struct p_block_req *p = &mdev->data.rbuf.block_req;

	sector = be64_to_cpu(p->sector);
	size   = be32_to_cpu(p->blksize);

	if (size <= 0 || (size & 0x1ff) != 0 || size > DRBD_MAX_BIO_SIZE) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);

	if (sector + (size>>9) > capacity) {
		dev_err(DEV, "%s:%d: sector: %llus, size: %u\n", __FILE__, __LINE__,
				(unsigned long long)sector, size);

	if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) {
		switch (cmd) {
		case P_DATA_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_DREPLY, p);
			break;
		case P_RS_DATA_REQUEST:
		case P_CSUM_RS_REQUEST:
			drbd_send_ack_rp(mdev, P_NEG_RS_DREPLY, p);
			break;
		case P_OV_REPLY:
			dec_rs_pending(mdev);
			drbd_send_ack_ex(mdev, P_OV_RESULT, sector, size, ID_IN_SYNC);
			break;
		default:
			dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
				cmdname(cmd));
		}
		if (verb && __ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Can not satisfy peer's read request, "
			    "no local data.\n");

		/* drain possible payload */
		return drbd_drain_block(mdev, digest_size);
	/* GFP_NOIO, because we must not cause arbitrary write-out: in a DRBD
	 * "criss-cross" setup, that might cause write-out on some other DRBD,
	 * which in turn might block on the other node at this very place. */
	e = drbd_alloc_ee(mdev, p->block_id, sector, size, GFP_NOIO);

	switch (cmd) {
	case P_DATA_REQUEST:
		e->w.cb = w_e_end_data_req;
		fault_type = DRBD_FAULT_DT_RD;
		/* application IO, don't drbd_rs_begin_io */
		break;

	case P_RS_DATA_REQUEST:
		e->w.cb = w_e_end_rsdata_req;
		fault_type = DRBD_FAULT_RS_RD;
		/* used in the sector offset progress display */
		mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		break;

	case P_OV_REPLY:
	case P_CSUM_RS_REQUEST:
		fault_type = DRBD_FAULT_RS_RD;
		di = kmalloc(sizeof(*di) + digest_size, GFP_NOIO);

		di->digest_size = digest_size;
		di->digest = (((char *)di)+sizeof(struct digest_info));

		e->flags |= EE_HAS_DIGEST;

		if (drbd_recv(mdev, di->digest, digest_size) != digest_size)

		if (cmd == P_CSUM_RS_REQUEST) {
			D_ASSERT(mdev->agreed_pro_version >= 89);
			e->w.cb = w_e_end_csum_rs_req;
			/* used in the sector offset progress display */
			mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
		} else if (cmd == P_OV_REPLY) {
			/* track progress, we may need to throttle */
			atomic_add(size >> 9, &mdev->rs_sect_in);
			e->w.cb = w_e_end_ov_reply;
			dec_rs_pending(mdev);
			/* drbd_rs_begin_io done when we sent this request,
			 * but accounting still needs to be done. */
			goto submit_for_resync;
		}
		break;

	case P_OV_REQUEST:
		if (mdev->ov_start_sector == ~(sector_t)0 &&
		    mdev->agreed_pro_version >= 90) {
			unsigned long now = jiffies;
			int i;
			mdev->ov_start_sector = sector;
			mdev->ov_position = sector;
			mdev->ov_left = drbd_bm_bits(mdev) - BM_SECT_TO_BIT(sector);
			mdev->rs_total = mdev->ov_left;
			for (i = 0; i < DRBD_SYNC_MARKS; i++) {
				mdev->rs_mark_left[i] = mdev->ov_left;
				mdev->rs_mark_time[i] = now;
			}
			dev_info(DEV, "Online Verify start sector: %llu\n",
					(unsigned long long)sector);
		}
		e->w.cb = w_e_end_ov_req;
		fault_type = DRBD_FAULT_RS_RD;
		break;

	default:
		dev_err(DEV, "unexpected command (%s) in receive_DataRequest\n",
			cmdname(cmd));
		fault_type = DRBD_FAULT_MAX;
	/* Throttle, drbd_rs_begin_io and submit should become asynchronous
	 * wrt the receiver, but it is not as straightforward as it may seem.
	 * Various places in the resync start and stop logic assume resync
	 * requests are processed in order, requeuing this on the worker thread
	 * introduces a bunch of new code for synchronization between threads.
	 *
	 * Unlimited throttling before drbd_rs_begin_io may stall the resync
	 * "forever", throttling after drbd_rs_begin_io will lock that extent
	 * for application writes for the same time. For now, just throttle
	 * here, where the rest of the code expects the receiver to sleep for
	 * a while. */

	/* Throttle before drbd_rs_begin_io, as that locks out application IO;
	 * this defers syncer requests for some time, before letting at least
	 * one request through. The resync controller on the receiving side
	 * will adapt to the incoming rate accordingly.
	 *
	 * We cannot throttle here if remote is Primary/SyncTarget:
	 * we would also throttle its application reads.
	 * In that case, throttling is done on the SyncTarget only. */
	if (mdev->state.peer != R_PRIMARY && drbd_rs_should_slow_down(mdev, sector))
		schedule_timeout_uninterruptible(HZ/10);
	if (drbd_rs_begin_io(mdev, sector))

submit_for_resync:
	atomic_add(size >> 9, &mdev->rs_sect_ev);

	spin_lock_irq(&mdev->req_lock);
	list_add_tail(&e->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	if (drbd_submit_ee(mdev, e, READ, fault_type) == 0)

	/* don't care for the reason here */
	dev_err(DEV, "submit failed, triggering re-connect\n");
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);
	/* no drbd_rs_complete_io(), we are dropping the connection anyway */

	drbd_free_ee(mdev, e);
static int drbd_asb_recover_0p(struct drbd_conf *mdev) __must_hold(local)
	int self, peer, rv = -100;
	unsigned long ch_self, ch_peer;

	self = mdev->ldev->md.uuid[UI_BITMAP] & 1;
	peer = mdev->p_uuid[UI_BITMAP] & 1;

	ch_peer = mdev->p_uuid[UI_SIZE];
	ch_self = mdev->comm_bm_set;

	switch (mdev->net_conf->after_sb_0p) {
	case ASB_DISCARD_SECONDARY:
	case ASB_CALL_HELPER:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_DISCARD_YOUNGER_PRI:
		if (self == 0 && peer == 1) {
		if (self == 1 && peer == 0) {
		/* Else fall through to one of the other strategies... */
	case ASB_DISCARD_OLDER_PRI:
		if (self == 0 && peer == 1) {
		if (self == 1 && peer == 0) {
		/* Else fall through to one of the other strategies... */
		dev_warn(DEV, "Discard younger/older primary did not find a decision\n"
			 "Using discard-least-changes instead\n");
	case ASB_DISCARD_ZERO_CHG:
		if (ch_peer == 0 && ch_self == 0) {
			rv = drbd_test_flag(mdev, DISCARD_CONCURRENT)
				? -1 : 1;
			break;
		} else {
			if (ch_peer == 0) { rv =  1; break; }
			if (ch_self == 0) { rv = -1; break; }
		}
		if (mdev->net_conf->after_sb_0p == ASB_DISCARD_ZERO_CHG)
			break;
	case ASB_DISCARD_LEAST_CHG:
		if (ch_self < ch_peer)
			rv = -1;
		else if (ch_self > ch_peer)
			rv =  1;
		else /* ( ch_self == ch_peer ) */
			/* Well, then use something else. */
			rv = drbd_test_flag(mdev, DISCARD_CONCURRENT)
				? -1 : 1;
		break;
	case ASB_DISCARD_LOCAL:
		rv = -1;
		break;
	case ASB_DISCARD_REMOTE:
		rv =  1;
static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local)
	switch (mdev->net_conf->after_sb_1p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CONSENSUS:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_SECONDARY)
			rv = hg;
		if (hg == 1 && mdev->state.role == R_PRIMARY)
			rv = hg;
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCARD_SECONDARY:
		return mdev->state.role == R_PRIMARY ? 1 : -1;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1 && mdev->state.role == R_PRIMARY) {
			enum drbd_state_rv rv2;

			drbd_set_role(mdev, R_SECONDARY, 0);
			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
static int drbd_asb_recover_2p(struct drbd_conf *mdev) __must_hold(local)
	switch (mdev->net_conf->after_sb_2p) {
	case ASB_DISCARD_YOUNGER_PRI:
	case ASB_DISCARD_OLDER_PRI:
	case ASB_DISCARD_LEAST_CHG:
	case ASB_DISCARD_LOCAL:
	case ASB_DISCARD_REMOTE:
	case ASB_CONSENSUS:
	case ASB_DISCARD_SECONDARY:
		dev_err(DEV, "Configuration error.\n");
		break;
	case ASB_VIOLENTLY:
		rv = drbd_asb_recover_0p(mdev);
		break;
	case ASB_DISCONNECT:
		break;
	case ASB_CALL_HELPER:
		hg = drbd_asb_recover_0p(mdev);
		if (hg == -1) {
			enum drbd_state_rv rv2;

			/* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE,
			 * we might be here in C_WF_REPORT_PARAMS which is transient.
			 * we do not need to wait for the after state change work either. */
			rv2 = drbd_change_state(mdev, CS_VERBOSE, NS(role, R_SECONDARY));
			if (rv2 != SS_SUCCESS) {
				drbd_khelper(mdev, "pri-lost-after-sb");
			} else {
				dev_warn(DEV, "Successfully gave up primary role.\n");
static void drbd_uuid_dump(struct drbd_conf *mdev, char *text, u64 *uuid,
			   u64 bits, u64 flags)
	if (!uuid) {
		dev_info(DEV, "%s uuid info vanished while I was looking!\n", text);
		return;
	}
	dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX bits:%llu flags:%llX\n",
		 text,
		 (unsigned long long)uuid[UI_CURRENT],
		 (unsigned long long)uuid[UI_BITMAP],
		 (unsigned long long)uuid[UI_HISTORY_START],
		 (unsigned long long)uuid[UI_HISTORY_END],
		 (unsigned long long)bits,
		 (unsigned long long)flags);
/*
  100	after split brain try auto recover
    2	C_SYNC_SOURCE set BitMap
    1	C_SYNC_SOURCE use BitMap
   -1	C_SYNC_TARGET use BitMap
   -2	C_SYNC_TARGET set BitMap
 -100	after split brain, disconnect
-1000	unrelated data
-1091	requires proto 91
-1096	requires proto 96
 */
static int drbd_uuid_compare(struct drbd_conf *mdev, int *rule_nr) __must_hold(local)
	self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
	peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);

	if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)

	if ((self == UUID_JUST_CREATED || self == (u64)0) &&
	     peer != UUID_JUST_CREATED)

	if (self != UUID_JUST_CREATED &&
	    (peer == UUID_JUST_CREATED || peer == (u64)0))

	if (self == peer) {
		int rct, dc; /* roles at crash time */

		if (mdev->p_uuid[UI_BITMAP] == (u64)0 && mdev->ldev->md.uuid[UI_BITMAP] != (u64)0) {

			if (mdev->agreed_pro_version < 91)
				return -1091;

			if ((mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1))) {
				dev_info(DEV, "was SyncSource, missed the resync finished event, corrected myself:\n");
				drbd_uuid_move_history(mdev);
				mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
				mdev->ldev->md.uuid[UI_BITMAP] = 0;

				drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
					       mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
			} else {
				dev_info(DEV, "was SyncSource (peer failed to write sync_uuid)\n");

		if (mdev->ldev->md.uuid[UI_BITMAP] == (u64)0 && mdev->p_uuid[UI_BITMAP] != (u64)0) {

			if (mdev->agreed_pro_version < 91)
				return -1091;

			if ((mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) == (mdev->p_uuid[UI_BITMAP] & ~((u64)1)) &&
			    (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) == (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1))) {
				dev_info(DEV, "was SyncTarget, peer missed the resync finished event, corrected peer:\n");

				mdev->p_uuid[UI_HISTORY_START + 1] = mdev->p_uuid[UI_HISTORY_START];
				mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_BITMAP];
				mdev->p_uuid[UI_BITMAP] = 0UL;

				drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
			} else {
				dev_info(DEV, "was SyncTarget (failed to write sync_uuid)\n");

		/* Common power [off|failure] */
		rct = (drbd_test_flag(mdev, CRASHED_PRIMARY) ? 1 : 0) +
			(mdev->p_uuid[UI_FLAGS] & 2);
		/* lowest bit is set when we were primary,
		 * next bit (weight 2) is set when peer was primary */

		switch (rct) {
		case 0: /* !self_pri && !peer_pri */ return 0;
		case 1: /*  self_pri && !peer_pri */ return 1;
		case 2: /* !self_pri &&  peer_pri */ return -1;
		case 3: /*  self_pri &&  peer_pri */
			dc = drbd_test_flag(mdev, DISCARD_CONCURRENT);
			return dc ? -1 : 1;
2440 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2445 peer = mdev->p_uuid[UI_HISTORY_START] & ~((u64)1);
2447 if (mdev->agreed_pro_version < 96 ?
2448 (mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1)) ==
2449 (mdev->p_uuid[UI_HISTORY_START + 1] & ~((u64)1)) :
2450 peer + UUID_NEW_BM_OFFSET == (mdev->p_uuid[UI_BITMAP] & ~((u64)1))) {
2451 /* The last P_SYNC_UUID did not get through. Undo the modifications
2452 of the peer's UUIDs from the last start of a resync as sync source. */
2454 if (mdev->agreed_pro_version < 91)
2457 mdev->p_uuid[UI_BITMAP] = mdev->p_uuid[UI_HISTORY_START];
2458 mdev->p_uuid[UI_HISTORY_START] = mdev->p_uuid[UI_HISTORY_START + 1];
2460 dev_info(DEV, "Lost last syncUUID packet, corrected:\n");
2461 drbd_uuid_dump(mdev, "peer", mdev->p_uuid, mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2468 self = mdev->ldev->md.uuid[UI_CURRENT] & ~((u64)1);
2469 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2470 peer = mdev->p_uuid[i] & ~((u64)1);
2476 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2477 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2482 self = mdev->ldev->md.uuid[UI_HISTORY_START] & ~((u64)1);
2484 if (mdev->agreed_pro_version < 96 ?
2485 (mdev->ldev->md.uuid[UI_HISTORY_START + 1] & ~((u64)1)) ==
2486 (mdev->p_uuid[UI_HISTORY_START] & ~((u64)1)) :
2487 self + UUID_NEW_BM_OFFSET == (mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1))) {
2488 /* The last P_SYNC_UUID did not get through. Undo the modifications
2489 of our UUIDs from the last start of a resync as sync source. */
2491 if (mdev->agreed_pro_version < 91)
2494 __drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_HISTORY_START]);
2495 __drbd_uuid_set(mdev, UI_HISTORY_START, mdev->ldev->md.uuid[UI_HISTORY_START + 1]);
2497 dev_info(DEV, "Last syncUUID did not get through, corrected:\n");
2498 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid,
2499 mdev->state.disk >= D_NEGOTIATING ? drbd_bm_total_weight(mdev) : 0, 0);
2507 peer = mdev->p_uuid[UI_CURRENT] & ~((u64)1);
2508 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2509 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2515 self = mdev->ldev->md.uuid[UI_BITMAP] & ~((u64)1);
2516 peer = mdev->p_uuid[UI_BITMAP] & ~((u64)1);
2517 if (self == peer && self != ((u64)0))
2521 for (i = UI_HISTORY_START; i <= UI_HISTORY_END; i++) {
2522 self = mdev->ldev->md.uuid[i] & ~((u64)1);
2523 for (j = UI_HISTORY_START; j <= UI_HISTORY_END; j++) {
2524 peer = mdev->p_uuid[j] & ~((u64)1);
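/*
 * Added note: every comparison in drbd_uuid_compare() masks out the lowest
 * UUID bit, which is used as a flag, so e.g. the (hypothetical) values
 * 0x45C1AAA26D6F2B3C and 0x45C1AAA26D6F2B3D name the same data generation.
 * Per the table above, the sign of the result selects the resync direction,
 * and a magnitude of 2 means "set the whole bitmap" (full sync) while a
 * magnitude of 1 means the existing bitmap is sufficient.
 */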
2533 /* drbd_sync_handshake() returns the new conn state on success, or
2534 C_MASK on failure.
2536 static enum drbd_conns drbd_sync_handshake(struct drbd_conf *mdev, enum drbd_role peer_role,
2537 enum drbd_disk_state peer_disk) __must_hold(local)
2540 enum drbd_conns rv = C_MASK;
2541 enum drbd_disk_state mydisk;
2543 mydisk = mdev->state.disk;
2544 if (mydisk == D_NEGOTIATING)
2545 mydisk = mdev->new_state_tmp.disk;
2547 dev_info(DEV, "drbd_sync_handshake:\n");
2549 spin_lock_irq(&mdev->ldev->md.uuid_lock);
2550 drbd_uuid_dump(mdev, "self", mdev->ldev->md.uuid, mdev->comm_bm_set, 0);
2551 drbd_uuid_dump(mdev, "peer", mdev->p_uuid,
2552 mdev->p_uuid[UI_SIZE], mdev->p_uuid[UI_FLAGS]);
2554 hg = drbd_uuid_compare(mdev, &rule_nr);
2555 spin_unlock_irq(&mdev->ldev->md.uuid_lock);
2557 dev_info(DEV, "uuid_compare()=%d by rule %d\n", hg, rule_nr);
2560 dev_alert(DEV, "Unrelated data, aborting!\n");
2564 dev_alert(DEV, "To resolve this both sides have to support at least protocol %d\n", -hg - 1000);
2568 if ((mydisk == D_INCONSISTENT && peer_disk > D_INCONSISTENT) ||
2569 (peer_disk == D_INCONSISTENT && mydisk > D_INCONSISTENT)) {
2570 int f = (hg == -100) || abs(hg) == 2;
2571 hg = mydisk > D_INCONSISTENT ? 1 : -1;
2574 dev_info(DEV, "Becoming sync %s due to disk states.\n",
2575 hg > 0 ? "source" : "target");
2579 drbd_khelper(mdev, "initial-split-brain");
2581 if (hg == 100 || (hg == -100 && mdev->net_conf->always_asbp)) {
2582 int pcount = (mdev->state.role == R_PRIMARY)
2583 + (peer_role == R_PRIMARY);
2584 int forced = (hg == -100);
2588 hg = drbd_asb_recover_0p(mdev);
2591 hg = drbd_asb_recover_1p(mdev);
2594 hg = drbd_asb_recover_2p(mdev);
2597 if (abs(hg) < 100) {
2598 dev_warn(DEV, "Split-Brain detected, %d primaries, "
2599 "automatically solved. Sync from %s node\n",
2600 pcount, (hg < 0) ? "peer" : "this");
2602 dev_warn(DEV, "Doing a full sync, since"
2603 " UUIDs were ambiguous.\n");
2610 if (mdev->net_conf->want_lose && !(mdev->p_uuid[UI_FLAGS]&1))
2612 if (!mdev->net_conf->want_lose && (mdev->p_uuid[UI_FLAGS]&1))
2616 dev_warn(DEV, "Split-Brain detected, manually solved. "
2617 "Sync from %s node\n",
2618 (hg < 0) ? "peer" : "this");
2622 /* FIXME this log message is not correct if we end up here
2623 * after an attempted attach on a diskless node.
2624 * We just refuse to attach -- well, we drop the "connection"
2625 * to that disk, in a way... */
2626 dev_alert(DEV, "Split-Brain detected but unresolved, dropping connection!\n");
2627 drbd_khelper(mdev, "split-brain");
2631 if (hg > 0 && mydisk <= D_INCONSISTENT) {
2632 dev_err(DEV, "I shall become SyncSource, but I am inconsistent!\n");
2636 if (hg < 0 && /* by intention we do not use mydisk here. */
2637 mdev->state.role == R_PRIMARY && mdev->state.disk >= D_CONSISTENT) {
2638 switch (mdev->net_conf->rr_conflict) {
2639 case ASB_CALL_HELPER:
2640 drbd_khelper(mdev, "pri-lost");
2642 case ASB_DISCONNECT:
2643 dev_err(DEV, "I shall become SyncTarget, but I am primary!\n");
2646 dev_warn(DEV, "Becoming SyncTarget, violating the stable-data"
2651 if (mdev->net_conf->dry_run || drbd_test_flag(mdev, CONN_DRY_RUN)) {
2653 dev_info(DEV, "dry-run connect: No resync, would become Connected immediately.\n");
2655 dev_info(DEV, "dry-run connect: Would become %s, doing a %s resync.",
2656 drbd_conn_str(hg > 0 ? C_SYNC_SOURCE : C_SYNC_TARGET),
2657 abs(hg) >= 2 ? "full" : "bit-map based");
2662 dev_info(DEV, "Writing the whole bitmap, full sync required after drbd_sync_handshake.\n");
2663 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write, "set_n_write from sync_handshake",
2664 BM_LOCKED_SET_ALLOWED))
2668 if (hg > 0) { /* become sync source. */
2670 } else if (hg < 0) { /* become sync target */
2674 if (drbd_bm_total_weight(mdev)) {
2675 dev_info(DEV, "No resync, but %lu bits in bitmap!\n",
2676 drbd_bm_total_weight(mdev));
2683 /* returns 1 if invalid */
2684 static int cmp_after_sb(enum drbd_after_sb_p peer, enum drbd_after_sb_p self)
2686 /* ASB_DISCARD_REMOTE - ASB_DISCARD_LOCAL is valid */
2687 if ((peer == ASB_DISCARD_REMOTE && self == ASB_DISCARD_LOCAL) ||
2688 (self == ASB_DISCARD_REMOTE && peer == ASB_DISCARD_LOCAL))
2691 /* any other things with ASB_DISCARD_REMOTE or ASB_DISCARD_LOCAL are invalid */
2692 if (peer == ASB_DISCARD_REMOTE || peer == ASB_DISCARD_LOCAL ||
2693 self == ASB_DISCARD_REMOTE || self == ASB_DISCARD_LOCAL)
2696 /* everything else is valid if they are equal on both sides. */
2700 /* everything else is invalid. */
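/*
 * Added examples of the rules above (return value 1 means invalid):
 *
 *	cmp_after_sb(ASB_DISCARD_REMOTE, ASB_DISCARD_LOCAL) == 0
 *		the one accepted asymmetric pair
 *	cmp_after_sb(ASB_DISCARD_REMOTE, ASB_DISCARD_REMOTE) == 1
 *		each node would throw away the other's data
 *	cmp_after_sb(ASB_DISCONNECT, ASB_DISCONNECT) == 0
 *		equal on both sides, hence valid
 */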
2704 static int receive_protocol(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
2706 struct p_protocol *p = &mdev->data.rbuf.protocol;
2707 int p_proto, p_after_sb_0p, p_after_sb_1p, p_after_sb_2p;
2708 int p_want_lose, p_two_primaries, cf;
2709 char p_integrity_alg[SHARED_SECRET_MAX] = "";
2711 p_proto = be32_to_cpu(p->protocol);
2712 p_after_sb_0p = be32_to_cpu(p->after_sb_0p);
2713 p_after_sb_1p = be32_to_cpu(p->after_sb_1p);
2714 p_after_sb_2p = be32_to_cpu(p->after_sb_2p);
2715 p_two_primaries = be32_to_cpu(p->two_primaries);
2716 cf = be32_to_cpu(p->conn_flags);
2717 p_want_lose = cf & CF_WANT_LOSE;
2719 drbd_clear_flag(mdev, CONN_DRY_RUN);
2721 if (cf & CF_DRY_RUN)
2722 drbd_set_flag(mdev, CONN_DRY_RUN);
2724 if (p_proto != mdev->net_conf->wire_protocol) {
2725 dev_err(DEV, "incompatible communication protocols\n");
2729 if (cmp_after_sb(p_after_sb_0p, mdev->net_conf->after_sb_0p)) {
2730 dev_err(DEV, "incompatible after-sb-0pri settings\n");
2734 if (cmp_after_sb(p_after_sb_1p, mdev->net_conf->after_sb_1p)) {
2735 dev_err(DEV, "incompatible after-sb-1pri settings\n");
2739 if (cmp_after_sb(p_after_sb_2p, mdev->net_conf->after_sb_2p)) {
2740 dev_err(DEV, "incompatible after-sb-2pri settings\n");
2744 if (p_want_lose && mdev->net_conf->want_lose) {
2745 dev_err(DEV, "both sides have the 'want_lose' flag set\n");
2749 if (p_two_primaries != mdev->net_conf->two_primaries) {
2750 dev_err(DEV, "incompatible setting of the two-primaries options\n");
2754 if (mdev->agreed_pro_version >= 87) {
2755 unsigned char *my_alg = mdev->net_conf->integrity_alg;
2757 if (drbd_recv(mdev, p_integrity_alg, data_size) != data_size)
2760 p_integrity_alg[SHARED_SECRET_MAX-1] = 0;
2761 if (strcmp(p_integrity_alg, my_alg)) {
2762 dev_err(DEV, "incompatible setting of the data-integrity-alg\n");
2765 dev_info(DEV, "data-integrity-alg: %s\n",
2766 my_alg[0] ? my_alg : (unsigned char *)"<not-used>");
2772 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2777 * input: alg name, feature name
2778 * return: NULL (alg name was "")
2779 * ERR_PTR(error) if something goes wrong
2780 * or the crypto hash ptr, if it worked out ok. */
2781 struct crypto_hash *drbd_crypto_alloc_digest_safe(const struct drbd_conf *mdev,
2782 const char *alg, const char *name)
2784 struct crypto_hash *tfm;
2789 tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
2791 dev_err(DEV, "Can not allocate \"%s\" as %s (reason: %ld)\n",
2792 alg, name, PTR_ERR(tfm));
2795 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
2796 crypto_free_hash(tfm);
2797 dev_err(DEV, "\"%s\" is not a digest (%s)\n", alg, name);
2798 return ERR_PTR(-EINVAL);
2803 static int receive_SyncParam(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int packet_size)
2806 struct p_rs_param_95 *p = &mdev->data.rbuf.rs_param_95;
2807 unsigned int header_size, data_size, exp_max_sz;
2808 struct crypto_hash *verify_tfm = NULL;
2809 struct crypto_hash *csums_tfm = NULL;
2810 const int apv = mdev->agreed_pro_version;
2811 int *rs_plan_s = NULL;
2814 exp_max_sz = apv <= 87 ? sizeof(struct p_rs_param)
2815 : apv == 88 ? sizeof(struct p_rs_param)
2817 : apv <= 94 ? sizeof(struct p_rs_param_89)
2818 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
2820 if (packet_size > exp_max_sz) {
2821 dev_err(DEV, "SyncParam packet too long: received %u, expected <= %u bytes\n",
2822 packet_size, exp_max_sz);
2827 header_size = sizeof(struct p_rs_param) - sizeof(struct p_header80);
2828 data_size = packet_size - header_size;
2829 } else if (apv <= 94) {
2830 header_size = sizeof(struct p_rs_param_89) - sizeof(struct p_header80);
2831 data_size = packet_size - header_size;
2832 D_ASSERT(data_size == 0);
2834 header_size = sizeof(struct p_rs_param_95) - sizeof(struct p_header80);
2835 data_size = packet_size - header_size;
2836 D_ASSERT(data_size == 0);
2839 /* initialize verify_alg and csums_alg */
2840 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
2842 if (drbd_recv(mdev, &p->head.payload, header_size) != header_size)
2845 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2849 if (data_size > SHARED_SECRET_MAX || data_size == 0) {
2850 dev_err(DEV, "verify-alg of wrong size, "
2851 "peer wants %u, accepting only up to %u bytes\n",
2852 data_size, SHARED_SECRET_MAX);
2856 if (drbd_recv(mdev, p->verify_alg, data_size) != data_size)
2859 /* we expect NUL terminated string */
2860 /* but just in case someone tries to be evil */
2861 D_ASSERT(p->verify_alg[data_size-1] == 0);
2862 p->verify_alg[data_size-1] = 0;
2864 } else /* apv >= 89 */ {
2865 /* we still expect NUL terminated strings */
2866 /* but just in case someone tries to be evil */
2867 D_ASSERT(p->verify_alg[SHARED_SECRET_MAX-1] == 0);
2868 D_ASSERT(p->csums_alg[SHARED_SECRET_MAX-1] == 0);
2869 p->verify_alg[SHARED_SECRET_MAX-1] = 0;
2870 p->csums_alg[SHARED_SECRET_MAX-1] = 0;
2873 if (strcmp(mdev->sync_conf.verify_alg, p->verify_alg)) {
2874 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2875 dev_err(DEV, "Different verify-alg settings. me=\"%s\" peer=\"%s\"\n",
2876 mdev->sync_conf.verify_alg, p->verify_alg);
2879 verify_tfm = drbd_crypto_alloc_digest_safe(mdev,
2880 p->verify_alg, "verify-alg");
2881 if (IS_ERR(verify_tfm)) {
2887 if (apv >= 89 && strcmp(mdev->sync_conf.csums_alg, p->csums_alg)) {
2888 if (mdev->state.conn == C_WF_REPORT_PARAMS) {
2889 dev_err(DEV, "Different csums-alg settings. me=\"%s\" peer=\"%s\"\n",
2890 mdev->sync_conf.csums_alg, p->csums_alg);
2893 csums_tfm = drbd_crypto_alloc_digest_safe(mdev,
2894 p->csums_alg, "csums-alg");
2895 if (IS_ERR(csums_tfm)) {
2902 mdev->sync_conf.rate = be32_to_cpu(p->rate);
2903 mdev->sync_conf.c_plan_ahead = be32_to_cpu(p->c_plan_ahead);
2904 mdev->sync_conf.c_delay_target = be32_to_cpu(p->c_delay_target);
2905 mdev->sync_conf.c_fill_target = be32_to_cpu(p->c_fill_target);
2906 mdev->sync_conf.c_max_rate = be32_to_cpu(p->c_max_rate);
2908 fifo_size = (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
2909 if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
2910 rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_NOIO);
2912 dev_err(DEV, "kzalloc of fifo_buffer failed\n");
2918 spin_lock(&mdev->peer_seq_lock);
2919 /* lock against drbd_nl_syncer_conf() */
2921 strcpy(mdev->sync_conf.verify_alg, p->verify_alg);
2922 mdev->sync_conf.verify_alg_len = strlen(p->verify_alg) + 1;
2923 crypto_free_hash(mdev->verify_tfm);
2924 mdev->verify_tfm = verify_tfm;
2925 dev_info(DEV, "using verify-alg: \"%s\"\n", p->verify_alg);
2928 strcpy(mdev->sync_conf.csums_alg, p->csums_alg);
2929 mdev->sync_conf.csums_alg_len = strlen(p->csums_alg) + 1;
2930 crypto_free_hash(mdev->csums_tfm);
2931 mdev->csums_tfm = csums_tfm;
2932 dev_info(DEV, "using csums-alg: \"%s\"\n", p->csums_alg);
2934 if (fifo_size != mdev->rs_plan_s.size) {
2935 kfree(mdev->rs_plan_s.values);
2936 mdev->rs_plan_s.values = rs_plan_s;
2937 mdev->rs_plan_s.size = fifo_size;
2938 mdev->rs_planed = 0;
2940 spin_unlock(&mdev->peer_seq_lock);
2945 /* just for completeness: actually not needed,
2946 * as this is not reached if csums_tfm was ok. */
2947 crypto_free_hash(csums_tfm);
2948 /* but free the verify_tfm again, if csums_tfm did not work out */
2949 crypto_free_hash(verify_tfm);
2950 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2954 /* warn if the arguments differ by more than 12.5% */
2955 static void warn_if_differ_considerably(struct drbd_conf *mdev,
2956 const char *s, sector_t a, sector_t b)
2959 if (a == 0 || b == 0)
2961 d = (a > b) ? (a - b) : (b - a);
2962 if (d > (a>>3) || d > (b>>3))
2963 dev_warn(DEV, "Considerable difference in %s: %llus vs. %llus\n", s,
2964 (unsigned long long)a, (unsigned long long)b);
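/*
 * Worked example (added): with a = 1000 and b = 860 sectors, d = 140 and
 * a>>3 = 125, so d > a>>3 and the warning fires.  With b = 900, d = 100
 * stays below both 1/8 (12.5%) thresholds and nothing is logged.
 */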
2967 static int receive_sizes(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
2969 struct p_sizes *p = &mdev->data.rbuf.sizes;
2970 enum determine_dev_size dd = unchanged;
2971 sector_t p_size, p_usize, my_usize;
2972 int ldsc = 0; /* local disk size changed */
2973 enum dds_flags ddsf;
2975 p_size = be64_to_cpu(p->d_size);
2976 p_usize = be64_to_cpu(p->u_size);
2978 if (p_size == 0 && mdev->state.disk == D_DISKLESS) {
2979 dev_err(DEV, "some backing storage is needed\n");
2980 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
2984 /* just store the peer's disk size for now.
2985 * we still need to figure out whether we accept that. */
2986 mdev->p_size = p_size;
2988 if (get_ldev(mdev)) {
2989 warn_if_differ_considerably(mdev, "lower level device sizes",
2990 p_size, drbd_get_max_capacity(mdev->ldev));
2991 warn_if_differ_considerably(mdev, "user requested size",
2992 p_usize, mdev->ldev->dc.disk_size);
2994 /* if this is the first connect, or an otherwise expected
2995 * param exchange, choose the minimum */
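/* Added note: min_not_zero() treats 0 as "not configured", e.g.
 * min_not_zero(0, 700) == 700 and min_not_zero(500, 700) == 500,
 * so an unset local disk_size does not clamp the peer's u_size. */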
2996 if (mdev->state.conn == C_WF_REPORT_PARAMS)
2997 p_usize = min_not_zero((sector_t)mdev->ldev->dc.disk_size,
3000 my_usize = mdev->ldev->dc.disk_size;
3002 if (mdev->ldev->dc.disk_size != p_usize) {
3003 mdev->ldev->dc.disk_size = p_usize;
3004 dev_info(DEV, "Peer sets u_size to %lu sectors\n",
3005 (unsigned long)mdev->ldev->dc.disk_size);
3008 /* Never shrink a device with usable data during connect.
3009 But allow online shrinking if we are connected. */
3010 if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
3011 drbd_get_capacity(mdev->this_bdev) &&
3012 mdev->state.disk >= D_OUTDATED &&
3013 mdev->state.conn < C_CONNECTED) {
3014 dev_err(DEV, "The peer's disk size is too small!\n");
3015 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3016 mdev->ldev->dc.disk_size = my_usize;
3023 ddsf = be16_to_cpu(p->dds_flags);
3024 if (get_ldev(mdev)) {
3025 dd = drbd_determine_dev_size(mdev, ddsf);
3027 if (dd == dev_size_error)
3031 /* I am diskless, need to accept the peer's size. */
3032 drbd_set_my_capacity(mdev, p_size);
3035 mdev->peer_max_bio_size = be32_to_cpu(p->max_bio_size);
3036 drbd_reconsider_max_bio_size(mdev);
3038 if (get_ldev(mdev)) {
3039 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev)) {
3040 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
3047 if (mdev->state.conn > C_WF_REPORT_PARAMS) {
3048 if (be64_to_cpu(p->c_size) !=
3049 drbd_get_capacity(mdev->this_bdev) || ldsc) {
3050 /* we have different sizes, probably peer
3051 * needs to know my new size... */
3052 drbd_send_sizes(mdev, 0, ddsf);
3054 if (drbd_test_and_clear_flag(mdev, RESIZE_PENDING) ||
3055 (dd == grew && mdev->state.conn == C_CONNECTED)) {
3056 if (mdev->state.pdsk >= D_INCONSISTENT &&
3057 mdev->state.disk >= D_INCONSISTENT) {
3058 if (ddsf & DDSF_NO_RESYNC)
3059 dev_info(DEV, "Resync of new storage suppressed with --assume-clean\n");
3061 resync_after_online_grow(mdev);
3063 drbd_set_flag(mdev, RESYNC_AFTER_NEG);
3070 static int receive_uuids(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3072 struct p_uuids *p = &mdev->data.rbuf.uuids;
3074 int i, updated_uuids = 0;
3076 p_uuid = kmalloc(sizeof(u64)*UI_EXTENDED_SIZE, GFP_NOIO);
3078 for (i = UI_CURRENT; i < UI_EXTENDED_SIZE; i++)
3079 p_uuid[i] = be64_to_cpu(p->uuid[i]);
3081 kfree(mdev->p_uuid);
3082 mdev->p_uuid = p_uuid;
3084 if (mdev->state.conn < C_CONNECTED &&
3085 mdev->state.disk < D_INCONSISTENT &&
3086 mdev->state.role == R_PRIMARY &&
3087 (mdev->ed_uuid & ~((u64)1)) != (p_uuid[UI_CURRENT] & ~((u64)1))) {
3088 dev_err(DEV, "Can only connect to data with current UUID=%016llX\n",
3089 (unsigned long long)mdev->ed_uuid);
3090 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3094 if (get_ldev(mdev)) {
3095 int skip_initial_sync =
3096 mdev->state.conn == C_CONNECTED &&
3097 mdev->agreed_pro_version >= 90 &&
3098 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED &&
3099 (p_uuid[UI_FLAGS] & 8);
3100 if (skip_initial_sync) {
3101 dev_info(DEV, "Accepted new current UUID, preparing to skip initial sync\n");
3102 drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
3103 "clear_n_write from receive_uuids",
3104 BM_LOCKED_TEST_ALLOWED);
3105 _drbd_uuid_set(mdev, UI_CURRENT, p_uuid[UI_CURRENT]);
3106 _drbd_uuid_set(mdev, UI_BITMAP, 0);
3107 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
3113 } else if (mdev->state.disk < D_INCONSISTENT &&
3114 mdev->state.role == R_PRIMARY) {
3115 /* I am a diskless primary, the peer just created a new current UUID
3117 updated_uuids = drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3120 /* Before we test for the disk state, we should wait until a possibly
3121 ongoing cluster wide state change has finished. That is important if
3122 we are primary and are detaching from our disk. We need to see the
3123 new disk state... */
3124 wait_event(mdev->misc_wait, !drbd_test_flag(mdev, CLUSTER_ST_CHANGE));
3125 if (mdev->state.conn >= C_CONNECTED && mdev->state.disk < D_INCONSISTENT)
3126 updated_uuids |= drbd_set_ed_uuid(mdev, p_uuid[UI_CURRENT]);
3129 drbd_print_uuids(mdev, "receiver updated UUIDs to");
3135 * convert_state() - Converts the peer's view of the cluster state to our point of view
3136 * @ps: The state as seen by the peer.
3138 static union drbd_state convert_state(union drbd_state ps)
3140 union drbd_state ms;
3142 static enum drbd_conns c_tab[] = {
3143 [C_CONNECTED] = C_CONNECTED,
3145 [C_STARTING_SYNC_S] = C_STARTING_SYNC_T,
3146 [C_STARTING_SYNC_T] = C_STARTING_SYNC_S,
3147 [C_DISCONNECTING] = C_TEAR_DOWN, /* C_NETWORK_FAILURE, */
3148 [C_VERIFY_S] = C_VERIFY_T,
3154 ms.conn = c_tab[ps.conn];
3159 ms.peer_isp = (ps.aftr_isp | ps.user_isp);
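/*
 * Added example: the table is its own mirror image.  If the peer reports
 * C_STARTING_SYNC_S ("I become sync source"), our view of the same event
 * is C_STARTING_SYNC_T, and vice versa; symmetric states such as
 * C_CONNECTED map to themselves.
 */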
3164 static int receive_req_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3166 struct p_req_state *p = &mdev->data.rbuf.req_state;
3167 union drbd_state mask, val;
3168 enum drbd_state_rv rv;
3170 mask.i = be32_to_cpu(p->mask);
3171 val.i = be32_to_cpu(p->val);
3173 if (drbd_test_flag(mdev, DISCARD_CONCURRENT) &&
3174 drbd_test_flag(mdev, CLUSTER_ST_CHANGE)) {
3175 drbd_send_sr_reply(mdev, SS_CONCURRENT_ST_CHG);
3179 mask = convert_state(mask);
3180 val = convert_state(val);
3182 rv = drbd_change_state(mdev, CS_VERBOSE, mask, val);
3184 drbd_send_sr_reply(mdev, rv);
3190 static int receive_state(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3192 struct p_state *p = &mdev->data.rbuf.state;
3193 union drbd_state os, ns, peer_state;
3194 enum drbd_disk_state real_peer_disk;
3195 enum chg_state_flags cs_flags;
3198 peer_state.i = be32_to_cpu(p->state);
3200 real_peer_disk = peer_state.disk;
3201 if (peer_state.disk == D_NEGOTIATING) {
3202 real_peer_disk = mdev->p_uuid[UI_FLAGS] & 4 ? D_INCONSISTENT : D_CONSISTENT;
3203 dev_info(DEV, "real peer disk state = %s\n", drbd_disk_str(real_peer_disk));
3206 spin_lock_irq(&mdev->req_lock);
3208 os = ns = mdev->state;
3209 spin_unlock_irq(&mdev->req_lock);
3211 /* If some other part of the code (asender thread, timeout)
3212 * already decided to close the connection again,
3213 * we must not "re-establish" it here. */
3214 if (os.conn <= C_TEAR_DOWN)
3217 /* If this is the "end of sync" confirmation, usually the peer disk
3218 * transitions from D_INCONSISTENT to D_UP_TO_DATE. For empty (0 bits
3219 * set) resync started in PausedSyncT, or if the timing of pause-/
3220 * unpause-sync events has been "just right", the peer disk may
3221 * transition from D_CONSISTENT to D_UP_TO_DATE as well.
3223 if ((os.pdsk == D_INCONSISTENT || os.pdsk == D_CONSISTENT) &&
3224 real_peer_disk == D_UP_TO_DATE &&
3225 os.conn > C_CONNECTED && os.disk == D_UP_TO_DATE) {
3226 /* If we are (becoming) SyncSource, but peer is still in sync
3227 * preparation, ignore its uptodate-ness to avoid flapping, it
3228 * will change to inconsistent once the peer reaches active
3230 * It may have changed syncer-paused flags, however, so we
3231 * cannot ignore this completely. */
3232 if (peer_state.conn > C_CONNECTED &&
3233 peer_state.conn < C_SYNC_SOURCE)
3234 real_peer_disk = D_INCONSISTENT;
3236 /* if peer_state changes to connected at the same time,
3237 * it explicitly notifies us that it finished resync.
3238 * Maybe we should finish it up, too? */
3239 else if (os.conn >= C_SYNC_SOURCE &&
3240 peer_state.conn == C_CONNECTED) {
3241 if (drbd_bm_total_weight(mdev) <= mdev->rs_failed)
3242 drbd_resync_finished(mdev);
3247 /* explicit verify finished notification, stop sector reached. */
3248 if (os.conn == C_VERIFY_T && os.disk == D_UP_TO_DATE &&
3249 peer_state.conn == C_CONNECTED && real_peer_disk == D_UP_TO_DATE) {
3251 drbd_resync_finished(mdev);
3255 /* peer says his disk is inconsistent, while we think it is uptodate,
3256 * and this happens while the peer still thinks we have a sync going on,
3257 * but we think we are already done with the sync.
3258 * We ignore this to avoid flapping pdsk.
3259 * This should not happen if the peer is a recent version of drbd. */
3260 if (os.pdsk == D_UP_TO_DATE && real_peer_disk == D_INCONSISTENT &&
3261 os.conn == C_CONNECTED && peer_state.conn > C_SYNC_SOURCE)
3262 real_peer_disk = D_UP_TO_DATE;
3264 if (ns.conn == C_WF_REPORT_PARAMS)
3265 ns.conn = C_CONNECTED;
3267 if (peer_state.conn == C_AHEAD)
3270 if (mdev->p_uuid && peer_state.disk >= D_NEGOTIATING &&
3271 get_ldev_if_state(mdev, D_NEGOTIATING)) {
3272 int cr; /* consider resync */
3274 /* if we established a new connection */
3275 cr = (os.conn < C_CONNECTED);
3276 /* if we had an established connection
3277 * and one of the nodes newly attaches a disk */
3278 cr |= (os.conn == C_CONNECTED &&
3279 (peer_state.disk == D_NEGOTIATING ||
3280 os.disk == D_NEGOTIATING));
3281 /* if we have both been inconsistent, and the peer has been
3282 * forced to be UpToDate with --overwrite-data */
3283 cr |= drbd_test_flag(mdev, CONSIDER_RESYNC);
3284 /* if we had been plain connected, and the admin requested to
3285 * start a sync by "invalidate" or "invalidate-remote" */
3286 cr |= (os.conn == C_CONNECTED &&
3287 (peer_state.conn >= C_STARTING_SYNC_S &&
3288 peer_state.conn <= C_WF_BITMAP_T));
3291 ns.conn = drbd_sync_handshake(mdev, peer_state.role, real_peer_disk);
3294 if (ns.conn == C_MASK) {
3295 ns.conn = C_CONNECTED;
3296 if (mdev->state.disk == D_NEGOTIATING) {
3297 drbd_force_state(mdev, NS(disk, D_FAILED));
3298 } else if (peer_state.disk == D_NEGOTIATING) {
3299 dev_err(DEV, "Disk attach process on the peer node was aborted.\n");
3300 peer_state.disk = D_DISKLESS;
3301 real_peer_disk = D_DISKLESS;
3303 if (drbd_test_and_clear_flag(mdev, CONN_DRY_RUN))
3305 D_ASSERT(os.conn == C_WF_REPORT_PARAMS);
3306 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3312 spin_lock_irq(&mdev->req_lock);
3313 if (mdev->state.i != os.i)
3315 drbd_clear_flag(mdev, CONSIDER_RESYNC);
3316 ns.peer = peer_state.role;
3317 ns.pdsk = real_peer_disk;
3318 ns.peer_isp = (peer_state.aftr_isp | peer_state.user_isp);
3319 if ((ns.conn == C_CONNECTED || ns.conn == C_WF_BITMAP_S) && ns.disk == D_NEGOTIATING)
3320 ns.disk = mdev->new_state_tmp.disk;
3321 cs_flags = CS_VERBOSE + (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED ? 0 : CS_HARD);
3322 if (ns.pdsk == D_CONSISTENT && is_susp(ns) && ns.conn == C_CONNECTED && os.conn < C_CONNECTED &&
3323 drbd_test_flag(mdev, NEW_CUR_UUID)) {
3324 /* Do not allow tl_restart(resend) for a rebooted peer. We can only allow this
3325 for temporary network outages! */
3326 spin_unlock_irq(&mdev->req_lock);
3327 dev_err(DEV, "Aborting Connect, cannot thaw IO with an only Consistent peer\n");
3329 drbd_uuid_new_current(mdev);
3330 drbd_clear_flag(mdev, NEW_CUR_UUID);
3331 drbd_force_state(mdev, NS2(conn, C_PROTOCOL_ERROR, susp, 0));
3334 rv = _drbd_set_state(mdev, ns, cs_flags, NULL);
3336 spin_unlock_irq(&mdev->req_lock);
3338 if (rv < SS_SUCCESS) {
3339 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
3343 if (os.conn > C_WF_REPORT_PARAMS) {
3344 if (ns.conn > C_CONNECTED && peer_state.conn <= C_CONNECTED &&
3345 peer_state.disk != D_NEGOTIATING ) {
3346 /* we want resync, peer has not yet decided to sync... */
3347 /* Nowadays only used when forcing a node into primary role and
3348 setting its disk to UpToDate with that */
3349 drbd_send_uuids(mdev);
3350 drbd_send_current_state(mdev);
3354 mdev->net_conf->want_lose = 0;
3356 drbd_md_sync(mdev); /* update connected indicator, la_size, ... */
3361 static int receive_sync_uuid(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3363 struct p_rs_uuid *p = &mdev->data.rbuf.rs_uuid;
3365 wait_event(mdev->misc_wait,
3366 mdev->state.conn == C_WF_SYNC_UUID ||
3367 mdev->state.conn == C_BEHIND ||
3368 mdev->state.conn < C_CONNECTED ||
3369 mdev->state.disk < D_NEGOTIATING);
3371 /* D_ASSERT( mdev->state.conn == C_WF_SYNC_UUID ); */
3373 /* Here the _drbd_uuid_ functions are right, current should
3374 _not_ be rotated into the history */
3375 if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
3376 _drbd_uuid_set(mdev, UI_CURRENT, be64_to_cpu(p->uuid));
3377 _drbd_uuid_set(mdev, UI_BITMAP, 0UL);
3379 drbd_print_uuids(mdev, "updated sync uuid");
3380 drbd_start_resync(mdev, C_SYNC_TARGET);
3384 dev_err(DEV, "Ignoring SyncUUID packet!\n");
3390 * receive_bitmap_plain
3392 * Return 0 when done, 1 when another iteration is needed, and a negative error
3393 * code upon failure.
3396 receive_bitmap_plain(struct drbd_conf *mdev, unsigned int data_size,
3397 unsigned long *buffer, struct bm_xfer_ctx *c)
3399 unsigned num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
3400 unsigned want = num_words * sizeof(long);
3403 if (want != data_size) {
3404 dev_err(DEV, "%s:want (%u) != data_size (%u)\n", __func__, want, data_size);
3409 err = drbd_recv(mdev, buffer, want);
3416 drbd_bm_merge_lel(mdev, c->word_offset, num_words, buffer);
3418 c->word_offset += num_words;
3419 c->bit_offset = c->word_offset * BITS_PER_LONG;
3420 if (c->bit_offset > c->bm_bits)
3421 c->bit_offset = c->bm_bits;
3429 * Return 0 when done, 1 when another iteration is needed, and a negative error
3430 * code upon failure.
3433 recv_bm_rle_bits(struct drbd_conf *mdev,
3434 struct p_compressed_bm *p,
3435 struct bm_xfer_ctx *c)
3437 struct bitstream bs;
3441 unsigned long s = c->bit_offset;
3443 int len = be16_to_cpu(p->head.length) - (sizeof(*p) - sizeof(p->head));
3444 int toggle = DCBP_get_start(p);
3448 bitstream_init(&bs, p->code, len, DCBP_get_pad_bits(p));
3450 bits = bitstream_get_bits(&bs, &look_ahead, 64);
3454 for (have = bits; have > 0; s += rl, toggle = !toggle) {
3455 bits = vli_decode_bits(&rl, look_ahead);
3461 if (e >= c->bm_bits) {
3462 dev_err(DEV, "bitmap overflow (e:%lu) while decoding bm RLE packet\n", e);
3465 _drbd_bm_set_bits(mdev, s, e);
3469 dev_err(DEV, "bitmap decoding error: h:%d b:%d la:0x%08llx l:%u/%u\n",
3470 have, bits, look_ahead,
3471 (unsigned int)(bs.cur.b - p->code),
3472 (unsigned int)bs.buf_len);
3475 look_ahead >>= bits;
3478 bits = bitstream_get_bits(&bs, &tmp, 64 - have);
3481 look_ahead |= tmp << have;
3486 bm_xfer_ctx_bit_to_word_offset(c);
3488 return (s != c->bm_bits);
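/*
 * Added sketch of the encoding decoded above (example values hypothetical):
 * the payload is a sequence of VLI-encoded run lengths of alternating
 * clear/set bit runs, and DCBP_get_start() says whether the first run is
 * "set".  E.g. start = 0 with run lengths 5, 3, 9 describes bits 0..4
 * clear, 5..7 set, 8..16 clear, so _drbd_bm_set_bits(mdev, 5, 7) is called
 * exactly once, for the second run.
 */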
3494 * Return 0 when done, 1 when another iteration is needed, and a negative error
3495 * code upon failure.
3498 decode_bitmap_c(struct drbd_conf *mdev,
3499 struct p_compressed_bm *p,
3500 struct bm_xfer_ctx *c)
3502 if (DCBP_get_code(p) == RLE_VLI_Bits)
3503 return recv_bm_rle_bits(mdev, p, c);
3505 /* other variants had been implemented for evaluation,
3506 * but have been dropped as this one turned out to be "best"
3507 * during all our tests. */
3509 dev_err(DEV, "receive_bitmap_c: unknown encoding %u\n", p->encoding);
3510 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3514 void INFO_bm_xfer_stats(struct drbd_conf *mdev,
3515 const char *direction, struct bm_xfer_ctx *c)
3517 /* what would it take to transfer it "plaintext" */
3518 unsigned plain = sizeof(struct p_header80) *
3519 ((c->bm_words+BM_PACKET_WORDS-1)/BM_PACKET_WORDS+1)
3520 + c->bm_words * sizeof(long);
3521 unsigned total = c->bytes[0] + c->bytes[1];
3524 /* total cannot be zero, but just in case: */
3528 /* don't report if not compressed */
3532 /* total < plain. check for overflow, still */
3533 r = (total > UINT_MAX/1000) ? (total / (plain/1000))
3534 : (1000 * total / plain);
3540 dev_info(DEV, "%s bitmap stats [Bytes(packets)]: plain %u(%u), RLE %u(%u), "
3541 "total %u; compression: %u.%u%%\n",
3543 c->bytes[1], c->packets[1],
3544 c->bytes[0], c->packets[0],
3545 total, r/10, r % 10);
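/*
 * Added note: r is computed in permille so that integer arithmetic keeps
 * one decimal digit for the "%u.%u%%" format above: r == 23 prints as
 * "2.3%".  The UINT_MAX/1000 test avoids overflowing the 1000 * total
 * multiplication for very large transfers.
 */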
3548 /* Since we are processing the bitfield from lower addresses to higher,
3549 it does not matter if we process it in 32 bit or 64 bit chunks,
3550 as long as it is little endian. (Understand it as a byte stream,
3551 beginning with the lowest byte...) If we used big endian,
3552 we would need to process it from the highest address to the lowest
3553 in order to be agnostic to the 32 vs 64 bit issue.
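/*
 * Added example: the 64 bit word 0x0807060504030201 stored little endian
 * is the byte stream 01 02 03 04 05 06 07 08 -- exactly the stream that
 * the two 32 bit words 0x04030201, 0x08070605 produce.  That is why 32 bit
 * and 64 bit peers can exchange the bitmap unmodified.
 */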
3556 static int receive_bitmap(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3558 struct bm_xfer_ctx c;
3562 struct p_header80 *h = &mdev->data.rbuf.header.h80;
3564 drbd_bm_lock(mdev, "receive bitmap", BM_LOCKED_SET_ALLOWED);
3565 /* you are supposed to send additional out-of-sync information
3566 * if you actually set bits during this phase */
3568 /* maybe we should use some per thread scratch page,
3569 * and allocate that during initial device creation? */
3570 buffer = (unsigned long *) __get_free_page(GFP_NOIO);
3572 dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
3576 c = (struct bm_xfer_ctx) {
3577 .bm_bits = drbd_bm_bits(mdev),
3578 .bm_words = drbd_bm_words(mdev),
3582 if (cmd == P_BITMAP) {
3583 err = receive_bitmap_plain(mdev, data_size, buffer, &c);
3584 } else if (cmd == P_COMPRESSED_BITMAP) {
3585 /* MAYBE: sanity check that we speak proto >= 90,
3586 * and the feature is enabled! */
3587 struct p_compressed_bm *p;
3589 if (data_size > BM_PACKET_PAYLOAD_BYTES) {
3590 dev_err(DEV, "ReportCBitmap packet too large\n");
3593 /* use the page buffer */
3595 memcpy(p, h, sizeof(*h));
3596 if (drbd_recv(mdev, p->head.payload, data_size) != data_size)
3598 if (data_size <= (sizeof(*p) - sizeof(p->head))) {
3599 dev_err(DEV, "ReportCBitmap packet too small (l:%u)\n", data_size);
3602 err = decode_bitmap_c(mdev, p, &c);
3604 dev_warn(DEV, "receive_bitmap: cmd neither ReportBitMap nor ReportCBitMap (is 0x%x)", cmd);
3608 c.packets[cmd == P_BITMAP]++;
3609 c.bytes[cmd == P_BITMAP] += sizeof(struct p_header80) + data_size;
3616 if (!drbd_recv_header(mdev, &cmd, &data_size))
3620 INFO_bm_xfer_stats(mdev, "receive", &c);
3622 if (mdev->state.conn == C_WF_BITMAP_T) {
3623 enum drbd_state_rv rv;
3625 ok = !drbd_send_bitmap(mdev);
3628 /* Omit CS_ORDERED with this state transition to avoid deadlocks. */
3629 rv = _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
3630 D_ASSERT(rv == SS_SUCCESS);
3631 } else if (mdev->state.conn != C_WF_BITMAP_S) {
3632 /* admin may have requested C_DISCONNECTING,
3633 * other threads may have noticed network errors */
3634 dev_info(DEV, "unexpected cstate (%s) in receive_bitmap\n",
3635 drbd_conn_str(mdev->state.conn));
3640 drbd_bm_unlock(mdev);
3641 if (ok && mdev->state.conn == C_WF_BITMAP_S)
3642 drbd_start_resync(mdev, C_SYNC_SOURCE);
3643 free_page((unsigned long) buffer);
3647 static int receive_skip(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3649 /* TODO zero copy sink :) */
3650 static char sink[128];
3653 dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n",
3658 want = min_t(int, size, sizeof(sink));
3659 r = drbd_recv(mdev, sink, want);
3660 ERR_IF(r <= 0) break;
3666 static int receive_UnplugRemote(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3668 /* Make sure we've acked all the TCP data associated
3669 * with the data requests being unplugged */
3670 drbd_tcp_quickack(mdev->data.socket);
3675 static int receive_out_of_sync(struct drbd_conf *mdev, enum drbd_packets cmd, unsigned int data_size)
3677 struct p_block_desc *p = &mdev->data.rbuf.block_desc;
3679 switch (mdev->state.conn) {
3680 case C_WF_SYNC_UUID:
3685 dev_err(DEV, "ASSERT FAILED cstate = %s, expected: WFSyncUUID|WFBitMapT|Behind\n",
3686 drbd_conn_str(mdev->state.conn));
3689 drbd_set_out_of_sync(mdev, be64_to_cpu(p->sector), be32_to_cpu(p->blksize));
3694 typedef int (*drbd_cmd_handler_f)(struct drbd_conf *, enum drbd_packets cmd, unsigned int to_receive);
3699 drbd_cmd_handler_f function;
3702 static struct data_cmd drbd_cmd_handler[] = {
3703 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
3704 [P_DATA_REPLY] = { 1, sizeof(struct p_data), receive_DataReply },
3705 [P_RS_DATA_REPLY] = { 1, sizeof(struct p_data), receive_RSDataReply } ,
3706 [P_BARRIER] = { 0, sizeof(struct p_barrier), receive_Barrier } ,
3707 [P_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3708 [P_COMPRESSED_BITMAP] = { 1, sizeof(struct p_header80), receive_bitmap } ,
3709 [P_UNPLUG_REMOTE] = { 0, sizeof(struct p_header80), receive_UnplugRemote },
3710 [P_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3711 [P_RS_DATA_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3712 [P_SYNC_PARAM] = { 1, sizeof(struct p_header80), receive_SyncParam },
3713 [P_SYNC_PARAM89] = { 1, sizeof(struct p_header80), receive_SyncParam },
3714 [P_PROTOCOL] = { 1, sizeof(struct p_protocol), receive_protocol },
3715 [P_UUIDS] = { 0, sizeof(struct p_uuids), receive_uuids },
3716 [P_SIZES] = { 0, sizeof(struct p_sizes), receive_sizes },
3717 [P_STATE] = { 0, sizeof(struct p_state), receive_state },
3718 [P_STATE_CHG_REQ] = { 0, sizeof(struct p_req_state), receive_req_state },
3719 [P_SYNC_UUID] = { 0, sizeof(struct p_rs_uuid), receive_sync_uuid },
3720 [P_OV_REQUEST] = { 0, sizeof(struct p_block_req), receive_DataRequest },
3721 [P_OV_REPLY] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3722 [P_CSUM_RS_REQUEST] = { 1, sizeof(struct p_block_req), receive_DataRequest },
3723 [P_DELAY_PROBE] = { 0, sizeof(struct p_delay_probe93), receive_skip },
3724 [P_OUT_OF_SYNC] = { 0, sizeof(struct p_block_desc), receive_out_of_sync },
3725 /* anything missing from this table is in
3726 * the asender_tbl, see get_asender_cmd */
3727 [P_MAX_CMD] = { 0, 0, NULL },
3730 /* All handler functions that expect a sub-header get that sub-header in
3731 mdev->data.rbuf.header.head.payload.
3733 Usually the callback can find the usual p_header in
3734 mdev->data.rbuf.header.head, but it may not rely on that: there is also p_header95!
3737 static void drbdd(struct drbd_conf *mdev)
3739 union p_header *header = &mdev->data.rbuf.header;
3740 unsigned int packet_size;
3741 enum drbd_packets cmd;
3742 size_t shs; /* sub header size */
3745 while (get_t_state(&mdev->receiver) == Running) {
3746 drbd_thread_current_set_cpu(mdev);
3747 if (!drbd_recv_header(mdev, &cmd, &packet_size))
3750 if (unlikely(cmd >= P_MAX_CMD || !drbd_cmd_handler[cmd].function)) {
3751 dev_err(DEV, "unknown packet type %d, l: %d!\n", cmd, packet_size);
3755 shs = drbd_cmd_handler[cmd].pkt_size - sizeof(union p_header);
3756 if (packet_size - shs > 0 && !drbd_cmd_handler[cmd].expect_payload) {
3757 dev_err(DEV, "No payload expected %s l:%d\n", cmdname(cmd), packet_size);
3762 rv = drbd_recv(mdev, &header->h80.payload, shs);
3763 if (unlikely(rv != shs)) {
3764 if (!signal_pending(current))
3765 dev_warn(DEV, "short read while reading sub header: rv=%d\n", rv);
3770 rv = drbd_cmd_handler[cmd].function(mdev, cmd, packet_size - shs);
3772 if (unlikely(!rv)) {
3773 dev_err(DEV, "error receiving %s, l: %d!\n",
3774 cmdname(cmd), packet_size);
3781 drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
3783 /* If we leave here, we probably want to update at least the
3784 * "Connected" indicator on stable storage. Do so explicitly here. */
3788 void drbd_flush_workqueue(struct drbd_conf *mdev)
3790 struct drbd_wq_barrier barr;
3792 barr.w.cb = w_prev_work_done;
3793 init_completion(&barr.done);
3794 drbd_queue_work(&mdev->data.work, &barr.w);
3795 wait_for_completion(&barr.done);
3798 void drbd_free_tl_hash(struct drbd_conf *mdev)
3800 struct hlist_head *h;
3802 spin_lock_irq(&mdev->req_lock);
3804 if (!mdev->tl_hash || mdev->state.conn != C_STANDALONE) {
3805 spin_unlock_irq(&mdev->req_lock);
3809 for (h = mdev->ee_hash; h < mdev->ee_hash + mdev->ee_hash_s; h++)
3811 dev_err(DEV, "ASSERT FAILED ee_hash[%u].first == %p, expected NULL\n",
3812 (int)(h - mdev->ee_hash), h->first);
3813 kfree(mdev->ee_hash);
3814 mdev->ee_hash = NULL;
3815 mdev->ee_hash_s = 0;
3817 /* We may not have had the chance to wait for all locally pending
3818 * application requests. The hlist_add_fake() prevents access after
3819 * free on master bio completion. */
3820 for (h = mdev->tl_hash; h < mdev->tl_hash + mdev->tl_hash_s; h++) {
3821 struct drbd_request *req;
3822 struct hlist_node *pos, *n;
3823 hlist_for_each_entry_safe(req, pos, n, h, collision) {
3824 hlist_del_init(&req->collision);
3825 hlist_add_fake(&req->collision);
3829 kfree(mdev->tl_hash);
3830 mdev->tl_hash = NULL;
3831 mdev->tl_hash_s = 0;
3832 spin_unlock_irq(&mdev->req_lock);
3835 static void drbd_disconnect(struct drbd_conf *mdev)
3837 enum drbd_fencing_p fp;
3838 union drbd_state os, ns;
3839 int rv = SS_UNKNOWN_ERROR;
3842 if (mdev->state.conn == C_STANDALONE)
3845 /* We are about to start the cleanup after connection loss.
3846 * Make sure drbd_make_request knows about that.
3847 * Usually we should be in some network failure state already,
3848 * but just in case we are not, we fix it up here.
3850 drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
3852 /* asender does not clean up anything. it must not interfere, either */
3853 drbd_thread_stop(&mdev->asender);
3854 drbd_free_sock(mdev);
3856 /* wait for current activity to cease. */
3857 spin_lock_irq(&mdev->req_lock);
3858 _drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
3859 _drbd_wait_ee_list_empty(mdev, &mdev->sync_ee);
3860 _drbd_wait_ee_list_empty(mdev, &mdev->read_ee);
3861 spin_unlock_irq(&mdev->req_lock);
3863 /* We do not have data structures that would allow us to
3864 * get the rs_pending_cnt down to 0 again.
3865 * * On C_SYNC_TARGET we do not have any data structures describing
3866 * the pending RSDataRequests we have sent.
3867 * * On C_SYNC_SOURCE there is no data structure that tracks
3868 * the P_RS_DATA_REPLY blocks that we sent to the SyncTarget.
3869 * And no, it is not the sum of the reference counts in the
3870 * resync_LRU. The resync_LRU tracks the whole operation including
3871 * the disk-IO, while the rs_pending_cnt only tracks the blocks
3873 drbd_rs_cancel_all(mdev);
3875 mdev->rs_failed = 0;
3876 atomic_set(&mdev->rs_pending_cnt, 0);
3877 wake_up(&mdev->misc_wait);
3879 /* make sure syncer is stopped and w_resume_next_sg queued */
3880 del_timer_sync(&mdev->resync_timer);
3881 resync_timer_fn((unsigned long)mdev);
3883 /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier,
3884 * w_make_resync_request etc. which may still be on the worker queue
3885 * to be "canceled" */
3886 drbd_flush_workqueue(mdev);
3888 /* This also does reclaim_net_ee(). If we do this too early, we might
3889 * miss some resync ee and pages.*/
3890 drbd_process_done_ee(mdev);
3892 kfree(mdev->p_uuid);
3893 mdev->p_uuid = NULL;
3895 if (!is_susp(mdev->state))
3898 dev_info(DEV, "Connection closed\n");
3903 if (get_ldev(mdev)) {
3904 fp = mdev->ldev->dc.fencing;
3908 if (mdev->state.role == R_PRIMARY && fp >= FP_RESOURCE && mdev->state.pdsk >= D_UNKNOWN)
3909 drbd_try_outdate_peer_async(mdev);
3911 spin_lock_irq(&mdev->req_lock);
3913 if (os.conn >= C_UNCONNECTED) {
3914 /* Do not restart in case we are C_DISCONNECTING */
3916 ns.conn = C_UNCONNECTED;
3917 rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
3919 spin_unlock_irq(&mdev->req_lock);
3921 if (os.conn == C_DISCONNECTING) {
3922 wait_event(mdev->net_cnt_wait, atomic_read(&mdev->net_cnt) == 0);
3924 crypto_free_hash(mdev->cram_hmac_tfm);
3925 mdev->cram_hmac_tfm = NULL;
3927 kfree(mdev->net_conf);
3928 mdev->net_conf = NULL;
3929 drbd_request_state(mdev, NS(conn, C_STANDALONE));
3932 /* serialize with bitmap writeout triggered by the state change,
3934 wait_event(mdev->misc_wait, !drbd_test_flag(mdev, BITMAP_IO));
3936 /* tcp_close and release of sendpage pages can be deferred. I don't
3937 * want to use SO_LINGER, because apparently it can be deferred for
3938 * more than 20 seconds (longest time I checked).
3940 * Actually we don't care for exactly when the network stack does its
3941 * put_page(), but release our reference on these pages right here.
3943 i = drbd_release_ee(mdev, &mdev->net_ee);
3945 dev_info(DEV, "net_ee not empty, killed %u entries\n", i);
3946 i = atomic_read(&mdev->pp_in_use_by_net);
3948 dev_info(DEV, "pp_in_use_by_net = %d, expected 0\n", i);
3949 i = atomic_read(&mdev->pp_in_use);
3951 dev_info(DEV, "pp_in_use = %d, expected 0\n", i);
3953 D_ASSERT(list_empty(&mdev->read_ee));
3954 D_ASSERT(list_empty(&mdev->active_ee));
3955 D_ASSERT(list_empty(&mdev->sync_ee));
3956 D_ASSERT(list_empty(&mdev->done_ee));
3958 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
3959 atomic_set(&mdev->current_epoch->epoch_size, 0);
3960 D_ASSERT(list_empty(&mdev->current_epoch->list));
3964 * We support PRO_VERSION_MIN to PRO_VERSION_MAX. The protocol version
3965 * we can agree on is stored in agreed_pro_version.
3967 * feature flags and the reserved array should be enough room for future
3968 * enhancements of the handshake protocol, and possible plugins...
3970 * for now, they are expected to be zero, but ignored.
3972 static int drbd_send_handshake(struct drbd_conf *mdev)
3974 /* ASSERT current == mdev->receiver ... */
3975 struct p_handshake *p = &mdev->data.sbuf.handshake;
3978 if (mutex_lock_interruptible(&mdev->data.mutex)) {
3979 dev_err(DEV, "interrupted during initial handshake\n");
3980 return 0; /* interrupted. not ok. */
3983 if (mdev->data.socket == NULL) {
3984 mutex_unlock(&mdev->data.mutex);
3988 memset(p, 0, sizeof(*p));
3989 p->protocol_min = cpu_to_be32(PRO_VERSION_MIN);
3990 p->protocol_max = cpu_to_be32(PRO_VERSION_MAX);
3991 ok = _drbd_send_cmd( mdev, mdev->data.socket, P_HAND_SHAKE,
3992 (struct p_header80 *)p, sizeof(*p), 0 );
3993 mutex_unlock(&mdev->data.mutex);
3999 * 1 yes, we have a valid connection
4000 * 0 oops, did not work out, please try again
4001 * -1 peer talks different language,
4002 * no point in trying again, please go standalone.
4004 static int drbd_do_handshake(struct drbd_conf *mdev)
4006 /* ASSERT current == mdev->receiver ... */
4007 struct p_handshake *p = &mdev->data.rbuf.handshake;
4008 const int expect = sizeof(struct p_handshake) - sizeof(struct p_header80);
4009 unsigned int length;
4010 enum drbd_packets cmd;
4013 rv = drbd_send_handshake(mdev);
4017 rv = drbd_recv_header(mdev, &cmd, &length);
4021 if (cmd != P_HAND_SHAKE) {
4022 dev_err(DEV, "expected HandShake packet, received: %s (0x%04x)\n",
4027 if (length != expect) {
4028 dev_err(DEV, "expected HandShake length: %u, received: %u\n",
4033 rv = drbd_recv(mdev, &p->head.payload, expect);
4036 if (!signal_pending(current))
4037 dev_warn(DEV, "short read receiving handshake packet: l=%u\n", rv);
4041 p->protocol_min = be32_to_cpu(p->protocol_min);
4042 p->protocol_max = be32_to_cpu(p->protocol_max);
4043 if (p->protocol_max == 0)
4044 p->protocol_max = p->protocol_min;
4046 if (PRO_VERSION_MAX < p->protocol_min ||
4047 PRO_VERSION_MIN > p->protocol_max)
4050 mdev->agreed_pro_version = min_t(int, PRO_VERSION_MAX, p->protocol_max);
4052 dev_info(DEV, "Handshake successful: "
4053 "Agreed network protocol version %d\n", mdev->agreed_pro_version);
4058 dev_err(DEV, "incompatible DRBD dialects: "
4059 "I support %d-%d, peer supports %d-%d\n",
4060 PRO_VERSION_MIN, PRO_VERSION_MAX,
4061 p->protocol_min, p->protocol_max);
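/*
 * Added example: if we support protocols 86..96 and the peer announces
 * 87..101, the ranges overlap and we settle on
 * min_t(int, PRO_VERSION_MAX, p->protocol_max) == 96.  A peer announcing
 * only 80..85 fails the range check and takes the "incompatible DRBD
 * dialects" branch above.
 */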
4065 #if !defined(CONFIG_CRYPTO_HMAC) && !defined(CONFIG_CRYPTO_HMAC_MODULE)
4066 static int drbd_do_auth(struct drbd_conf *mdev)
4068 dev_err(DEV, "This kernel was built without CONFIG_CRYPTO_HMAC.\n");
4069 dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
4073 #define CHALLENGE_LEN 64
4077 0 - failed, try again (network error),
4078 -1 - auth failed, don't try again.
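/*
 * Added sketch of the challenge-response below (messages as named in the
 * code; both peers run the same sequence):
 *
 *	send P_AUTH_CHALLENGE: 64 random bytes (my_challenge)
 *	recv P_AUTH_CHALLENGE: the peer's challenge (peers_ch)
 *	send P_AUTH_RESPONSE:  HMAC(shared_secret, peers_ch)
 *	recv P_AUTH_RESPONSE:  compare against HMAC(shared_secret, my_challenge)
 *
 * Only random challenges and keyed digests cross the wire, never the
 * shared secret itself.
 */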
4081 static int drbd_do_auth(struct drbd_conf *mdev)
4083 char my_challenge[CHALLENGE_LEN]; /* 64 Bytes... */
4084 struct scatterlist sg;
4085 char *response = NULL;
4086 char *right_response = NULL;
4087 char *peers_ch = NULL;
4088 unsigned int key_len = strlen(mdev->net_conf->shared_secret);
4089 unsigned int resp_size;
4090 struct hash_desc desc;
4091 enum drbd_packets cmd;
4092 unsigned int length;
4095 desc.tfm = mdev->cram_hmac_tfm;
4098 rv = crypto_hash_setkey(mdev->cram_hmac_tfm,
4099 (u8 *)mdev->net_conf->shared_secret, key_len);
4101 dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
4106 get_random_bytes(my_challenge, CHALLENGE_LEN);
4108 rv = drbd_send_cmd2(mdev, P_AUTH_CHALLENGE, my_challenge, CHALLENGE_LEN);
4112 rv = drbd_recv_header(mdev, &cmd, &length);
4116 if (cmd != P_AUTH_CHALLENGE) {
4117 dev_err(DEV, "expected AuthChallenge packet, received: %s (0x%04x)\n",
4123 if (length > CHALLENGE_LEN * 2) {
4124 dev_err(DEV, "AuthChallenge payload too big.\n");
4129 peers_ch = kmalloc(length, GFP_NOIO);
4130 if (peers_ch == NULL) {
4131 dev_err(DEV, "kmalloc of peers_ch failed\n");
4136 rv = drbd_recv(mdev, peers_ch, length);
4139 if (!signal_pending(current))
4140 dev_warn(DEV, "short read AuthChallenge: l=%u\n", rv);
4145 resp_size = crypto_hash_digestsize(mdev->cram_hmac_tfm);
4146 response = kmalloc(resp_size, GFP_NOIO);
4147 if (response == NULL) {
4148 dev_err(DEV, "kmalloc of response failed\n");
4153 sg_init_table(&sg, 1);
4154 sg_set_buf(&sg, peers_ch, length);
4156 rv = crypto_hash_digest(&desc, &sg, sg.length, response);
4158 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
4163 rv = drbd_send_cmd2(mdev, P_AUTH_RESPONSE, response, resp_size);
4167 rv = drbd_recv_header(mdev, &cmd, &length);
4171 if (cmd != P_AUTH_RESPONSE) {
4172 dev_err(DEV, "expected AuthResponse packet, received: %s (0x%04x)\n",
4178 if (length != resp_size) {
4179 dev_err(DEV, "AuthResponse payload of unexpected size\n");
4184 rv = drbd_recv(mdev, response , resp_size);
4186 if (rv != resp_size) {
4187 if (!signal_pending(current))
4188 dev_warn(DEV, "short read receiving AuthResponse: l=%u\n", rv);
4193 right_response = kmalloc(resp_size, GFP_NOIO);
4194 if (right_response == NULL) {
4195 dev_err(DEV, "kmalloc of right_response failed\n");
4200 sg_set_buf(&sg, my_challenge, CHALLENGE_LEN);
4202 rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
4204 dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
4209 rv = !memcmp(response, right_response, resp_size);
4212 dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
4213 resp_size, mdev->net_conf->cram_hmac_alg);
4220 kfree(right_response);
4226 int drbdd_init(struct drbd_thread *thi)
4228 struct drbd_conf *mdev = thi->mdev;
4229 unsigned int minor = mdev_to_minor(mdev);
4232 sprintf(current->comm, "drbd%d_receiver", minor);
4234 dev_info(DEV, "receiver (re)started\n");
4237 h = drbd_connect(mdev);
4239 drbd_disconnect(mdev);
4240 schedule_timeout_interruptible(HZ);
4243 dev_warn(DEV, "Discarding network configuration.\n");
4244 drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
4249 if (get_net_conf(mdev)) {
4255 drbd_disconnect(mdev);
4257 dev_info(DEV, "receiver terminated\n");
4261 /* ********* acknowledge sender ******** */
4263 static int got_RqSReply(struct drbd_conf *mdev, struct p_header80 *h)
4265 struct p_req_state_reply *p = (struct p_req_state_reply *)h;
4267 int retcode = be32_to_cpu(p->retcode);
4269 if (retcode >= SS_SUCCESS) {
4270 drbd_set_flag(mdev, CL_ST_CHG_SUCCESS);
4272 drbd_set_flag(mdev, CL_ST_CHG_FAIL);
4273 dev_err(DEV, "Requested state change failed by peer: %s (%d)\n",
4274 drbd_set_st_err_str(retcode), retcode);
4276 wake_up(&mdev->state_wait);
4281 static int got_Ping(struct drbd_conf *mdev, struct p_header80 *h)
4283 return drbd_send_ping_ack(mdev);
4287 static int got_PingAck(struct drbd_conf *mdev, struct p_header80 *h)
4289 /* restore idle timeout */
4290 mdev->meta.socket->sk->sk_rcvtimeo = mdev->net_conf->ping_int*HZ;
4291 if (!drbd_test_and_set_flag(mdev, GOT_PING_ACK))
4292 wake_up(&mdev->misc_wait);
4297 static int got_IsInSync(struct drbd_conf *mdev, struct p_header80 *h)
4299 struct p_block_ack *p = (struct p_block_ack *)h;
4300 sector_t sector = be64_to_cpu(p->sector);
4301 int blksize = be32_to_cpu(p->blksize);
4303 D_ASSERT(mdev->agreed_pro_version >= 89);
4305 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4307 if (get_ldev(mdev)) {
4308 drbd_rs_complete_io(mdev, sector);
4309 drbd_set_in_sync(mdev, sector, blksize);
4310 /* rs_same_csums is supposed to count in units of BM_BLOCK_SIZE */
4311 mdev->rs_same_csum += (blksize >> BM_BLOCK_SHIFT);
4314 dec_rs_pending(mdev);
4315 atomic_add(blksize >> 9, &mdev->rs_sect_in);
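/* Added note: blksize is in bytes; ">> 9" converts it to 512 byte sectors
 * for rs_sect_in, while ">> BM_BLOCK_SHIFT" above counts in bitmap
 * granularity (one bit per 4 KiB block in this DRBD version). */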
4320 /* when we receive the ACK for a write request,
4321 * verify that we actually know about it */
4322 static struct drbd_request *_ack_id_to_req(struct drbd_conf *mdev,
4323 u64 id, sector_t sector)
4325 struct hlist_head *slot = tl_hash_slot(mdev, sector);
4326 struct hlist_node *n;
4327 struct drbd_request *req;
4329 hlist_for_each_entry(req, n, slot, collision) {
4330 if ((unsigned long)req == (unsigned long)id) {
4331 if (req->sector != sector) {
4332 dev_err(DEV, "_ack_id_to_req: found req %p but it has "
4333 "wrong sector (%llus versus %llus)\n", req,
4334 (unsigned long long)req->sector,
4335 (unsigned long long)sector);
4344 typedef struct drbd_request *(req_validator_fn)
4345 (struct drbd_conf *mdev, u64 id, sector_t sector);
4347 static int validate_req_change_req_state(struct drbd_conf *mdev,
4348 u64 id, sector_t sector, req_validator_fn validator,
4349 const char *func, enum drbd_req_event what)
4351 struct drbd_request *req;
4352 struct bio_and_error m;
4354 spin_lock_irq(&mdev->req_lock);
4355 req = validator(mdev, id, sector);
4356 if (unlikely(!req)) {
4357 spin_unlock_irq(&mdev->req_lock);
4359 dev_err(DEV, "%s: failed to find req %p, sector %llus\n", func,
4360 (void *)(unsigned long)id, (unsigned long long)sector);
4363 __req_mod(req, what, &m);
4364 spin_unlock_irq(&mdev->req_lock);
4367 complete_master_bio(mdev, &m);
4371 static int got_BlockAck(struct drbd_conf *mdev, struct p_header80 *h)
4373 struct p_block_ack *p = (struct p_block_ack *)h;
4374 sector_t sector = be64_to_cpu(p->sector);
4375 int blksize = be32_to_cpu(p->blksize);
4376 enum drbd_req_event what;
4378 update_peer_seq(mdev, be32_to_cpu(p->seq_num));
4380 if (is_syncer_block_id(p->block_id)) {
4381 drbd_set_in_sync(mdev, sector, blksize);
4382 dec_rs_pending(mdev);
4385 switch (be16_to_cpu(h->command)) {
4386 case P_RS_WRITE_ACK:
4387 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4388 what = write_acked_by_peer_and_sis;
4391 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4392 what = write_acked_by_peer;
4395 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_B);
4396 what = recv_acked_by_peer;
4399 D_ASSERT(mdev->net_conf->wire_protocol == DRBD_PROT_C);
4400 what = conflict_discarded_by_peer;
4407 return validate_req_change_req_state(mdev, p->block_id, sector,
4408 _ack_id_to_req, __func__ , what);
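/*
 * Added summary: the ack type encodes the wire protocol semantics checked
 * by the D_ASSERTs above:
 *	protocol B: P_RECV_ACK                    -> recv_acked_by_peer
 *	protocol C: P_WRITE_ACK / P_RS_WRITE_ACK  -> write_acked_by_peer(_and_sis)
 *	protocol C: P_DISCARD_ACK                 -> conflict_discarded_by_peer
 * Protocol A has no per-write acks at all; see the comment in got_NegAck()
 * below.
 */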
static int got_NegAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);
	int size = be32_to_cpu(p->blksize);
	struct drbd_request *req;
	struct bio_and_error m;

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (is_syncer_block_id(p->block_id)) {
		dec_rs_pending(mdev);
		drbd_rs_failed_io(mdev, sector, size);
		return true;
	}

	spin_lock_irq(&mdev->req_lock);
	req = _ack_id_to_req(mdev, p->block_id, sector);
	if (!req) {
		spin_unlock_irq(&mdev->req_lock);
		if (mdev->net_conf->wire_protocol == DRBD_PROT_A ||
		    mdev->net_conf->wire_protocol == DRBD_PROT_B) {
			/* Protocol A has no P_WRITE_ACKs, but has P_NEG_ACKs.
			   The master bio might already be completed, therefore the
			   request is no longer in the collision hash.
			   => Do not try to validate block_id as request. */
			/* In Protocol B we might already have got a P_RECV_ACK
			   but then get a P_NEG_ACK afterwards. */
			drbd_set_out_of_sync(mdev, sector, size);
			return true;
		}
		dev_err(DEV, "%s: failed to find req %p, sector %llus\n", __func__,
			(void *)(unsigned long)p->block_id, (unsigned long long)sector);
		return false;
	}

	__req_mod(req, neg_acked, &m);
	spin_unlock_irq(&mdev->req_lock);

	if (m.bio)
		complete_master_bio(mdev, &m);
	return true;
}
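
/* Negative reply to a data (read) request: fail the original request. */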
static int got_NegDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	sector_t sector = be64_to_cpu(p->sector);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dev_err(DEV, "Got NegDReply; Sector %llus, len %u; Fail original request.\n",
		(unsigned long long)sector, be32_to_cpu(p->blksize));

	return validate_req_change_req_state(mdev, p->block_id, sector,
		_ar_id_to_req, __func__ , neg_acked);
}
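
/* Negative reply to a resync read request; P_RS_CANCEL is dispatched here
 * as well.  Account the failed I/O only for an actual P_NEG_RS_DREPLY, and
 * only if we still have a local disk. */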
static int got_NegRSDReply(struct drbd_conf *mdev, struct p_header80 *h)
{
	sector_t sector;
	int size;
	struct p_block_ack *p = (struct p_block_ack *)h;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	dec_rs_pending(mdev);

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, sector);
		switch (be16_to_cpu(h->command)) {
		case P_NEG_RS_DREPLY:
			drbd_rs_failed_io(mdev, sector, size);
			/* fall through */
		case P_RS_CANCEL:
			break;
		default:
			D_ASSERT(0);
			put_ldev(mdev);
			return false;
		}
		put_ldev(mdev);
	}

	return true;
}
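
/* A barrier ACK releases the corresponding epoch from the transfer log.
 * If we are ahead of the peer (C_AHEAD) and all application I/O has
 * drained, arm the timer that switches us back to a resync source. */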
static int got_BarrierAck(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_barrier_ack *p = (struct p_barrier_ack *)h;

	tl_release(mdev, p->barrier, be32_to_cpu(p->set_size));

	if (mdev->state.conn == C_AHEAD &&
	    atomic_read(&mdev->ap_in_flight) == 0 &&
	    !drbd_test_and_set_flag(mdev, AHEAD_TO_SYNC_SOURCE)) {
		mdev->start_resync_timer.expires = jiffies + HZ;
		add_timer(&mdev->start_resync_timer);
	}

	return true;
}
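
/* Reply to an online-verify request: record any mismatch found, advance
 * the progress marks, and once the last reply is in, queue the work that
 * finishes the verify run. */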
static int got_OVResult(struct drbd_conf *mdev, struct p_header80 *h)
{
	struct p_block_ack *p = (struct p_block_ack *)h;
	struct drbd_work *w;
	sector_t sector;
	int size;

	sector = be64_to_cpu(p->sector);
	size = be32_to_cpu(p->blksize);

	update_peer_seq(mdev, be32_to_cpu(p->seq_num));

	if (be64_to_cpu(p->block_id) == ID_OUT_OF_SYNC)
		drbd_ov_oos_found(mdev, sector, size);
	else
		ov_oos_print(mdev);

	if (!get_ldev(mdev))
		return true;

	drbd_rs_complete_io(mdev, sector);
	dec_rs_pending(mdev);

	--mdev->ov_left;

	/* let's advance progress step marks only for every other megabyte */
	if ((mdev->ov_left & 0x200) == 0x200)
		drbd_advance_rs_marks(mdev, mdev->ov_left);

	if (mdev->ov_left == 0) {
		w = kmalloc(sizeof(*w), GFP_NOIO);
		if (w) {
			w->cb = w_ov_finished;
			drbd_queue_work_front(&mdev->data.work, w);
		} else {
			dev_err(DEV, "kmalloc(w) failed.");
			ov_oos_print(mdev);
			drbd_resync_finished(mdev);
		}
	}
	put_ldev(mdev);
	return true;
}
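
/* P_DELAY_PROBE carries nothing we act on here; consume and ignore it. */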
static int got_skip(struct drbd_conf *mdev, struct p_header80 *h)
{
	return true;
}
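
/* Dispatch table entry for the asender: the expected on-wire packet size
 * and the handler that processes it. */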
struct asender_cmd {
	size_t pkt_size;
	int (*process)(struct drbd_conf *mdev, struct p_header80 *h);
};
static struct asender_cmd *get_asender_cmd(int cmd)
{
	static struct asender_cmd asender_tbl[] = {
		/* anything missing from this table is in
		 * the drbd_cmd_handler (drbd_default_handler) table,
		 * see the beginning of drbdd() */
	[P_PING]	    = { sizeof(struct p_header80), got_Ping },
	[P_PING_ACK]	    = { sizeof(struct p_header80), got_PingAck },
	[P_RECV_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_WRITE_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_RS_WRITE_ACK]    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_DISCARD_ACK]	    = { sizeof(struct p_block_ack), got_BlockAck },
	[P_NEG_ACK]	    = { sizeof(struct p_block_ack), got_NegAck },
	[P_NEG_DREPLY]	    = { sizeof(struct p_block_ack), got_NegDReply },
	[P_NEG_RS_DREPLY]   = { sizeof(struct p_block_ack), got_NegRSDReply},
	[P_OV_RESULT]	    = { sizeof(struct p_block_ack), got_OVResult },
	[P_BARRIER_ACK]	    = { sizeof(struct p_barrier_ack), got_BarrierAck },
	[P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply },
	[P_RS_IS_IN_SYNC]   = { sizeof(struct p_block_ack), got_IsInSync },
	[P_DELAY_PROBE]	    = { sizeof(struct p_delay_probe93), got_skip },
	[P_RS_CANCEL]	    = { sizeof(struct p_block_ack), got_NegRSDReply},
	[P_MAX_CMD]	    = { 0, NULL },
	};
	if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
		return NULL;
	return &asender_tbl[cmd];
}
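
/* The asender thread: sends pings when requested, acknowledges completed
 * receive requests via drbd_process_done_ee(), and receives and dispatches
 * the small "ACK class" packets handled in the table above. */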
int drbd_asender(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct p_header80 *h = &mdev->meta.rbuf.header.h80;
	struct asender_cmd *cmd = NULL;

	int rv, len;
	void *buf    = h;
	int received = 0;
	int expect   = sizeof(struct p_header80);
	int empty;
	int ping_timeout_active = 0;

	sprintf(current->comm, "drbd%d_asender", mdev_to_minor(mdev));

	current->policy = SCHED_RR;  /* Make this a realtime task! */
	current->rt_priority = 2;    /* more important than all other tasks */

	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);
		if (drbd_test_and_clear_flag(mdev, SEND_PING)) {
			ERR_IF(!drbd_send_ping(mdev)) goto reconnect;
			mdev->meta.socket->sk->sk_rcvtimeo =
				mdev->net_conf->ping_timeo*HZ/10;
			ping_timeout_active = 1;
		}

		/* conditionally cork;
		 * it may hurt latency if we cork without much to send */
		if (!mdev->net_conf->no_cork &&
			3 < atomic_read(&mdev->unacked_cnt))
			drbd_tcp_cork(mdev->meta.socket);
		while (1) {
			drbd_clear_flag(mdev, SIGNAL_ASENDER);
			flush_signals(current);
			if (!drbd_process_done_ee(mdev))
				goto reconnect;
			/* to avoid race with newly queued ACKs */
			drbd_set_flag(mdev, SIGNAL_ASENDER);
			spin_lock_irq(&mdev->req_lock);
			empty = list_empty(&mdev->done_ee);
			spin_unlock_irq(&mdev->req_lock);
			/* new ack may have been queued right here,
			 * but then there is also a signal pending,
			 * and we start over... */
			if (empty)
				break;
		}
		/* but unconditionally uncork unless disabled */
		if (!mdev->net_conf->no_cork)
			drbd_tcp_uncork(mdev->meta.socket);

		/* short circuit, recv_msg would return EINTR anyways. */
		if (signal_pending(current))
			continue;

		rv = drbd_recv_short(mdev, mdev->meta.socket,
				     buf, expect-received, 0);
		drbd_clear_flag(mdev, SIGNAL_ASENDER);

		flush_signals(current);

		/* Note:
		 * -EINTR	 (on meta) we got a signal
		 * -EAGAIN	 (on meta) rcvtimeo expired
		 * -ECONNRESET	 other side closed the connection
		 * -ERESTARTSYS  (on data) we got a signal
		 * rv <  0	 other than above: unexpected error!
		 * rv == expected: full header or command
		 * rv <  expected: "woken" by signal during receive
		 * rv == 0	 : "connection shut down by peer"
		 */
		if (likely(rv > 0)) {
			received += rv;
			buf	 += rv;
		} else if (rv == 0) {
			if (drbd_test_flag(mdev, DISCONNECT_SENT)) {
				long t; /* time_left */
				t = wait_event_timeout(mdev->state_wait,
						       mdev->state.conn < C_CONNECTED,
						       mdev->net_conf->ping_timeo * HZ/10);
				if (t)
					break;
			}
			dev_err(DEV, "meta connection shut down by peer.\n");
			goto reconnect;
		} else if (rv == -EAGAIN) {
			/* If the data socket received something meanwhile,
			 * that is good enough: peer is still alive. */
			if (time_after(mdev->last_received,
				jiffies - mdev->meta.socket->sk->sk_rcvtimeo))
				continue;
			if (ping_timeout_active) {
				dev_err(DEV, "PingAck did not arrive in time.\n");
				goto reconnect;
			}
			drbd_set_flag(mdev, SEND_PING);
			continue;
		} else if (rv == -EINTR) {
			continue;
		} else {
			dev_err(DEV, "sock_recvmsg returned %d\n", rv);
			goto reconnect;
		}

		if (received == expect && cmd == NULL) {
			if (unlikely(h->magic != BE_DRBD_MAGIC)) {
				dev_err(DEV, "magic?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto reconnect;
			}
			cmd = get_asender_cmd(be16_to_cpu(h->command));
			len = be16_to_cpu(h->length);
			if (unlikely(cmd == NULL)) {
				dev_err(DEV, "unknown command?? on meta m: 0x%08x c: %d l: %d\n",
				    be32_to_cpu(h->magic),
				    be16_to_cpu(h->command),
				    be16_to_cpu(h->length));
				goto disconnect;
			}
			expect = cmd->pkt_size;
			ERR_IF(len != expect-sizeof(struct p_header80))
				goto reconnect;
		}
		if (received == expect) {
			mdev->last_received = jiffies;
			D_ASSERT(cmd != NULL);
			if (!cmd->process(mdev, h))
				goto reconnect;

			/* the idle_timeout (ping-int)
			 * has been restored in got_PingAck() */
			if (cmd == get_asender_cmd(P_PING_ACK))
				ping_timeout_active = 0;

			buf	 = h;
			received = 0;
			expect	 = sizeof(struct p_header80);
			cmd	 = NULL;
		}
	}

	if (0) {
reconnect:
		drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
		drbd_md_sync(mdev);
	}
	if (0) {
disconnect:
		drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
		drbd_md_sync(mdev);
	}
	drbd_clear_flag(mdev, SIGNAL_ASENDER);

	D_ASSERT(mdev->state.conn < C_CONNECTED);
	dev_info(DEV, "asender terminated\n");

	return 0;
}