4 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
6 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
10 Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
11 from Logicworks, Inc. for making SDP replication support possible.
13 drbd is free software; you can redistribute it and/or modify
14 it under the terms of the GNU General Public License as published by
15 the Free Software Foundation; either version 2, or (at your option)
18 drbd is distributed in the hope that it will be useful,
19 but WITHOUT ANY WARRANTY; without even the implied warranty of
20 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 GNU General Public License for more details.
23 You should have received a copy of the GNU General Public License
24 along with drbd; see the file COPYING. If not, write to
25 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
29 #include <linux/module.h>
30 #include <linux/drbd.h>
31 #include <asm/uaccess.h>
32 #include <asm/types.h>
34 #include <linux/ctype.h>
35 #include <linux/mutex.h>
37 #include <linux/file.h>
38 #include <linux/proc_fs.h>
39 #include <linux/init.h>
41 #include <linux/memcontrol.h>
42 #include <linux/mm_inline.h>
43 #include <linux/slab.h>
44 #include <linux/random.h>
45 #include <linux/reboot.h>
46 #include <linux/notifier.h>
47 #include <linux/kthread.h>
48 #include <linux/workqueue.h>
49 #define __KERNEL_SYSCALLS__
50 #include <linux/unistd.h>
51 #include <linux/vmalloc.h>
53 #include <linux/drbd_limits.h>
55 #include "drbd_protocol.h"
56 #include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
60 static DEFINE_MUTEX(drbd_main_mutex);
61 static int drbd_open(struct block_device *bdev, fmode_t mode);
62 static void drbd_release(struct gendisk *gd, fmode_t mode);
63 static int w_md_sync(struct drbd_work *w, int unused);
64 static void md_sync_timer_fn(unsigned long data);
65 static int w_bitmap_io(struct drbd_work *w, int unused);
66 static int w_go_diskless(struct drbd_work *w, int unused);
68 MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
69 "Lars Ellenberg <lars@linbit.com>");
70 MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
71 MODULE_VERSION(REL_VERSION);
72 MODULE_LICENSE("GPL");
73 MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
74 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
75 MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
77 #include <linux/moduleparam.h>
78 /* allow_open_on_secondary */
79 MODULE_PARM_DESC(allow_oos, "DONT USE!");
80 /* thanks to these macros, if compiled into the kernel (not-module),
81 * this becomes the boot parameter drbd.minor_count */
82 module_param(minor_count, uint, 0444);
83 module_param(disable_sendpage, bool, 0644);
84 module_param(allow_oos, bool, 0);
85 module_param(proc_details, int, 0644);
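/* Usage example (illustrative, not from this file): loaded as a module,
 * the parameters above can be set e.g. with
 *	modprobe drbd minor_count=8 disable_sendpage=1
 * and, when drbd is built into the kernel, as boot parameters:
 *	drbd.minor_count=8 drbd.disable_sendpage=1
 */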
87 #ifdef CONFIG_DRBD_FAULT_INJECTION
90 static int fault_count;
92 /* bitmap of enabled faults */
93 module_param(enable_faults, int, 0664);
94 /* fault rate % value - applies to all enabled faults */
95 module_param(fault_rate, int, 0664);
96 /* count of faults inserted */
97 module_param(fault_count, int, 0664);
98 /* bitmap of devices to insert faults on */
99 module_param(fault_devs, int, 0644);
102 /* module parameter, defined */
103 unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
104 bool disable_sendpage;
106 int proc_details; /* Detail level in /proc/drbd */
108 /* Module parameter for setting the user mode helper program
109 * to run. Default is /sbin/drbdadm */
110 char usermode_helper[80] = "/sbin/drbdadm";
112 module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
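/* Note (illustrative): because the parameter is registered with mode 0644
 * above, the helper can also be changed at runtime through
 *	/sys/module/drbd/parameters/usermode_helper
 */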
114 /* in 2.6.x, our device mapping and config info contains our virtual gendisks
115 * as member "struct gendisk *vdisk;"
117 struct idr drbd_devices;
118 struct list_head drbd_resources;
120 struct kmem_cache *drbd_request_cache;
121 struct kmem_cache *drbd_ee_cache; /* peer requests */
122 struct kmem_cache *drbd_bm_ext_cache; /* bitmap extents */
123 struct kmem_cache *drbd_al_ext_cache; /* activity log extents */
124 mempool_t *drbd_request_mempool;
125 mempool_t *drbd_ee_mempool;
126 mempool_t *drbd_md_io_page_pool;
127 struct bio_set *drbd_md_io_bio_set;
129 /* I do not use a standard mempool, because:
130 1) I want to hand out the pre-allocated objects first.
131 2) I want to be able to interrupt sleeping allocation with a signal.
132 Note: This is a singly linked list; the next pointer is the private
133 member of struct page.
134 */
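/* Illustrative sketch (assumed helpers, not part of the driver): how such
 * a singly linked page stack works, with the next pointer stored in the
 * page private member.  Locking (drbd_pp_lock) is up to the caller.
 *
 *	static void example_push(struct page **pool, struct page *page)
 *	{
 *		set_page_private(page, (unsigned long)*pool);
 *		*pool = page;
 *	}
 *
 *	static struct page *example_pop(struct page **pool)
 *	{
 *		struct page *page = *pool;
 *		if (page)
 *			*pool = (struct page *)page_private(page);
 *		return page;
 *	}
 */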
135 struct page *drbd_pp_pool;
136 spinlock_t drbd_pp_lock;
138 wait_queue_head_t drbd_pp_wait;
140 DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
142 static const struct block_device_operations drbd_ops = {
143 .owner = THIS_MODULE,
145 .release = drbd_release,
148 struct bio *bio_alloc_drbd(gfp_t gfp_mask)
152 if (!drbd_md_io_bio_set)
153 return bio_alloc(gfp_mask, 1);
155 bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
162 /* When checking with sparse, and this is an inline function, sparse will
163 give tons of false positives. When this is a real function, sparse works.
165 int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
169 atomic_inc(&device->local_cnt);
170 io_allowed = (device->state.disk >= mins);
172 if (atomic_dec_and_test(&device->local_cnt))
173 wake_up(&device->misc_wait);
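/* Typical caller pattern (sketch): pair every successful get with a put.
 *
 *	if (get_ldev_if_state(device, D_NEGOTIATING)) {
 *		... safely dereference device->ldev ...
 *		put_ldev(device);
 *	}
 */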
181 * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
182 * @connection: DRBD connection.
183 * @barrier_nr: Expected identifier of the DRBD write barrier packet.
184 * @set_size: Expected number of requests before that barrier.
186 * In case the passed barrier_nr or set_size does not match the oldest
187 * epoch of not yet barrier-acked requests, this function will cause a
188 * termination of the connection.
190 void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
191 unsigned int set_size)
193 struct drbd_request *r;
194 struct drbd_request *req = NULL;
195 int expect_epoch = 0;
198 spin_lock_irq(&connection->resource->req_lock);
200 /* find oldest not yet barrier-acked write request,
201 * count writes in its epoch. */
202 list_for_each_entry(r, &connection->transfer_log, tl_requests) {
203 const unsigned s = r->rq_state;
207 if (!(s & RQ_NET_MASK))
212 expect_epoch = req->epoch;
215 if (r->epoch != expect_epoch)
219 /* if (s & RQ_DONE): not expected */
220 /* if (!(s & RQ_NET_MASK)): not expected */
225 /* first some paranoia code */
227 drbd_err(connection, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
231 if (expect_epoch != barrier_nr) {
232 drbd_err(connection, "BAD! BarrierAck #%u received, expected #%u!\n",
233 barrier_nr, expect_epoch);
237 if (expect_size != set_size) {
238 drbd_err(connection, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
239 barrier_nr, set_size, expect_size);
243 /* Clean up list of requests processed during current epoch. */
244 /* this extra list walk restart is paranoia,
245 * to catch requests being barrier-acked "unexpectedly".
246 * It usually should find the same req again, or some READ preceding it. */
247 list_for_each_entry(req, &connection->transfer_log, tl_requests)
248 if (req->epoch == expect_epoch)
250 list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
251 if (req->epoch != expect_epoch)
253 _req_mod(req, BARRIER_ACKED);
255 spin_unlock_irq(&connection->resource->req_lock);
260 spin_unlock_irq(&connection->resource->req_lock);
261 conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
266 * _tl_restart() - Walks the transfer log, and applies an action to all requests
267 * @device: DRBD device.
268 * @what: The action/event to perform with all request objects
270 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
271 * RESTART_FROZEN_DISK_IO.
273 /* must hold resource->req_lock */
274 void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
276 struct drbd_request *req, *r;
278 list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests)
282 void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
284 spin_lock_irq(&connection->resource->req_lock);
285 _tl_restart(connection, what);
286 spin_unlock_irq(&connection->resource->req_lock);
290 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
291 * @device: DRBD device.
293 * This is called after the connection to the peer was lost. The storage covered
294 by the requests on the transfer log gets marked as out of sync. Called from the
295 * receiver thread and the worker thread.
297 void tl_clear(struct drbd_connection *connection)
299 tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
303 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
304 * @device: DRBD device.
306 void tl_abort_disk_io(struct drbd_device *device)
308 struct drbd_connection *connection = first_peer_device(device)->connection;
309 struct drbd_request *req, *r;
311 spin_lock_irq(&connection->resource->req_lock);
312 list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
313 if (!(req->rq_state & RQ_LOCAL_PENDING))
315 if (req->device != device)
317 _req_mod(req, ABORT_DISK_IO);
319 spin_unlock_irq(&connection->resource->req_lock);
322 static int drbd_thread_setup(void *arg)
324 struct drbd_thread *thi = (struct drbd_thread *) arg;
325 struct drbd_resource *resource = thi->resource;
329 snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
334 retval = thi->function(thi);
336 spin_lock_irqsave(&thi->t_lock, flags);
338 /* if the receiver has been "EXITING", the last thing it did
339 * was set the conn state to "StandAlone",
340 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
341 * and receiver thread will be "started".
342 * drbd_thread_start needs to set "RESTARTING" in that case.
343 * t_state check and assignment needs to be within the same spinlock,
344 * so either thread_start sees EXITING, and can remap to RESTARTING,
345 * or thread_start see NONE, and can proceed as normal.
348 if (thi->t_state == RESTARTING) {
349 drbd_info(resource, "Restarting %s thread\n", thi->name);
350 thi->t_state = RUNNING;
351 spin_unlock_irqrestore(&thi->t_lock, flags);
358 complete_all(&thi->stop);
359 spin_unlock_irqrestore(&thi->t_lock, flags);
361 drbd_info(resource, "Terminating %s\n", current->comm);
363 /* Release mod reference taken when thread was started */
366 kref_put(&thi->connection->kref, drbd_destroy_connection);
367 kref_put(&resource->kref, drbd_destroy_resource);
368 module_put(THIS_MODULE);
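/* Thread state machine in short (summary of the code above and below):
 *
 *	NONE       -> RUNNING     drbd_thread_start()
 *	RUNNING    -> EXITING     _drbd_thread_stop()
 *	EXITING    -> RESTARTING  drbd_thread_start() while still exiting
 *	RESTARTING -> RUNNING     drbd_thread_setup() restarts thi->function
 */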
372 static void drbd_thread_init(struct drbd_resource *resource, struct drbd_thread *thi,
373 int (*func) (struct drbd_thread *), const char *name)
375 spin_lock_init(&thi->t_lock);
378 thi->function = func;
379 thi->resource = resource;
380 thi->connection = NULL;
384 int drbd_thread_start(struct drbd_thread *thi)
386 struct drbd_resource *resource = thi->resource;
387 struct task_struct *nt;
390 /* is used from state engine doing drbd_thread_stop_nowait,
391 * while holding the req lock irqsave */
392 spin_lock_irqsave(&thi->t_lock, flags);
394 switch (thi->t_state) {
396 drbd_info(resource, "Starting %s thread (from %s [%d])\n",
397 thi->name, current->comm, current->pid);
399 /* Get ref on module for thread - this is released when thread exits */
400 if (!try_module_get(THIS_MODULE)) {
401 drbd_err(resource, "Failed to get module reference in drbd_thread_start\n");
402 spin_unlock_irqrestore(&thi->t_lock, flags);
406 kref_get(&resource->kref);
408 kref_get(&thi->connection->kref);
410 init_completion(&thi->stop);
411 thi->reset_cpu_mask = 1;
412 thi->t_state = RUNNING;
413 spin_unlock_irqrestore(&thi->t_lock, flags);
414 flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
416 nt = kthread_create(drbd_thread_setup, (void *) thi,
417 "drbd_%c_%s", thi->name[0], thi->resource->name);
420 drbd_err(resource, "Couldn't start thread\n");
423 kref_put(&thi->connection->kref, drbd_destroy_connection);
424 kref_put(&resource->kref, drbd_destroy_resource);
425 module_put(THIS_MODULE);
428 spin_lock_irqsave(&thi->t_lock, flags);
430 thi->t_state = RUNNING;
431 spin_unlock_irqrestore(&thi->t_lock, flags);
435 thi->t_state = RESTARTING;
436 drbd_info(resource, "Restarting %s thread (from %s [%d])\n",
437 thi->name, current->comm, current->pid);
442 spin_unlock_irqrestore(&thi->t_lock, flags);
450 void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
454 enum drbd_thread_state ns = restart ? RESTARTING : EXITING;
456 /* may be called from state engine, holding the req lock irqsave */
457 spin_lock_irqsave(&thi->t_lock, flags);
459 if (thi->t_state == NONE) {
460 spin_unlock_irqrestore(&thi->t_lock, flags);
462 drbd_thread_start(thi);
466 if (thi->t_state != ns) {
467 if (thi->task == NULL) {
468 spin_unlock_irqrestore(&thi->t_lock, flags);
474 init_completion(&thi->stop);
475 if (thi->task != current)
476 force_sig(DRBD_SIGKILL, thi->task);
479 spin_unlock_irqrestore(&thi->t_lock, flags);
482 wait_for_completion(&thi->stop);
485 int conn_lowest_minor(struct drbd_connection *connection)
487 struct drbd_peer_device *peer_device;
488 int vnr = 0, minor = -1;
491 peer_device = idr_get_next(&connection->peer_devices, &vnr);
493 minor = device_to_minor(peer_device->device);
501 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
503 * Forces all threads of a resource onto the same CPU. This is beneficial for
504 DRBD's performance. May be overridden by the user's configuration.
506 static void drbd_calc_cpu_mask(cpumask_var_t *cpu_mask)
508 unsigned int *resources_per_cpu, min_index = ~0;
510 resources_per_cpu = kzalloc(nr_cpu_ids * sizeof(*resources_per_cpu), GFP_KERNEL);
511 if (resources_per_cpu) {
512 struct drbd_resource *resource;
513 unsigned int cpu, min = ~0;
516 for_each_resource_rcu(resource, &drbd_resources) {
517 for_each_cpu(cpu, resource->cpu_mask)
518 resources_per_cpu[cpu]++;
521 for_each_online_cpu(cpu) {
522 if (resources_per_cpu[cpu] < min) {
523 min = resources_per_cpu[cpu];
527 kfree(resources_per_cpu);
529 if (min_index == ~0) {
530 cpumask_setall(*cpu_mask);
533 cpumask_set_cpu(min_index, *cpu_mask);
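/* Example (illustrative): with CPUs 0-3 online and existing resources
 * pinned as {cpu0: 2, cpu1: 1, cpu2: 1, cpu3: 3}, the scan above selects
 * cpu1, the first CPU holding the minimum count, for the new mask. */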
537 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
538 * @device: DRBD device.
539 * @thi: drbd_thread object
541 * Call this in the "main loop" of _all_ threads; no mutex needed, current won't die
544 void drbd_thread_current_set_cpu(struct drbd_thread *thi)
546 struct drbd_resource *resource = thi->resource;
547 struct task_struct *p = current;
549 if (!thi->reset_cpu_mask)
551 thi->reset_cpu_mask = 0;
552 set_cpus_allowed_ptr(p, resource->cpu_mask);
555 #define drbd_calc_cpu_mask(A) ({})
559 * drbd_header_size - size of a packet header
561 * The header size is a multiple of 8, so any payload following the header is
562 word aligned on 64-bit architectures. (The bitmap send and receive code relies on this.)
565 unsigned int drbd_header_size(struct drbd_connection *connection)
567 if (connection->agreed_pro_version >= 100) {
568 BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
569 return sizeof(struct p_header100);
571 BUILD_BUG_ON(sizeof(struct p_header80) !=
572 sizeof(struct p_header95));
573 BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
574 return sizeof(struct p_header80);
578 static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
580 h->magic = cpu_to_be32(DRBD_MAGIC);
581 h->command = cpu_to_be16(cmd);
582 h->length = cpu_to_be16(size);
583 return sizeof(struct p_header80);
586 static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
588 h->magic = cpu_to_be16(DRBD_MAGIC_BIG);
589 h->command = cpu_to_be16(cmd);
590 h->length = cpu_to_be32(size);
591 return sizeof(struct p_header95);
594 static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
597 h->magic = cpu_to_be32(DRBD_MAGIC_100);
598 h->volume = cpu_to_be16(vnr);
599 h->command = cpu_to_be16(cmd);
600 h->length = cpu_to_be32(size);
602 return sizeof(struct p_header100);
605 static unsigned int prepare_header(struct drbd_connection *connection, int vnr,
606 void *buffer, enum drbd_packet cmd, int size)
608 if (connection->agreed_pro_version >= 100)
609 return prepare_header100(buffer, cmd, size, vnr);
610 else if (connection->agreed_pro_version >= 95 &&
611 size > DRBD_MAX_SIZE_H80_PACKET)
612 return prepare_header95(buffer, cmd, size);
614 return prepare_header80(buffer, cmd, size);
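/* Resulting wire formats (sketch; all fields big endian, as prepared above):
 *
 *	p_header80:  u32 magic (DRBD_MAGIC)      u16 command  u16 length
 *	p_header95:  u16 magic (DRBD_MAGIC_BIG)  u16 command  u32 length
 *	p_header100: u32 magic (DRBD_MAGIC_100)  u16 volume   u16 command  u32 length
 *
 * Peers speaking protocol >= 100 always get p_header100; older peers get
 * p_header95 only for payloads larger than DRBD_MAX_SIZE_H80_PACKET,
 * otherwise p_header80.
 */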
617 static void *__conn_prepare_command(struct drbd_connection *connection,
618 struct drbd_socket *sock)
622 return sock->sbuf + drbd_header_size(connection);
625 void *conn_prepare_command(struct drbd_connection *connection, struct drbd_socket *sock)
629 mutex_lock(&sock->mutex);
630 p = __conn_prepare_command(connection, sock);
632 mutex_unlock(&sock->mutex);
637 void *drbd_prepare_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock)
639 return conn_prepare_command(peer_device->connection, sock);
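/* Send idiom used throughout this file (sketch): *_prepare_command()
 * takes the socket mutex and returns a pointer to the payload area;
 * *_send_command() fills in the header, transmits, and drops the mutex.
 *
 *	p = drbd_prepare_command(peer_device, sock);
 *	if (!p)
 *		return -EIO;
 *	p->some_field = cpu_to_be32(value);
 *	return drbd_send_command(peer_device, sock, P_SOME_CMD, sizeof(*p), NULL, 0);
 *
 * (p->some_field and P_SOME_CMD are placeholders.)
 */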
642 static int __send_command(struct drbd_connection *connection, int vnr,
643 struct drbd_socket *sock, enum drbd_packet cmd,
644 unsigned int header_size, void *data,
651 * Called with @data == NULL and the size of the data blocks in @size
652 * for commands that send data blocks. For those commands, omit the
653 * MSG_MORE flag: this will increase the likelihood that data blocks
654 * which are page aligned on the sender will end up page aligned on the
657 msg_flags = data ? MSG_MORE : 0;
659 header_size += prepare_header(connection, vnr, sock->sbuf, cmd,
661 err = drbd_send_all(connection, sock->socket, sock->sbuf, header_size,
664 err = drbd_send_all(connection, sock->socket, data, size, 0);
668 static int __conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
669 enum drbd_packet cmd, unsigned int header_size,
670 void *data, unsigned int size)
672 return __send_command(connection, 0, sock, cmd, header_size, data, size);
675 int conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
676 enum drbd_packet cmd, unsigned int header_size,
677 void *data, unsigned int size)
681 err = __conn_send_command(connection, sock, cmd, header_size, data, size);
682 mutex_unlock(&sock->mutex);
686 int drbd_send_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock,
687 enum drbd_packet cmd, unsigned int header_size,
688 void *data, unsigned int size)
692 err = __send_command(peer_device->connection, peer_device->device->vnr,
693 sock, cmd, header_size, data, size);
694 mutex_unlock(&sock->mutex);
698 int drbd_send_ping(struct drbd_connection *connection)
700 struct drbd_socket *sock;
702 sock = &connection->meta;
703 if (!conn_prepare_command(connection, sock))
705 return conn_send_command(connection, sock, P_PING, 0, NULL, 0);
708 int drbd_send_ping_ack(struct drbd_connection *connection)
710 struct drbd_socket *sock;
712 sock = &connection->meta;
713 if (!conn_prepare_command(connection, sock))
715 return conn_send_command(connection, sock, P_PING_ACK, 0, NULL, 0);
718 int drbd_send_sync_param(struct drbd_peer_device *peer_device)
720 struct drbd_socket *sock;
721 struct p_rs_param_95 *p;
723 const int apv = peer_device->connection->agreed_pro_version;
724 enum drbd_packet cmd;
726 struct disk_conf *dc;
728 sock = &peer_device->connection->data;
729 p = drbd_prepare_command(peer_device, sock);
734 nc = rcu_dereference(peer_device->connection->net_conf);
736 size = apv <= 87 ? sizeof(struct p_rs_param)
737 : apv == 88 ? sizeof(struct p_rs_param)
738 + strlen(nc->verify_alg) + 1
739 : apv <= 94 ? sizeof(struct p_rs_param_89)
740 : /* apv >= 95 */ sizeof(struct p_rs_param_95);
742 cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;
744 /* initialize verify_alg and csums_alg */
745 memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);
747 if (get_ldev(peer_device->device)) {
748 dc = rcu_dereference(peer_device->device->ldev->disk_conf);
749 p->resync_rate = cpu_to_be32(dc->resync_rate);
750 p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
751 p->c_delay_target = cpu_to_be32(dc->c_delay_target);
752 p->c_fill_target = cpu_to_be32(dc->c_fill_target);
753 p->c_max_rate = cpu_to_be32(dc->c_max_rate);
754 put_ldev(peer_device->device);
756 p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
757 p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
758 p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
759 p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
760 p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
764 strcpy(p->verify_alg, nc->verify_alg);
766 strcpy(p->csums_alg, nc->csums_alg);
769 return drbd_send_command(peer_device, sock, cmd, size, NULL, 0);
772 int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd)
774 struct drbd_socket *sock;
775 struct p_protocol *p;
779 sock = &connection->data;
780 p = __conn_prepare_command(connection, sock);
785 nc = rcu_dereference(connection->net_conf);
787 if (nc->tentative && connection->agreed_pro_version < 92) {
789 mutex_unlock(&sock->mutex);
790 drbd_err(connection, "--dry-run is not supported by peer");
795 if (connection->agreed_pro_version >= 87)
796 size += strlen(nc->integrity_alg) + 1;
798 p->protocol = cpu_to_be32(nc->wire_protocol);
799 p->after_sb_0p = cpu_to_be32(nc->after_sb_0p);
800 p->after_sb_1p = cpu_to_be32(nc->after_sb_1p);
801 p->after_sb_2p = cpu_to_be32(nc->after_sb_2p);
802 p->two_primaries = cpu_to_be32(nc->two_primaries);
804 if (nc->discard_my_data)
805 cf |= CF_DISCARD_MY_DATA;
808 p->conn_flags = cpu_to_be32(cf);
810 if (connection->agreed_pro_version >= 87)
811 strcpy(p->integrity_alg, nc->integrity_alg);
814 return __conn_send_command(connection, sock, cmd, size, NULL, 0);
817 int drbd_send_protocol(struct drbd_connection *connection)
821 mutex_lock(&connection->data.mutex);
822 err = __drbd_send_protocol(connection, P_PROTOCOL);
823 mutex_unlock(&connection->data.mutex);
828 static int _drbd_send_uuids(struct drbd_peer_device *peer_device, u64 uuid_flags)
830 struct drbd_device *device = peer_device->device;
831 struct drbd_socket *sock;
835 if (!get_ldev_if_state(device, D_NEGOTIATING))
838 sock = &peer_device->connection->data;
839 p = drbd_prepare_command(peer_device, sock);
844 spin_lock_irq(&device->ldev->md.uuid_lock);
845 for (i = UI_CURRENT; i < UI_SIZE; i++)
846 p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
847 spin_unlock_irq(&device->ldev->md.uuid_lock);
849 device->comm_bm_set = drbd_bm_total_weight(device);
850 p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
852 uuid_flags |= rcu_dereference(peer_device->connection->net_conf)->discard_my_data ? 1 : 0;
854 uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
855 uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
856 p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);
859 return drbd_send_command(peer_device, sock, P_UUIDS, sizeof(*p), NULL, 0);
862 int drbd_send_uuids(struct drbd_peer_device *peer_device)
864 return _drbd_send_uuids(peer_device, 0);
867 int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *peer_device)
869 return _drbd_send_uuids(peer_device, 8);
872 void drbd_print_uuids(struct drbd_device *device, const char *text)
874 if (get_ldev_if_state(device, D_NEGOTIATING)) {
875 u64 *uuid = device->ldev->md.uuid;
876 drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX\n",
878 (unsigned long long)uuid[UI_CURRENT],
879 (unsigned long long)uuid[UI_BITMAP],
880 (unsigned long long)uuid[UI_HISTORY_START],
881 (unsigned long long)uuid[UI_HISTORY_END]);
884 drbd_info(device, "%s effective data uuid: %016llX\n",
886 (unsigned long long)device->ed_uuid);
890 void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
892 struct drbd_device *device = peer_device->device;
893 struct drbd_socket *sock;
897 D_ASSERT(device, device->state.disk == D_UP_TO_DATE);
899 uuid = device->ldev->md.uuid[UI_BITMAP];
900 if (uuid && uuid != UUID_JUST_CREATED)
901 uuid = uuid + UUID_NEW_BM_OFFSET;
903 get_random_bytes(&uuid, sizeof(u64));
904 drbd_uuid_set(device, UI_BITMAP, uuid);
905 drbd_print_uuids(device, "updated sync UUID");
906 drbd_md_sync(device);
908 sock = &peer_device->connection->data;
909 p = drbd_prepare_command(peer_device, sock);
911 p->uuid = cpu_to_be64(uuid);
912 drbd_send_command(peer_device, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
916 int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enum dds_flags flags)
918 struct drbd_device *device = peer_device->device;
919 struct drbd_socket *sock;
921 sector_t d_size, u_size;
923 unsigned int max_bio_size;
925 if (get_ldev_if_state(device, D_NEGOTIATING)) {
926 D_ASSERT(device, device->ldev->backing_bdev);
927 d_size = drbd_get_max_capacity(device->ldev);
929 u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
931 q_order_type = drbd_queue_order_type(device);
932 max_bio_size = queue_max_hw_sectors(device->ldev->backing_bdev->bd_disk->queue) << 9;
933 max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
938 q_order_type = QUEUE_ORDERED_NONE;
939 max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
942 sock = &peer_device->connection->data;
943 p = drbd_prepare_command(peer_device, sock);
947 if (peer_device->connection->agreed_pro_version <= 94)
948 max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
949 else if (peer_device->connection->agreed_pro_version < 100)
950 max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);
952 p->d_size = cpu_to_be64(d_size);
953 p->u_size = cpu_to_be64(u_size);
954 p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(device->this_bdev));
955 p->max_bio_size = cpu_to_be32(max_bio_size);
956 p->queue_order_type = cpu_to_be16(q_order_type);
957 p->dds_flags = cpu_to_be16(flags);
958 return drbd_send_command(peer_device, sock, P_SIZES, sizeof(*p), NULL, 0);
962 * drbd_send_current_state() - Sends the drbd state to the peer
963 * @peer_device: DRBD peer device.
965 int drbd_send_current_state(struct drbd_peer_device *peer_device)
967 struct drbd_socket *sock;
970 sock = &peer_device->connection->data;
971 p = drbd_prepare_command(peer_device, sock);
974 p->state = cpu_to_be32(peer_device->device->state.i); /* Within the send mutex */
975 return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
979 * drbd_send_state() - After a state change, sends the new state to the peer
980 * @peer_device: DRBD peer device.
981 * @state: the state to send, not necessarily the current state.
983 * Each state change queues an "after_state_ch" work, which will eventually
984 * send the resulting new state to the peer. If more state changes happen
985 * between queuing and processing of the after_state_ch work, we still
986 * want to send each intermediary state in the order it occurred.
988 int drbd_send_state(struct drbd_peer_device *peer_device, union drbd_state state)
990 struct drbd_socket *sock;
993 sock = &peer_device->connection->data;
994 p = drbd_prepare_command(peer_device, sock);
997 p->state = cpu_to_be32(state.i); /* Within the send mutex */
998 return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
1001 int drbd_send_state_req(struct drbd_peer_device *peer_device, union drbd_state mask, union drbd_state val)
1003 struct drbd_socket *sock;
1004 struct p_req_state *p;
1006 sock = &peer_device->connection->data;
1007 p = drbd_prepare_command(peer_device, sock);
1010 p->mask = cpu_to_be32(mask.i);
1011 p->val = cpu_to_be32(val.i);
1012 return drbd_send_command(peer_device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
1015 int conn_send_state_req(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
1017 enum drbd_packet cmd;
1018 struct drbd_socket *sock;
1019 struct p_req_state *p;
1021 cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
1022 sock = &connection->data;
1023 p = conn_prepare_command(connection, sock);
1026 p->mask = cpu_to_be32(mask.i);
1027 p->val = cpu_to_be32(val.i);
1028 return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
1031 void drbd_send_sr_reply(struct drbd_peer_device *peer_device, enum drbd_state_rv retcode)
1033 struct drbd_socket *sock;
1034 struct p_req_state_reply *p;
1036 sock = &peer_device->connection->meta;
1037 p = drbd_prepare_command(peer_device, sock);
1039 p->retcode = cpu_to_be32(retcode);
1040 drbd_send_command(peer_device, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
1044 void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode)
1046 struct drbd_socket *sock;
1047 struct p_req_state_reply *p;
1048 enum drbd_packet cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
1050 sock = &connection->meta;
1051 p = conn_prepare_command(connection, sock);
1053 p->retcode = cpu_to_be32(retcode);
1054 conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
1058 static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
1060 BUG_ON(code & ~0xf);
1061 p->encoding = (p->encoding & ~0xf) | code;
1064 static void dcbp_set_start(struct p_compressed_bm *p, int set)
1066 p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
1069 static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
1072 p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
1075 static int fill_bitmap_rle_bits(struct drbd_device *device,
1076 struct p_compressed_bm *p,
1078 struct bm_xfer_ctx *c)
1080 struct bitstream bs;
1081 unsigned long plain_bits;
1088 /* may we use this feature? */
1090 use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle;
1092 if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90)
1095 if (c->bit_offset >= c->bm_bits)
1096 return 0; /* nothing to do. */
1098 /* use at most this many bytes */
1099 bitstream_init(&bs, p->code, size, 0);
1100 memset(p->code, 0, size);
1101 /* plain bits covered in this code string */
1104 /* p->encoding & 0x80 stores whether the first run length is set.
1105 * bit offset is implicit.
1106 * start with toggle == 2 to be able to tell the first iteration */
1109 /* see how much plain bits we can stuff into one packet
1110 * using RLE and VLI. */
1112 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset)
1113 : _drbd_bm_find_next(device, c->bit_offset);
1116 rl = tmp - c->bit_offset;
1118 if (toggle == 2) { /* first iteration */
1120 /* the first checked bit was set,
1121 * store start value, */
1122 dcbp_set_start(p, 1);
1123 /* but skip encoding of zero run length */
1127 dcbp_set_start(p, 0);
1130 /* paranoia: catch zero runlength.
1131 * can only happen if bitmap is modified while we scan it. */
1133 drbd_err(device, "unexpected zero runlength while encoding bitmap "
1134 "t:%u bo:%lu\n", toggle, c->bit_offset);
1138 bits = vli_encode_bits(&bs, rl);
1139 if (bits == -ENOBUFS) /* buffer full */
1142 drbd_err(device, "error while encoding bitmap: %d\n", bits);
1148 c->bit_offset = tmp;
1149 } while (c->bit_offset < c->bm_bits);
1151 len = bs.cur.b - p->code + !!bs.cur.bit;
1153 if (plain_bits < (len << 3)) {
1154 /* incompressible with this method.
1155 * we need to rewind both word and bit position. */
1156 c->bit_offset -= plain_bits;
1157 bm_xfer_ctx_bit_to_word_offset(c);
1158 c->bit_offset = c->word_offset * BITS_PER_LONG;
1162 /* RLE + VLI was able to compress it just fine.
1163 * update c->word_offset. */
1164 bm_xfer_ctx_bit_to_word_offset(c);
1166 /* store pad_bits */
1167 dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);
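/* Worked example (illustrative): a bitmap chunk starting 0000 1111 01...
 * is sent as the run lengths 4, 4, 1, 1 with the start flag cleared,
 * because the first run is a run of zeros.  Only VLI-encoded run lengths
 * travel over the wire; the bit values alternate implicitly.
 */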
1173 * send_bitmap_rle_or_plain
1175 * Return 0 when done, 1 when another iteration is needed, and a negative error
1176 * code upon failure.
1179 send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
1181 struct drbd_socket *sock = &first_peer_device(device)->connection->data;
1182 unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
1183 struct p_compressed_bm *p = sock->sbuf + header_size;
1186 len = fill_bitmap_rle_bits(device, p,
1187 DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
1192 dcbp_set_code(p, RLE_VLI_Bits);
1193 err = __send_command(first_peer_device(device)->connection, device->vnr, sock,
1194 P_COMPRESSED_BITMAP, sizeof(*p) + len,
1197 c->bytes[0] += header_size + sizeof(*p) + len;
1199 if (c->bit_offset >= c->bm_bits)
1202 /* was not compressible.
1203 * send a buffer full of plain text bits instead. */
1204 unsigned int data_size;
1205 unsigned long num_words;
1206 unsigned long *p = sock->sbuf + header_size;
1208 data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
1209 num_words = min_t(size_t, data_size / sizeof(*p),
1210 c->bm_words - c->word_offset);
1211 len = num_words * sizeof(*p);
1213 drbd_bm_get_lel(device, c->word_offset, num_words, p);
1214 err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
1215 c->word_offset += num_words;
1216 c->bit_offset = c->word_offset * BITS_PER_LONG;
1219 c->bytes[1] += header_size + len;
1221 if (c->bit_offset > c->bm_bits)
1222 c->bit_offset = c->bm_bits;
1226 INFO_bm_xfer_stats(device, "send", c);
1234 /* See the comment at receive_bitmap() */
1235 static int _drbd_send_bitmap(struct drbd_device *device)
1237 struct bm_xfer_ctx c;
1240 if (!expect(device->bitmap))
1243 if (get_ldev(device)) {
1244 if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
1245 drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
1246 drbd_bm_set_all(device);
1247 if (drbd_bm_write(device)) {
1248 /* write_bm did fail! Leave full sync flag set in Meta P_DATA
1249 * but otherwise process as per normal - need to tell other
1250 * side that a full resync is required! */
1251 drbd_err(device, "Failed to write bitmap to disk!\n");
1253 drbd_md_clear_flag(device, MDF_FULL_SYNC);
1254 drbd_md_sync(device);
1260 c = (struct bm_xfer_ctx) {
1261 .bm_bits = drbd_bm_bits(device),
1262 .bm_words = drbd_bm_words(device),
1266 err = send_bitmap_rle_or_plain(device, &c);
1272 int drbd_send_bitmap(struct drbd_device *device)
1274 struct drbd_socket *sock = &first_peer_device(device)->connection->data;
1277 mutex_lock(&sock->mutex);
1279 err = !_drbd_send_bitmap(device);
1280 mutex_unlock(&sock->mutex);
1284 void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set_size)
1286 struct drbd_socket *sock;
1287 struct p_barrier_ack *p;
1289 if (connection->cstate < C_WF_REPORT_PARAMS)
1292 sock = &connection->meta;
1293 p = conn_prepare_command(connection, sock);
1296 p->barrier = barrier_nr;
1297 p->set_size = cpu_to_be32(set_size);
1298 conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
1302 * _drbd_send_ack() - Sends an ack packet
1303 * @device: DRBD device.
1304 * @cmd: Packet command code.
1305 * @sector: sector, needs to be in big endian byte order
1306 * @blksize: size in byte, needs to be in big endian byte order
1307 * @block_id: Id, big endian byte order
1309 static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1310 u64 sector, u32 blksize, u64 block_id)
1312 struct drbd_socket *sock;
1313 struct p_block_ack *p;
1315 if (peer_device->device->state.conn < C_CONNECTED)
1318 sock = &peer_device->connection->meta;
1319 p = drbd_prepare_command(peer_device, sock);
1323 p->block_id = block_id;
1324 p->blksize = blksize;
1325 p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
1326 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
1329 /* dp->sector and dp->block_id already/still in network byte order,
1330 * data_size is payload size according to dp->head,
1331 * and may need to be corrected for digest size. */
1332 void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1333 struct p_data *dp, int data_size)
1335 if (peer_device->connection->peer_integrity_tfm)
1336 data_size -= crypto_hash_digestsize(peer_device->connection->peer_integrity_tfm);
1337 _drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
1341 void drbd_send_ack_rp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1342 struct p_block_req *rp)
1344 _drbd_send_ack(peer_device, cmd, rp->sector, rp->blksize, rp->block_id);
1348 * drbd_send_ack() - Sends an ack packet
1349 * @device: DRBD device
1350 * @cmd: packet command code
1351 * @peer_req: peer request
1353 int drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1354 struct drbd_peer_request *peer_req)
1356 return _drbd_send_ack(peer_device, cmd,
1357 cpu_to_be64(peer_req->i.sector),
1358 cpu_to_be32(peer_req->i.size),
1359 peer_req->block_id);
1362 /* This function misuses the block_id field to signal if the blocks
1363 * are in sync or not. */
1364 int drbd_send_ack_ex(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1365 sector_t sector, int blksize, u64 block_id)
1367 return _drbd_send_ack(peer_device, cmd,
1368 cpu_to_be64(sector),
1369 cpu_to_be32(blksize),
1370 cpu_to_be64(block_id));
1373 int drbd_send_drequest(struct drbd_peer_device *peer_device, int cmd,
1374 sector_t sector, int size, u64 block_id)
1376 struct drbd_socket *sock;
1377 struct p_block_req *p;
1379 sock = &peer_device->connection->data;
1380 p = drbd_prepare_command(peer_device, sock);
1383 p->sector = cpu_to_be64(sector);
1384 p->block_id = block_id;
1385 p->blksize = cpu_to_be32(size);
1386 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
1389 int drbd_send_drequest_csum(struct drbd_peer_device *peer_device, sector_t sector, int size,
1390 void *digest, int digest_size, enum drbd_packet cmd)
1392 struct drbd_socket *sock;
1393 struct p_block_req *p;
1395 /* FIXME: Put the digest into the preallocated socket buffer. */
1397 sock = &peer_device->connection->data;
1398 p = drbd_prepare_command(peer_device, sock);
1401 p->sector = cpu_to_be64(sector);
1402 p->block_id = ID_SYNCER /* unused */;
1403 p->blksize = cpu_to_be32(size);
1404 return drbd_send_command(peer_device, sock, cmd, sizeof(*p), digest, digest_size);
1407 int drbd_send_ov_request(struct drbd_peer_device *peer_device, sector_t sector, int size)
1409 struct drbd_socket *sock;
1410 struct p_block_req *p;
1412 sock = &peer_device->connection->data;
1413 p = drbd_prepare_command(peer_device, sock);
1416 p->sector = cpu_to_be64(sector);
1417 p->block_id = ID_SYNCER /* unused */;
1418 p->blksize = cpu_to_be32(size);
1419 return drbd_send_command(peer_device, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
1422 /* called on sndtimeo
1423 * returns false if we should retry,
1424 * true if we think connection is dead
1426 static int we_should_drop_the_connection(struct drbd_connection *connection, struct socket *sock)
1429 /* long elapsed = (long)(jiffies - device->last_received); */
1431 drop_it = connection->meta.socket == sock
1432 || !connection->asender.task
1433 || get_t_state(&connection->asender) != RUNNING
1434 || connection->cstate < C_WF_REPORT_PARAMS;
1439 drop_it = !--connection->ko_count;
1441 drbd_err(connection, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
1442 current->comm, current->pid, connection->ko_count);
1443 request_ping(connection);
1446 return drop_it; /* && (device->state == R_PRIMARY) */
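/* In effect (sketch): every send timeout on the data socket requests a
 * ping and decrements ko_count (initialized from net_conf in drbd_send());
 * once it hits zero, the connection is considered dead.
 */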
1449 static void drbd_update_congested(struct drbd_connection *connection)
1451 struct sock *sk = connection->data.socket->sk;
1452 if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
1453 set_bit(NET_CONGESTED, &connection->flags);
1456 /* The idea of sendpage seems to be to put some kind of reference
1457 * to the page into the skb, and to hand it over to the NIC. In
1458 * this process get_page() gets called.
1460 * As soon as the page was really sent over the network put_page()
1461 * gets called by some part of the network layer. [ NIC driver? ]
1463 * [ get_page() / put_page() increment/decrement the count. If count
1464 * reaches 0 the page will be freed. ]
1466 * This works nicely with pages from FSs.
1467 * But this means that in protocol A we might signal IO completion too early!
1469 * In order not to corrupt data during a resync we must make sure
1470 * that we do not reuse our own buffer pages (EEs) too early, therefore
1471 * we have the net_ee list.
1473 * XFS seems to have problems, still, it submits pages with page_count == 0!
1474 * As a workaround, we disable sendpage on pages
1475 * with page_count == 0 or PageSlab.
1477 static int _drbd_no_send_page(struct drbd_peer_device *peer_device, struct page *page,
1478 int offset, size_t size, unsigned msg_flags)
1480 struct socket *socket;
1484 socket = peer_device->connection->data.socket;
1485 addr = kmap(page) + offset;
1486 err = drbd_send_all(peer_device->connection, socket, addr, size, msg_flags);
1489 peer_device->device->send_cnt += size >> 9;
1493 static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *page,
1494 int offset, size_t size, unsigned msg_flags)
1496 struct socket *socket = peer_device->connection->data.socket;
1497 mm_segment_t oldfs = get_fs();
1501 /* e.g. XFS meta- & log-data is in slab pages, which have a
1502 * page_count of 0 and/or have PageSlab() set.
1503 * we cannot use send_page for those, as that does get_page();
1504 * put_page(); and would cause either a VM_BUG directly, or
1505 * __page_cache_release a page that would actually still be referenced
1506 * by someone, leading to some obscure delayed Oops somewhere else. */
1507 if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
1508 return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);
1510 msg_flags |= MSG_NOSIGNAL;
1511 drbd_update_congested(peer_device->connection);
1516 sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
1518 if (sent == -EAGAIN) {
1519 if (we_should_drop_the_connection(peer_device->connection, socket))
1523 drbd_warn(peer_device->device, "%s: size=%d len=%d sent=%d\n",
1524 __func__, (int)size, len, sent);
1531 } while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
1533 clear_bit(NET_CONGESTED, &peer_device->connection->flags);
1537 peer_device->device->send_cnt += size >> 9;
1542 static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
1544 struct bio_vec bvec;
1545 struct bvec_iter iter;
1547 /* hint all but last page with MSG_MORE */
1548 bio_for_each_segment(bvec, bio, iter) {
1551 err = _drbd_no_send_page(peer_device, bvec.bv_page,
1552 bvec.bv_offset, bvec.bv_len,
1553 bio_iter_last(bvec, iter)
1561 static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio)
1563 struct bio_vec bvec;
1564 struct bvec_iter iter;
1566 /* hint all but last page with MSG_MORE */
1567 bio_for_each_segment(bvec, bio, iter) {
1570 err = _drbd_send_page(peer_device, bvec.bv_page,
1571 bvec.bv_offset, bvec.bv_len,
1572 bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
1579 static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
1580 struct drbd_peer_request *peer_req)
1582 struct page *page = peer_req->pages;
1583 unsigned len = peer_req->i.size;
1586 /* hint all but last page with MSG_MORE */
1587 page_chain_for_each(page) {
1588 unsigned l = min_t(unsigned, len, PAGE_SIZE);
1590 err = _drbd_send_page(peer_device, page, 0, l,
1591 page_chain_next(page) ? MSG_MORE : 0);
1599 static u32 bio_flags_to_wire(struct drbd_connection *connection, unsigned long bi_rw)
1601 if (connection->agreed_pro_version >= 95)
1602 return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
1603 (bi_rw & REQ_FUA ? DP_FUA : 0) |
1604 (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
1605 (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
1607 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
1610 /* Used to send write or TRIM aka REQ_DISCARD requests
1611 * R_PRIMARY -> Peer (P_DATA, P_TRIM)
1613 int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req)
1615 struct drbd_device *device = peer_device->device;
1616 struct drbd_socket *sock;
1618 unsigned int dp_flags = 0;
1622 sock = &peer_device->connection->data;
1623 p = drbd_prepare_command(peer_device, sock);
1624 dgs = peer_device->connection->integrity_tfm ?
1625 crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
1629 p->sector = cpu_to_be64(req->i.sector);
1630 p->block_id = (unsigned long)req;
1631 p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
1632 dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio->bi_rw);
1633 if (device->state.conn >= C_SYNC_SOURCE &&
1634 device->state.conn <= C_PAUSED_SYNC_T)
1635 dp_flags |= DP_MAY_SET_IN_SYNC;
1636 if (peer_device->connection->agreed_pro_version >= 100) {
1637 if (req->rq_state & RQ_EXP_RECEIVE_ACK)
1638 dp_flags |= DP_SEND_RECEIVE_ACK;
1639 if (req->rq_state & RQ_EXP_WRITE_ACK)
1640 dp_flags |= DP_SEND_WRITE_ACK;
1642 p->dp_flags = cpu_to_be32(dp_flags);
1644 if (dp_flags & DP_DISCARD) {
1645 struct p_trim *t = (struct p_trim*)p;
1646 t->size = cpu_to_be32(req->i.size);
1647 err = __send_command(peer_device->connection, device->vnr, sock, P_TRIM, sizeof(*t), NULL, 0);
1651 /* our digest is still only over the payload.
1652 * TRIM does not carry any payload. */
1654 drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, p + 1);
1655 err = __send_command(peer_device->connection, device->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
1657 /* For protocol A, we have to memcpy the payload into
1658 * socket buffers, as we may complete right away
1659 * as soon as we handed it over to tcp, at which point the data
1660 * pages may become invalid.
1662 * For data-integrity enabled, we copy it as well, so we can be
1663 * sure that even if the bio pages may still be modified, it
1664 * won't change the data on the wire, thus if the digest checks
1665 * out ok after sending on this side, but does not fit on the
1666 * receiving side, we sure have detected corruption elsewhere.
1668 if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || dgs)
1669 err = _drbd_send_bio(peer_device, req->master_bio);
1671 err = _drbd_send_zc_bio(peer_device, req->master_bio);
1673 /* double check digest, sometimes buffers have been modified in flight. */
1674 if (dgs > 0 && dgs <= 64) {
1675 /* 64 byte, 512 bit, is the largest digest size
1676 * currently supported in kernel crypto. */
1677 unsigned char digest[64];
1678 drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest);
1679 if (memcmp(p + 1, digest, dgs)) {
1681 "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
1682 (unsigned long long)req->i.sector, req->i.size);
1684 } /* else if (dgs > 64) {
1685 ... Be noisy about digest too large ...
1689 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
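/* Summary of the send paths above (sketch): P_TRIM carries no payload;
 * otherwise, when no peer ack is expected (protocol A) or an integrity
 * digest is in use, the payload is copied into the socket via
 * _drbd_send_bio(); else the zero-copy _drbd_send_zc_bio() path is used.
 */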
1694 /* answer packet, used to send data back for read requests:
1695 * Peer -> (diskless) R_PRIMARY (P_DATA_REPLY)
1696 * C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
1698 int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
1699 struct drbd_peer_request *peer_req)
1701 struct drbd_device *device = peer_device->device;
1702 struct drbd_socket *sock;
1707 sock = &peer_device->connection->data;
1708 p = drbd_prepare_command(peer_device, sock);
1710 dgs = peer_device->connection->integrity_tfm ?
1711 crypto_hash_digestsize(peer_device->connection->integrity_tfm) : 0;
1715 p->sector = cpu_to_be64(peer_req->i.sector);
1716 p->block_id = peer_req->block_id;
1717 p->seq_num = 0; /* unused */
1720 drbd_csum_ee(peer_device->connection->integrity_tfm, peer_req, p + 1);
1721 err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
1723 err = _drbd_send_zc_ee(peer_device, peer_req);
1724 mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
1729 int drbd_send_out_of_sync(struct drbd_peer_device *peer_device, struct drbd_request *req)
1731 struct drbd_socket *sock;
1732 struct p_block_desc *p;
1734 sock = &peer_device->connection->data;
1735 p = drbd_prepare_command(peer_device, sock);
1738 p->sector = cpu_to_be64(req->i.sector);
1739 p->blksize = cpu_to_be32(req->i.size);
1740 return drbd_send_command(peer_device, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
1744 drbd_send distinguishes two cases:
1746 Packets sent via the data socket "sock"
1747 and packets sent via the meta data socket "msock"
1749                    sock                      msock
1750 -----------------+-------------------------+------------------------------
1751 timeout           conf.timeout / 2          conf.timeout / 2
1752 timeout action    send a ping via msock     Abort communication
1753                                             and close all sockets
1757 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
1759 int drbd_send(struct drbd_connection *connection, struct socket *sock,
1760 void *buf, size_t size, unsigned msg_flags)
1769 /* THINK if (signal_pending) return ... ? */
1774 msg.msg_name = NULL;
1775 msg.msg_namelen = 0;
1776 msg.msg_control = NULL;
1777 msg.msg_controllen = 0;
1778 msg.msg_flags = msg_flags | MSG_NOSIGNAL;
1780 if (sock == connection->data.socket) {
1782 connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
1784 drbd_update_congested(connection);
1788 * tcp_sendmsg does _not_ use its size parameter at all ?
1790 * -EAGAIN on timeout, -EINTR on signal.
1793 * do we need to block DRBD_SIG if sock == &meta.socket ??
1794 * otherwise wake_asender() might interrupt some send_*Ack !
1796 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
1797 if (rv == -EAGAIN) {
1798 if (we_should_drop_the_connection(connection, sock))
1804 flush_signals(current);
1812 } while (sent < size);
1814 if (sock == connection->data.socket)
1815 clear_bit(NET_CONGESTED, &connection->flags);
1818 if (rv != -EAGAIN) {
1819 drbd_err(connection, "%s_sendmsg returned %d\n",
1820 sock == connection->meta.socket ? "msock" : "sock",
1822 conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
1824 conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
1831 * drbd_send_all - Send an entire buffer
1833 * Returns 0 upon success and a negative error value otherwise.
1835 int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void *buffer,
1836 size_t size, unsigned msg_flags)
1840 err = drbd_send(connection, sock, buffer, size, msg_flags);
1848 static int drbd_open(struct block_device *bdev, fmode_t mode)
1850 struct drbd_device *device = bdev->bd_disk->private_data;
1851 unsigned long flags;
1854 mutex_lock(&drbd_main_mutex);
1855 spin_lock_irqsave(&device->resource->req_lock, flags);
1856 /* to have a stable device->state.role
1857 * and no race with updating open_cnt */
1859 if (device->state.role != R_PRIMARY) {
1860 if (mode & FMODE_WRITE)
1862 else if (!allow_oos)
1868 spin_unlock_irqrestore(&device->resource->req_lock, flags);
1869 mutex_unlock(&drbd_main_mutex);
1874 static void drbd_release(struct gendisk *gd, fmode_t mode)
1876 struct drbd_device *device = gd->private_data;
1877 mutex_lock(&drbd_main_mutex);
1879 mutex_unlock(&drbd_main_mutex);
1882 static void drbd_set_defaults(struct drbd_device *device)
1884 /* Beware! The actual layout differs
1885 * between big endian and little endian */
1886 device->state = (union drbd_dev_state) {
1887 { .role = R_SECONDARY,
1889 .conn = C_STANDALONE,
1895 void drbd_init_set_defaults(struct drbd_device *device)
1897 /* the memset(,0,) did most of this.
1898 * note: only assignments, no allocation in here */
1900 drbd_set_defaults(device);
1902 atomic_set(&device->ap_bio_cnt, 0);
1903 atomic_set(&device->ap_pending_cnt, 0);
1904 atomic_set(&device->rs_pending_cnt, 0);
1905 atomic_set(&device->unacked_cnt, 0);
1906 atomic_set(&device->local_cnt, 0);
1907 atomic_set(&device->pp_in_use_by_net, 0);
1908 atomic_set(&device->rs_sect_in, 0);
1909 atomic_set(&device->rs_sect_ev, 0);
1910 atomic_set(&device->ap_in_flight, 0);
1911 atomic_set(&device->md_io_in_use, 0);
1913 mutex_init(&device->own_state_mutex);
1914 device->state_mutex = &device->own_state_mutex;
1916 spin_lock_init(&device->al_lock);
1917 spin_lock_init(&device->peer_seq_lock);
1919 INIT_LIST_HEAD(&device->active_ee);
1920 INIT_LIST_HEAD(&device->sync_ee);
1921 INIT_LIST_HEAD(&device->done_ee);
1922 INIT_LIST_HEAD(&device->read_ee);
1923 INIT_LIST_HEAD(&device->net_ee);
1924 INIT_LIST_HEAD(&device->resync_reads);
1925 INIT_LIST_HEAD(&device->resync_work.list);
1926 INIT_LIST_HEAD(&device->unplug_work.list);
1927 INIT_LIST_HEAD(&device->go_diskless.list);
1928 INIT_LIST_HEAD(&device->md_sync_work.list);
1929 INIT_LIST_HEAD(&device->start_resync_work.list);
1930 INIT_LIST_HEAD(&device->bm_io_work.w.list);
1932 device->resync_work.cb = w_resync_timer;
1933 device->unplug_work.cb = w_send_write_hint;
1934 device->go_diskless.cb = w_go_diskless;
1935 device->md_sync_work.cb = w_md_sync;
1936 device->bm_io_work.w.cb = w_bitmap_io;
1937 device->start_resync_work.cb = w_start_resync;
1939 init_timer(&device->resync_timer);
1940 init_timer(&device->md_sync_timer);
1941 init_timer(&device->start_resync_timer);
1942 init_timer(&device->request_timer);
1943 device->resync_timer.function = resync_timer_fn;
1944 device->resync_timer.data = (unsigned long) device;
1945 device->md_sync_timer.function = md_sync_timer_fn;
1946 device->md_sync_timer.data = (unsigned long) device;
1947 device->start_resync_timer.function = start_resync_timer_fn;
1948 device->start_resync_timer.data = (unsigned long) device;
1949 device->request_timer.function = request_timer_fn;
1950 device->request_timer.data = (unsigned long) device;
1952 init_waitqueue_head(&device->misc_wait);
1953 init_waitqueue_head(&device->state_wait);
1954 init_waitqueue_head(&device->ee_wait);
1955 init_waitqueue_head(&device->al_wait);
1956 init_waitqueue_head(&device->seq_wait);
1958 device->resync_wenr = LC_FREE;
1959 device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
1960 device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
1963 void drbd_device_cleanup(struct drbd_device *device)
1966 if (first_peer_device(device)->connection->receiver.t_state != NONE)
1967 drbd_err(device, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
1968 first_peer_device(device)->connection->receiver.t_state);
1970 device->al_writ_cnt =
1971 device->bm_writ_cnt =
1979 device->rs_failed = 0;
1980 device->rs_last_events = 0;
1981 device->rs_last_sect_ev = 0;
1982 for (i = 0; i < DRBD_SYNC_MARKS; i++) {
1983 device->rs_mark_left[i] = 0;
1984 device->rs_mark_time[i] = 0;
1986 D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);
1988 drbd_set_my_capacity(device, 0);
1989 if (device->bitmap) {
1990 /* maybe never allocated. */
1991 drbd_bm_resize(device, 0, 1);
1992 drbd_bm_cleanup(device);
1995 drbd_free_bc(device->ldev);
1996 device->ldev = NULL;
1998 clear_bit(AL_SUSPENDED, &device->flags);
2000 D_ASSERT(device, list_empty(&device->active_ee));
2001 D_ASSERT(device, list_empty(&device->sync_ee));
2002 D_ASSERT(device, list_empty(&device->done_ee));
2003 D_ASSERT(device, list_empty(&device->read_ee));
2004 D_ASSERT(device, list_empty(&device->net_ee));
2005 D_ASSERT(device, list_empty(&device->resync_reads));
2006 D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
2007 D_ASSERT(device, list_empty(&device->resync_work.list));
2008 D_ASSERT(device, list_empty(&device->unplug_work.list));
2009 D_ASSERT(device, list_empty(&device->go_diskless.list));
2011 drbd_set_defaults(device);
2015 static void drbd_destroy_mempools(void)
2019 while (drbd_pp_pool) {
2020 page = drbd_pp_pool;
2021 drbd_pp_pool = (struct page *)page_private(page);
2026 /* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */
2028 if (drbd_md_io_bio_set)
2029 bioset_free(drbd_md_io_bio_set);
2030 if (drbd_md_io_page_pool)
2031 mempool_destroy(drbd_md_io_page_pool);
2032 if (drbd_ee_mempool)
2033 mempool_destroy(drbd_ee_mempool);
2034 if (drbd_request_mempool)
2035 mempool_destroy(drbd_request_mempool);
2037 kmem_cache_destroy(drbd_ee_cache);
2038 if (drbd_request_cache)
2039 kmem_cache_destroy(drbd_request_cache);
2040 if (drbd_bm_ext_cache)
2041 kmem_cache_destroy(drbd_bm_ext_cache);
2042 if (drbd_al_ext_cache)
2043 kmem_cache_destroy(drbd_al_ext_cache);
2045 drbd_md_io_bio_set = NULL;
2046 drbd_md_io_page_pool = NULL;
2047 drbd_ee_mempool = NULL;
2048 drbd_request_mempool = NULL;
2049 drbd_ee_cache = NULL;
2050 drbd_request_cache = NULL;
2051 drbd_bm_ext_cache = NULL;
2052 drbd_al_ext_cache = NULL;
2057 static int drbd_create_mempools(void)
2060 const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
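/* Sizing note (illustrative): assuming the usual 1 MiB DRBD_MAX_BIO_SIZE
 * and 4 KiB pages, this reserves 256 pages per configured minor. */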
	/* prepare our caches and mempools */
	drbd_request_mempool = NULL;
	drbd_ee_cache        = NULL;
	drbd_request_cache   = NULL;
	drbd_bm_ext_cache    = NULL;
	drbd_al_ext_cache    = NULL;
	drbd_pp_pool         = NULL;
	drbd_md_io_page_pool = NULL;
	drbd_md_io_bio_set   = NULL;

	/* caches */
	drbd_request_cache = kmem_cache_create(
		"drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
	if (drbd_request_cache == NULL)
		goto Enomem;

	drbd_ee_cache = kmem_cache_create(
		"drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
	if (drbd_ee_cache == NULL)
		goto Enomem;

	drbd_bm_ext_cache = kmem_cache_create(
		"drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
	if (drbd_bm_ext_cache == NULL)
		goto Enomem;

	drbd_al_ext_cache = kmem_cache_create(
		"drbd_al", sizeof(struct lc_element), 0, 0, NULL);
	if (drbd_al_ext_cache == NULL)
		goto Enomem;

	/* mempools */
	drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
	if (drbd_md_io_bio_set == NULL)
		goto Enomem;

	drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
	if (drbd_md_io_page_pool == NULL)
		goto Enomem;

	drbd_request_mempool = mempool_create(number,
		mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
	if (drbd_request_mempool == NULL)
		goto Enomem;

	drbd_ee_mempool = mempool_create(number,
		mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
	if (drbd_ee_mempool == NULL)
		goto Enomem;

	/* drbd's page pool */
	spin_lock_init(&drbd_pp_lock);

	for (i = 0; i < number; i++) {
		page = alloc_page(GFP_HIGHUSER);
		if (!page)
			goto Enomem;
		set_page_private(page, (unsigned long)drbd_pp_pool);
		drbd_pp_pool = page;
	}
	drbd_pp_vacant = number;
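	/*
	 * The pool built above is a simple LIFO stack: each page is linked to
	 * the previous head through its page_private field, and drbd_pp_pool
	 * always points at the top.  A pop is the mirror image of the push in
	 * the loop above, as done in drbd_destroy_mempools():
	 *
	 *	page = drbd_pp_pool;
	 *	drbd_pp_pool = (struct page *)page_private(page);
	 */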

	return 0;

Enomem:
	drbd_destroy_mempools(); /* in case we allocated some */
	return -ENOMEM;
}
static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
			   void *unused)
{
	/* just so we have it.  you never know what interesting things we
	 * might want to do here some day...
	 */

	return NOTIFY_DONE;
}

static struct notifier_block drbd_notifier = {
	.notifier_call = drbd_notify_sys,
};
static void drbd_release_all_peer_reqs(struct drbd_device *device)
{
	int rr;

	rr = drbd_free_peer_reqs(device, &device->active_ee);
	if (rr)
		drbd_err(device, "%d EEs in active list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->sync_ee);
	if (rr)
		drbd_err(device, "%d EEs in sync list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->read_ee);
	if (rr)
		drbd_err(device, "%d EEs in read list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->done_ee);
	if (rr)
		drbd_err(device, "%d EEs in done list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->net_ee);
	if (rr)
		drbd_err(device, "%d EEs in net list found!\n", rr);
}
/* caution. no locking. */
void drbd_destroy_device(struct kref *kref)
{
	struct drbd_device *device = container_of(kref, struct drbd_device, kref);
	struct drbd_resource *resource = device->resource;
	struct drbd_connection *connection;

	del_timer_sync(&device->request_timer);

	/* paranoia asserts */
	D_ASSERT(device, device->open_cnt == 0);
	/* end paranoia asserts */

	/* cleanup stuff that may have been allocated during
	 * device (re-)configuration or state changes */

	if (device->this_bdev)
		bdput(device->this_bdev);

	drbd_free_bc(device->ldev);
	device->ldev = NULL;

	drbd_release_all_peer_reqs(device);

	lc_destroy(device->act_log);
	lc_destroy(device->resync);

	kfree(device->p_uuid);
	/* device->p_uuid = NULL; */

	if (device->bitmap) /* should no longer be there. */
		drbd_bm_cleanup(device);
	__free_page(device->md_io_page);
	put_disk(device->vdisk);
	blk_cleanup_queue(device->rq_queue);
	kfree(device->rs_plan_s);
	kfree(first_peer_device(device));
	kfree(device);

	for_each_connection(connection, resource)
		kref_put(&connection->kref, drbd_destroy_connection);
	kref_put(&resource->kref, drbd_destroy_resource);
}
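/*
 * Illustrative: drbd_destroy_device() is only ever invoked through the kref
 * machinery, never called directly, e.g. as conn_md_sync() below does:
 *
 *	kref_put(&device->kref, drbd_destroy_device);
 *
 * container_of() then recovers the device from its embedded kref once the
 * last reference is gone.
 */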
/* One global retry thread, if we need to push back some bio and have it
 * reinserted through our make request function.
 */
static struct retry_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	spinlock_t lock;
	struct list_head writes;
} retry;
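/*
 * Flow sketch: drbd_restart_request() (the producer) moves a postponed
 * request onto retry.writes under retry.lock and kicks retry.worker;
 * do_retry() (the consumer) splices the whole list off and resubmits each
 * master bio through __drbd_make_request().
 */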
static void do_retry(struct work_struct *ws)
{
	struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
	LIST_HEAD(writes);
	struct drbd_request *req, *tmp;

	spin_lock_irq(&retry->lock);
	list_splice_init(&retry->writes, &writes);
	spin_unlock_irq(&retry->lock);

	list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
		struct drbd_device *device = req->device;
		struct bio *bio = req->master_bio;
		unsigned long start_time = req->start_time;
		bool expected;

		expected =
			expect(atomic_read(&req->completion_ref) == 0) &&
			expect(req->rq_state & RQ_POSTPONED) &&
			expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
				(req->rq_state & RQ_LOCAL_ABORTED) != 0);

		if (!expected)
			drbd_err(device, "req=%p completion_ref=%d rq_state=%x\n",
				req, atomic_read(&req->completion_ref),
				req->rq_state);

		/* We still need to put one kref associated with the
		 * "completion_ref" going zero in the code path that queued it
		 * here.  The request object may still be referenced by a
		 * frozen local req->private_bio, in case we force-detached.
		 */
		kref_put(&req->kref, drbd_req_destroy);

		/* A single suspended or otherwise blocking device may stall
		 * all others as well.  Fortunately, this code path is to
		 * recover from a situation that "should not happen":
		 * concurrent writes in multi-primary setup.
		 * In a "normal" lifecycle, this workqueue is supposed to be
		 * destroyed without ever doing anything.
		 * If it turns out to be an issue anyways, we can do per
		 * resource (replication group) or per device (minor) retry
		 * workqueues instead.
		 */

		/* We are not just doing generic_make_request(),
		 * as we want to keep the start_time information. */
		inc_ap_bio(device);
		__drbd_make_request(device, bio, start_time);
	}
}
void drbd_restart_request(struct drbd_request *req)
{
	unsigned long flags;
	spin_lock_irqsave(&retry.lock, flags);
	list_move_tail(&req->tl_requests, &retry.writes);
	spin_unlock_irqrestore(&retry.lock, flags);

	/* Drop the extra reference that would otherwise
	 * have been dropped by complete_master_bio.
	 * do_retry() needs to grab a new one. */
	dec_ap_bio(req->device);

	queue_work(retry.wq, &retry.worker);
}
void drbd_destroy_resource(struct kref *kref)
{
	struct drbd_resource *resource =
		container_of(kref, struct drbd_resource, kref);

	idr_destroy(&resource->devices);
	free_cpumask_var(resource->cpu_mask);
	kfree(resource->name);
	kfree(resource);
}
void drbd_free_resource(struct drbd_resource *resource)
{
	struct drbd_connection *connection, *tmp;

	for_each_connection_safe(connection, tmp, resource) {
		list_del(&connection->connections);
		kref_put(&connection->kref, drbd_destroy_connection);
	}
	kref_put(&resource->kref, drbd_destroy_resource);
}
static void drbd_cleanup(void)
{
	unsigned int i;
	struct drbd_device *device;
	struct drbd_resource *resource, *tmp;

	unregister_reboot_notifier(&drbd_notifier);

	/* first remove proc,
	 * drbdsetup uses its presence to detect
	 * whether DRBD is loaded.
	 * If we would get stuck in proc removal,
	 * but have netlink already deregistered,
	 * some drbdsetup commands may wait forever
	 * for an answer.
	 */
	if (drbd_proc)
		remove_proc_entry("drbd", NULL);

	if (retry.wq)
		destroy_workqueue(retry.wq);

	drbd_genl_unregister();

	idr_for_each_entry(&drbd_devices, device, i)
		drbd_delete_device(device);

	/* not _rcu since, no other updater anymore. Genl already unregistered */
	for_each_resource_safe(resource, tmp, &drbd_resources) {
		list_del(&resource->resources);
		drbd_free_resource(resource);
	}

	drbd_destroy_mempools();
	unregister_blkdev(DRBD_MAJOR, "drbd");

	idr_destroy(&drbd_devices);

	printk(KERN_INFO "drbd: module cleanup done.\n");
}
/**
 * drbd_congested() - Callback for the flusher thread
 * @congested_data:	User data
 * @bdi_bits:		Bits the BDI flusher thread is currently interested in
 *
 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
 */
static int drbd_congested(void *congested_data, int bdi_bits)
{
	struct drbd_device *device = congested_data;
	struct request_queue *q;
	char reason = '-';
	int r = 0;

	if (!may_inc_ap_bio(device)) {
		/* DRBD has frozen IO */
		r = bdi_bits;
		reason = 'd';
		goto out;
	}

	if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) {
		r |= (1 << BDI_async_congested);
		/* Without good local data, we would need to read from remote,
		 * and that would need the worker thread as well, which is
		 * currently blocked waiting for that usermode helper to
		 * finish.
		 */
		if (!get_ldev_if_state(device, D_UP_TO_DATE))
			r |= (1 << BDI_sync_congested);
		else
			put_ldev(device);
		r &= bdi_bits;
		reason = 'c';
		goto out;
	}

	if (get_ldev(device)) {
		q = bdev_get_queue(device->ldev->backing_bdev);
		r = bdi_congested(&q->backing_dev_info, bdi_bits);
		put_ldev(device);
		if (r)
			reason = 'b';
	}

	if (bdi_bits & (1 << BDI_async_congested) &&
	    test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) {
		r |= (1 << BDI_async_congested);
		reason = reason == 'b' ? 'a' : 'n';
	}

out:
	device->congestion_reason = reason;
	return r;
}
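/*
 * The single character stored in congestion_reason is kept for status and
 * debug output; reading the branches above: 'd' = IO frozen by DRBD,
 * 'c' = blocked on a pending usermode helper callback, 'b' = backing device
 * congested, 'n' = network send path congested, 'a' = both 'b' and 'n'.
 */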
static void drbd_init_workqueue(struct drbd_work_queue *wq)
{
	spin_lock_init(&wq->q_lock);
	INIT_LIST_HEAD(&wq->q);
	init_waitqueue_head(&wq->q_wait);
}
struct completion_work {
	struct drbd_work w;
	struct completion done;
};

static int w_complete(struct drbd_work *w, int cancel)
{
	struct completion_work *completion_work =
		container_of(w, struct completion_work, w);

	complete(&completion_work->done);
	return 0;
}

void drbd_flush_workqueue(struct drbd_work_queue *work_queue)
{
	struct completion_work completion_work;

	completion_work.w.cb = w_complete;
	init_completion(&completion_work.done);
	drbd_queue_work(work_queue, &completion_work.w);
	wait_for_completion(&completion_work.done);
}
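/*
 * Illustrative usage: because the work queue is processed in order, queueing
 * a w_complete item and sleeping on its completion acts as a barrier, e.g.
 *
 *	drbd_flush_workqueue(&connection->sender_work);
 *
 * returns only after everything queued before it has been handled.
 */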
struct drbd_resource *drbd_find_resource(const char *name)
{
	struct drbd_resource *resource;

	if (!name || !name[0])
		return NULL;

	rcu_read_lock();
	for_each_resource_rcu(resource, &drbd_resources) {
		if (!strcmp(resource->name, name)) {
			kref_get(&resource->kref);
			goto found;
		}
	}
	resource = NULL;
found:
	rcu_read_unlock();
	return resource;
}
struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
					  void *peer_addr, int peer_addr_len)
{
	struct drbd_resource *resource;
	struct drbd_connection *connection;

	rcu_read_lock();
	for_each_resource_rcu(resource, &drbd_resources) {
		for_each_connection_rcu(connection, resource) {
			if (connection->my_addr_len == my_addr_len &&
			    connection->peer_addr_len == peer_addr_len &&
			    !memcmp(&connection->my_addr, my_addr, my_addr_len) &&
			    !memcmp(&connection->peer_addr, peer_addr, peer_addr_len)) {
				kref_get(&connection->kref);
				goto found;
			}
		}
	}
	connection = NULL;
found:
	rcu_read_unlock();
	return connection;
}
static int drbd_alloc_socket(struct drbd_socket *socket)
{
	socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->rbuf)
		return -ENOMEM;
	socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->sbuf)
		return -ENOMEM;
	return 0;
}

static void drbd_free_socket(struct drbd_socket *socket)
{
	free_page((unsigned long) socket->sbuf);
	free_page((unsigned long) socket->rbuf);
}
void conn_free_crypto(struct drbd_connection *connection)
{
	drbd_free_sock(connection);

	crypto_free_hash(connection->csums_tfm);
	crypto_free_hash(connection->verify_tfm);
	crypto_free_hash(connection->cram_hmac_tfm);
	crypto_free_hash(connection->integrity_tfm);
	crypto_free_hash(connection->peer_integrity_tfm);
	kfree(connection->int_dig_in);
	kfree(connection->int_dig_vv);

	connection->csums_tfm = NULL;
	connection->verify_tfm = NULL;
	connection->cram_hmac_tfm = NULL;
	connection->integrity_tfm = NULL;
	connection->peer_integrity_tfm = NULL;
	connection->int_dig_in = NULL;
	connection->int_dig_vv = NULL;
}
int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts)
{
	struct drbd_connection *connection;
	cpumask_var_t new_cpu_mask;
	int err;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
		return -ENOMEM;
		/*
		retcode = ERR_NOMEM;
		drbd_msg_put_info("unable to allocate cpumask");
		*/

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
		err = bitmap_parse(res_opts->cpu_mask, DRBD_CPU_MASK_SIZE,
				   cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			drbd_warn(resource, "bitmap_parse() failed with %d\n", err);
			/* retcode = ERR_CPU_MASK_PARSE; */
			goto fail;
		}
	}
	resource->res_opts = *res_opts;
	if (cpumask_empty(new_cpu_mask))
		drbd_calc_cpu_mask(&new_cpu_mask);
	if (!cpumask_equal(resource->cpu_mask, new_cpu_mask)) {
		cpumask_copy(resource->cpu_mask, new_cpu_mask);
		for_each_connection_rcu(connection, resource) {
			connection->receiver.reset_cpu_mask = 1;
			connection->asender.reset_cpu_mask = 1;
			connection->worker.reset_cpu_mask = 1;
		}
	}
	err = 0;

fail:
	free_cpumask_var(new_cpu_mask);
	return err;
}
struct drbd_resource *drbd_create_resource(const char *name)
{
	struct drbd_resource *resource;

	resource = kzalloc(sizeof(struct drbd_resource), GFP_KERNEL);
	if (!resource)
		goto fail;
	resource->name = kstrdup(name, GFP_KERNEL);
	if (!resource->name)
		goto fail_free_resource;
	if (!zalloc_cpumask_var(&resource->cpu_mask, GFP_KERNEL))
		goto fail_free_name;
	kref_init(&resource->kref);
	idr_init(&resource->devices);
	INIT_LIST_HEAD(&resource->connections);
	list_add_tail_rcu(&resource->resources, &drbd_resources);
	mutex_init(&resource->conf_update);
	mutex_init(&resource->adm_mutex);
	spin_lock_init(&resource->req_lock);
	return resource;

fail_free_name:
	kfree(resource->name);
fail_free_resource:
	kfree(resource);
fail:
	return NULL;
}
/* caller must be under genl_lock() */
struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
{
	struct drbd_resource *resource;
	struct drbd_connection *connection;

	connection = kzalloc(sizeof(struct drbd_connection), GFP_KERNEL);
	if (!connection)
		return NULL;

	if (drbd_alloc_socket(&connection->data))
		goto fail;
	if (drbd_alloc_socket(&connection->meta))
		goto fail;

	connection->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
	if (!connection->current_epoch)
		goto fail;

	INIT_LIST_HEAD(&connection->transfer_log);

	INIT_LIST_HEAD(&connection->current_epoch->list);
	connection->epochs = 1;
	spin_lock_init(&connection->epoch_lock);
	connection->write_ordering = WO_bdev_flush;

	connection->send.seen_any_write_yet = false;
	connection->send.current_epoch_nr = 0;
	connection->send.current_epoch_writes = 0;

	resource = drbd_create_resource(name);
	if (!resource)
		goto fail;

	connection->cstate = C_STANDALONE;
	mutex_init(&connection->cstate_mutex);
	init_waitqueue_head(&connection->ping_wait);
	idr_init(&connection->peer_devices);

	drbd_init_workqueue(&connection->sender_work);
	mutex_init(&connection->data.mutex);
	mutex_init(&connection->meta.mutex);

	drbd_thread_init(resource, &connection->receiver, drbd_receiver, "receiver");
	connection->receiver.connection = connection;
	drbd_thread_init(resource, &connection->worker, drbd_worker, "worker");
	connection->worker.connection = connection;
	drbd_thread_init(resource, &connection->asender, drbd_asender, "asender");
	connection->asender.connection = connection;

	kref_init(&connection->kref);

	connection->resource = resource;

	if (set_resource_options(resource, res_opts))
		goto fail_resource;

	kref_get(&resource->kref);
	list_add_tail_rcu(&connection->connections, &resource->connections);
	return connection;

fail_resource:
	list_del(&resource->resources);
	drbd_free_resource(resource);
fail:
	kfree(connection->current_epoch);
	drbd_free_socket(&connection->meta);
	drbd_free_socket(&connection->data);
	kfree(connection);
	return NULL;
}
void drbd_destroy_connection(struct kref *kref)
{
	struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
	struct drbd_resource *resource = connection->resource;

	if (atomic_read(&connection->current_epoch->epoch_size) != 0)
		drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
	kfree(connection->current_epoch);

	idr_destroy(&connection->peer_devices);

	drbd_free_socket(&connection->meta);
	drbd_free_socket(&connection->data);
	kfree(connection->int_dig_in);
	kfree(connection->int_dig_vv);
	kfree(connection);
	kref_put(&resource->kref, drbd_destroy_resource);
}
static int init_submitter(struct drbd_device *device)
{
	/* opencoded create_singlethread_workqueue(),
	 * to be able to say "drbd%d", ..., minor */
	device->submit.wq = alloc_workqueue("drbd%u_submit",
			WQ_UNBOUND | WQ_MEM_RECLAIM, 1, device->minor);
	if (!device->submit.wq)
		return -ENOMEM;

	INIT_WORK(&device->submit.worker, do_submit);
	spin_lock_init(&device->submit.lock);
	INIT_LIST_HEAD(&device->submit.writes);
	return 0;
}
enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
{
	struct drbd_resource *resource = adm_ctx->resource;
	struct drbd_connection *connection;
	struct drbd_device *device;
	struct drbd_peer_device *peer_device, *tmp_peer_device;
	struct gendisk *disk;
	struct request_queue *q;
	int id;
	int vnr = adm_ctx->volume;
	enum drbd_ret_code err = ERR_NOMEM;

	device = minor_to_device(minor);
	if (device)
		return ERR_MINOR_EXISTS;

	/* GFP_KERNEL, we are outside of all write-out paths */
	device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
	if (!device)
		return ERR_NOMEM;
	kref_init(&device->kref);

	kref_get(&resource->kref);
	device->resource = resource;
	device->minor = minor;
	device->vnr = vnr;

	drbd_init_set_defaults(device);

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		goto out_no_q;
	device->rq_queue = q;
	q->queuedata = device;

	disk = alloc_disk(1);
	if (!disk)
		goto out_no_disk;
	device->vdisk = disk;

	set_disk_ro(disk, true);

	disk->queue = q;
	disk->major = DRBD_MAJOR;
	disk->first_minor = minor;
	disk->fops = &drbd_ops;
	sprintf(disk->disk_name, "drbd%d", minor);
	disk->private_data = device;

	device->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
	/* we have no partitions. we contain only ourselves. */
	device->this_bdev->bd_contains = device->this_bdev;

	q->backing_dev_info.congested_fn = drbd_congested;
	q->backing_dev_info.congested_data = device;

	blk_queue_make_request(q, drbd_make_request);
	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);
	/* Setting the max_hw_sectors to an odd value of 8kibyte here
	   This triggers a max_bio_size message upon first attach or connect */
	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
	blk_queue_merge_bvec(q, drbd_merge_bvec);
	q->queue_lock = &resource->req_lock;

	device->md_io_page = alloc_page(GFP_KERNEL);
	if (!device->md_io_page)
		goto out_no_io_page;

	if (drbd_bm_init(device))
		goto out_no_bitmap;
	device->read_requests = RB_ROOT;
	device->write_requests = RB_ROOT;

	id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
	if (id < 0) {
		if (id == -ENOSPC) {
			err = ERR_MINOR_EXISTS;
			drbd_msg_put_info(adm_ctx->reply_skb, "requested minor exists already");
		}
		goto out_no_minor_idr;
	}
	kref_get(&device->kref);
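	/*
	 * Note: idr_alloc() returns the new id or a negative errno; with the
	 * range restricted to [minor, minor + 1) above, -ENOSPC precisely
	 * means "this minor is already taken".  The same idiom is used for
	 * the per-resource and per-connection idrs below.
	 */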

	id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
	if (id < 0) {
		if (id == -ENOSPC) {
			err = ERR_MINOR_EXISTS;
			drbd_msg_put_info(adm_ctx->reply_skb, "requested minor exists already");
		}
		goto out_idr_remove_minor;
	}
	kref_get(&device->kref);

	INIT_LIST_HEAD(&device->peer_devices);
	for_each_connection(connection, resource) {
		peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL);
		if (!peer_device)
			goto out_idr_remove_from_resource;
		peer_device->connection = connection;
		peer_device->device = device;

		list_add(&peer_device->peer_devices, &device->peer_devices);
		kref_get(&device->kref);

		id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
		if (id < 0) {
			if (id == -ENOSPC) {
				err = ERR_INVALID_REQUEST;
				drbd_msg_put_info(adm_ctx->reply_skb, "requested volume exists already");
			}
			goto out_idr_remove_from_resource;
		}
		kref_get(&connection->kref);
	}

	if (init_submitter(device)) {
		err = ERR_NOMEM;
		drbd_msg_put_info(adm_ctx->reply_skb, "unable to create submit workqueue");
		goto out_idr_remove_vol;
	}

	add_disk(disk);

	/* inherit the connection state */
	device->state.conn = first_connection(resource)->cstate;
	if (device->state.conn == C_WF_REPORT_PARAMS) {
		for_each_peer_device(peer_device, device)
			drbd_connected(peer_device);
	}

	return NO_ERROR;

out_idr_remove_vol:
	idr_remove(&connection->peer_devices, vnr);
out_idr_remove_from_resource:
	for_each_connection(connection, resource) {
		peer_device = idr_find(&connection->peer_devices, vnr);
		if (peer_device) {
			idr_remove(&connection->peer_devices, vnr);
			kref_put(&connection->kref, drbd_destroy_connection);
		}
	}
	for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
		list_del(&peer_device->peer_devices);
		kfree(peer_device);
	}
	idr_remove(&resource->devices, vnr);
out_idr_remove_minor:
	idr_remove(&drbd_devices, minor);
	synchronize_rcu();
out_no_minor_idr:
	drbd_bm_cleanup(device);
out_no_bitmap:
	__free_page(device->md_io_page);
out_no_io_page:
	put_disk(disk);
out_no_disk:
	blk_cleanup_queue(q);
out_no_q:
	kref_put(&resource->kref, drbd_destroy_resource);
	kfree(device);
	return err;
}
void drbd_delete_device(struct drbd_device *device)
{
	struct drbd_resource *resource = device->resource;
	struct drbd_connection *connection;
	int refs = 3;

	for_each_connection(connection, resource) {
		idr_remove(&connection->peer_devices, device->vnr);
		refs++;
	}
	idr_remove(&resource->devices, device->vnr);
	idr_remove(&drbd_devices, device_to_minor(device));
	del_gendisk(device->vdisk);
	synchronize_rcu();
	kref_sub(&device->kref, refs, drbd_destroy_device);
}
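/*
 * Reference accounting sketch: the three base references dropped here are
 * the initial kref_init() plus the two kref_get() calls made in
 * drbd_create_device() for the drbd_devices and resource->devices idr
 * entries; each per-connection peer_device idr entry holds one more, hence
 * the refs++ in the loop above.
 */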
int __init drbd_init(void)
{
	int err;

	if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
		printk(KERN_ERR
		       "drbd: invalid minor_count (%d)\n", minor_count);
#ifdef MODULE
		return -EINVAL;
#else
		minor_count = DRBD_MINOR_COUNT_DEF;
#endif
	}

	err = register_blkdev(DRBD_MAJOR, "drbd");
	if (err) {
		printk(KERN_ERR
		       "drbd: unable to register block device major %d\n",
		       DRBD_MAJOR);
		return err;
	}

	register_reboot_notifier(&drbd_notifier);

	/*
	 * allocate all necessary structs
	 */
	init_waitqueue_head(&drbd_pp_wait);

	drbd_proc = NULL; /* play safe for drbd_cleanup */
	idr_init(&drbd_devices);

	rwlock_init(&global_state_lock);
	INIT_LIST_HEAD(&drbd_resources);

	err = drbd_genl_register();
	if (err) {
		printk(KERN_ERR "drbd: unable to register generic netlink family\n");
		goto fail;
	}

	err = drbd_create_mempools();
	if (err)
		goto fail;

	err = -ENOMEM;
	drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
	if (!drbd_proc)	{
		printk(KERN_ERR "drbd: unable to register proc file\n");
		goto fail;
	}

	retry.wq = create_singlethread_workqueue("drbd-reissue");
	if (!retry.wq) {
		printk(KERN_ERR "drbd: unable to create retry workqueue\n");
		goto fail;
	}
	INIT_WORK(&retry.worker, do_retry);
	spin_lock_init(&retry.lock);
	INIT_LIST_HEAD(&retry.writes);

	printk(KERN_INFO "drbd: initialized. "
	       "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
	       API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
	printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
	printk(KERN_INFO "drbd: registered as block device major %d\n",
		DRBD_MAJOR);

	return 0; /* Success! */

fail:
	drbd_cleanup();
	if (err == -ENOMEM)
		printk(KERN_ERR "drbd: ran out of memory\n");
	else
		printk(KERN_ERR "drbd: initialization failure\n");

	return err;
}
void drbd_free_bc(struct drbd_backing_dev *ldev)
{
	if (ldev == NULL)
		return;

	blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

	kfree(ldev->disk_conf);
	kfree(ldev);
}
void drbd_free_sock(struct drbd_connection *connection)
{
	if (connection->data.socket) {
		mutex_lock(&connection->data.mutex);
		kernel_sock_shutdown(connection->data.socket, SHUT_RDWR);
		sock_release(connection->data.socket);
		connection->data.socket = NULL;
		mutex_unlock(&connection->data.mutex);
	}
	if (connection->meta.socket) {
		mutex_lock(&connection->meta.mutex);
		kernel_sock_shutdown(connection->meta.socket, SHUT_RDWR);
		sock_release(connection->meta.socket);
		connection->meta.socket = NULL;
		mutex_unlock(&connection->meta.mutex);
	}
}
/* meta data management */

void conn_md_sync(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;

		kref_get(&device->kref);
		rcu_read_unlock();
		drbd_md_sync(device);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();
}
/* aligned 4kByte */
struct meta_data_on_disk {
	u64 la_size_sect;      /* last agreed size. */
	u64 uuid[UI_SIZE];     /* UUIDs. */
	u64 device_uuid;
	u64 reserved_u64_1;
	u32 flags;             /* MDF */
	u32 magic;
	u32 md_size_sect;
	u32 al_offset;         /* offset to this block */
	u32 al_nr_extents;     /* important for restoring the AL (userspace) */
	      /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
	u32 bm_offset;         /* offset to the bitmap, from here */
	u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
	u32 la_peer_max_bio_size;   /* last peer max_bio_size */

	/* see al_tr_number_to_on_disk_sector() */
	u32 al_stripes;
	u32 al_stripe_size_4k;

	u8 reserved_u8[4096 - (7*8 + 10*4)];
} __packed;
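/*
 * Layout arithmetic: the 7 u64 fields (la_size_sect, uuid[UI_SIZE] with
 * UI_SIZE == 4, device_uuid, reserved_u64_1) and 10 u32 fields add up to
 * 7*8 + 10*4 = 96 bytes; reserved_u8[] pads the block to exactly 4096
 * bytes, which drbd_md_sync() below enforces with BUILD_BUG_ON().
 */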
void drbd_md_write(struct drbd_device *device, void *b)
{
	struct meta_data_on_disk *buffer = b;
	sector_t sector;
	int i;

	memset(buffer, 0, sizeof(*buffer));

	buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(device->this_bdev));
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
	buffer->flags = cpu_to_be32(device->ldev->md.flags);
	buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);

	buffer->md_size_sect  = cpu_to_be32(device->ldev->md.md_size_sect);
	buffer->al_offset     = cpu_to_be32(device->ldev->md.al_offset);
	buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements);
	buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
	buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid);

	buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset);
	buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size);

	buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
	buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);

	D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
	sector = device->ldev->md.md_offset;

	if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) {
		/* this was a try anyways ... */
		drbd_err(device, "meta data update failed!\n");
		drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
	}
}
/**
 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
 * @device:	DRBD device.
 */
void drbd_md_sync(struct drbd_device *device)
{
	struct meta_data_on_disk *buffer;

	/* Don't accidentally change the DRBD meta data layout. */
	BUILD_BUG_ON(UI_SIZE != 4);
	BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);

	del_timer(&device->md_sync_timer);
	/* timer may be rearmed by drbd_md_mark_dirty() now. */
	if (!test_and_clear_bit(MD_DIRTY, &device->flags))
		return;

	/* We use here D_FAILED and not D_ATTACHING because we try to write
	 * metadata even if we detach due to a disk failure! */
	if (!get_ldev_if_state(device, D_FAILED))
		return;

	buffer = drbd_md_get_buffer(device);
	if (!buffer)
		goto out;

	drbd_md_write(device, buffer);

	/* Update device->ldev->md.la_size_sect,
	 * since we updated it on metadata. */
	device->ldev->md.la_size_sect = drbd_get_capacity(device->this_bdev);

	drbd_md_put_buffer(device);
out:
	put_ldev(device);
}
static int check_activity_log_stripe_size(struct drbd_device *device,
		struct meta_data_on_disk *on_disk,
		struct drbd_md *in_core)
{
	u32 al_stripes = be32_to_cpu(on_disk->al_stripes);
	u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k);
	u64 al_size_4k;

	/* both not set: default to old fixed size activity log */
	if (al_stripes == 0 && al_stripe_size_4k == 0) {
		al_stripes = 1;
		al_stripe_size_4k = MD_32kB_SECT/8;
	}

	/* some paranoia plausibility checks */

	/* we need both values to be set */
	if (al_stripes == 0 || al_stripe_size_4k == 0)
		goto err;

	al_size_4k = (u64)al_stripes * al_stripe_size_4k;

	/* Upper limit of activity log area, to avoid potential overflow
	 * problems in al_tr_number_to_on_disk_sector(). As right now, more
	 * than 72 * 4k blocks total only increases the amount of history,
	 * limiting this arbitrarily to 16 GB is not a real limitation ;-) */
	if (al_size_4k > (16 * 1024 * 1024/4))
		goto err;

	/* Lower limit: we need at least 8 transaction slots (32kB)
	 * to not break existing setups */
	if (al_size_4k < MD_32kB_SECT/8)
		goto err;

	in_core->al_stripe_size_4k = al_stripe_size_4k;
	in_core->al_stripes = al_stripes;
	in_core->al_size_4k = al_size_4k;

	return 0;
err:
	drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
			al_stripes, al_stripe_size_4k);
	return -EINVAL;
}
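/*
 * Arithmetic behind the limits above: 16 * 1024 * 1024 / 4 is the number of
 * 4 KiB blocks in 16 GiB (4194304 blocks), and MD_32kB_SECT/8 is 32 KiB
 * expressed in 4 KiB blocks (8 blocks), i.e. the 8 transaction slots of the
 * old fixed-size activity log.
 */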
static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev)
{
	sector_t capacity = drbd_get_capacity(bdev->md_bdev);
	struct drbd_md *in_core = &bdev->md;
	s32 on_disk_al_sect;
	s32 on_disk_bm_sect;

	/* The on-disk size of the activity log, calculated from offsets, and
	 * the size of the activity log calculated from the stripe settings,
	 * should match.
	 * Though we could relax this a bit: it would be OK if the striped
	 * activity log fits in the available on-disk activity log size.
	 * Right now, that would break how resize is implemented.
	 * TODO: make drbd_determine_dev_size() (and the drbdmeta tool) aware
	 * of possible unused padding space in the on disk layout. */
	if (in_core->al_offset < 0) {
		if (in_core->bm_offset > in_core->al_offset)
			goto err;
		on_disk_al_sect = -in_core->al_offset;
		on_disk_bm_sect = in_core->al_offset - in_core->bm_offset;
	} else {
		if (in_core->al_offset != MD_4kB_SECT)
			goto err;
		if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT)
			goto err;

		on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT;
		on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset;
	}

	/* old fixed size meta data is exactly that: fixed. */
	if (in_core->meta_dev_idx >= 0) {
		if (in_core->md_size_sect != MD_128MB_SECT
		||  in_core->al_offset != MD_4kB_SECT
		||  in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT
		||  in_core->al_stripes != 1
		||  in_core->al_stripe_size_4k != MD_32kB_SECT/8)
			goto err;
	}

	if (capacity < in_core->md_size_sect)
		goto err;
	if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev))
		goto err;

	/* should be aligned, and at least 32k */
	if ((on_disk_al_sect & 7) || (on_disk_al_sect < MD_32kB_SECT))
		goto err;

	/* should fit (for now: exactly) into the available on-disk space;
	 * overflow prevention is in check_activity_log_stripe_size() above. */
	if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT)
		goto err;

	/* again, should be aligned */
	if (in_core->bm_offset & 7)
		goto err;

	/* FIXME check for device grow with flex external meta data? */

	/* can the available bitmap space cover the last agreed device size? */
	if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512)
		goto err;

	return 0;

err:
	drbd_err(device, "meta data offsets don't make sense: idx=%d "
			"al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
			"md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
			in_core->meta_dev_idx,
			in_core->al_stripes, in_core->al_stripe_size_4k,
			in_core->al_offset, in_core->bm_offset, in_core->md_size_sect,
			(unsigned long long)in_core->la_size_sect,
			(unsigned long long)capacity);
	return -EINVAL;
}
/**
 * drbd_md_read() - Reads in the meta data super block
 * @device:	DRBD device.
 * @bdev:	Device from which the meta data should be read in.
 *
 * Return NO_ERROR on success, and an enum drbd_ret_code in case
 * something goes wrong.
 *
 * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
 * even before @bdev is assigned to @device->ldev.
 */
int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
{
	struct meta_data_on_disk *buffer;
	u32 magic, flags;
	int i, rv = NO_ERROR;

	if (device->state.disk != D_DISKLESS)
		return ERR_DISK_CONFIGURED;

	buffer = drbd_md_get_buffer(device);
	if (!buffer)
		return ERR_NOMEM;

	/* First, figure out where our meta data superblock is located,
	 * and read it. */
	bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
	bdev->md.md_offset = drbd_md_ss(bdev);

	if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, READ)) {
		/* NOTE: can't do normal error processing here as this is
		   called BEFORE disk is attached */
		drbd_err(device, "Error while reading metadata.\n");
		rv = ERR_IO_MD_DISK;
		goto err;
	}

	magic = be32_to_cpu(buffer->magic);
	flags = be32_to_cpu(buffer->flags);
	if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
	    (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
			/* btw: that's Activity Log clean, not "all" clean. */
		drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
		rv = ERR_MD_UNCLEAN;
		goto err;
	}

	rv = ERR_MD_INVALID;
	if (magic != DRBD_MD_MAGIC_08) {
		if (magic == DRBD_MD_MAGIC_07)
			drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
		else
			drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
		goto err;
	}

	if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
		drbd_err(device, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
		    be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
		goto err;
	}

	/* convert to in_core endian */
	bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
	bdev->md.flags = be32_to_cpu(buffer->flags);
	bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);

	bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
	bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
	bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);

	if (check_activity_log_stripe_size(device, buffer, &bdev->md))
		goto err;
	if (check_offsets_and_sizes(device, bdev))
		goto err;

	if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
		drbd_err(device, "unexpected bm_offset: %d (expected %d)\n",
		    be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
		goto err;
	}
	if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
		drbd_err(device, "unexpected md_size: %u (expected %u)\n",
		    be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
		goto err;
	}

	rv = NO_ERROR;

	spin_lock_irq(&device->resource->req_lock);
	if (device->state.conn < C_CONNECTED) {
		unsigned int peer;
		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
		peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
		device->peer_max_bio_size = peer;
	}
	spin_unlock_irq(&device->resource->req_lock);

 err:
	drbd_md_put_buffer(device);

	return rv;
}
/**
 * drbd_md_mark_dirty() - Mark meta data super block as dirty
 * @device:	DRBD device.
 *
 * Call this function if you change anything that should be written to
 * the meta-data super block. This function sets MD_DIRTY, and starts a
 * timer that ensures that within five seconds you have to call drbd_md_sync().
 */
#ifdef DEBUG
void drbd_md_mark_dirty_(struct drbd_device *device, unsigned int line, const char *func)
{
	if (!test_and_set_bit(MD_DIRTY, &device->flags)) {
		mod_timer(&device->md_sync_timer, jiffies + HZ);
		device->last_md_mark_dirty.line = line;
		device->last_md_mark_dirty.func = func;
	}
}
#else
void drbd_md_mark_dirty(struct drbd_device *device)
{
	if (!test_and_set_bit(MD_DIRTY, &device->flags))
		mod_timer(&device->md_sync_timer, jiffies + 5*HZ);
}
#endif
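/*
 * Flow sketch: a caller mutates the in-core meta data and calls
 * drbd_md_mark_dirty(); if MD_DIRTY was not already set, the md_sync_timer
 * is armed.  When it fires, md_sync_timer_fn() (below) queues w_md_sync(),
 * which finally calls drbd_md_sync() from worker context.  An explicit
 * drbd_md_sync() in the meantime clears the bit and disarms the timer.
 */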
void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
{
	int i;

	for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
		device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i];
}
void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
	if (idx == UI_CURRENT) {
		if (device->state.role == R_PRIMARY)
			val |= 1;
		else
			val &= ~((u64)1);

		drbd_set_ed_uuid(device, val);
	}

	device->ldev->md.uuid[idx] = val;
	drbd_md_mark_dirty(device);
}
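/*
 * The least significant bit of the current UUID thus encodes the role at
 * the time it was set: 1 while Primary, 0 while Secondary.  The same bit is
 * masked off again in drbd_uuid_set_bm() below.
 */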
void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
	unsigned long flags;
	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
	__drbd_uuid_set(device, idx, val);
	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
}

void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
	unsigned long flags;
	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
	if (device->ldev->md.uuid[idx]) {
		drbd_uuid_move_history(device);
		device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx];
	}
	__drbd_uuid_set(device, idx, val);
	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
}
/**
 * drbd_uuid_new_current() - Creates a new current UUID
 * @device:	DRBD device.
 *
 * Creates a new current UUID, and rotates the old current UUID into
 * the bitmap slot. Causes an incremental resync upon next connect.
 */
void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
{
	u64 val;
	unsigned long long bm_uuid;

	get_random_bytes(&val, sizeof(u64));

	spin_lock_irq(&device->ldev->md.uuid_lock);
	bm_uuid = device->ldev->md.uuid[UI_BITMAP];

	if (bm_uuid)
		drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);

	device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT];
	__drbd_uuid_set(device, UI_CURRENT, val);
	spin_unlock_irq(&device->ldev->md.uuid_lock);

	drbd_print_uuids(device, "new current UUID");
	/* get it to stable storage _now_ */
	drbd_md_sync(device);
}
void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
{
	unsigned long flags;
	if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
		return;

	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
	if (val == 0) {
		drbd_uuid_move_history(device);
		device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
		device->ldev->md.uuid[UI_BITMAP] = 0;
	} else {
		unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP];
		if (bm_uuid)
			drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);

		device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
	}
	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);

	drbd_md_mark_dirty(device);
}
/**
 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @device:	DRBD device.
 *
 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_set_n_write(struct drbd_device *device)
{
	int rv = -EIO;

	if (get_ldev_if_state(device, D_ATTACHING)) {
		drbd_md_set_flag(device, MDF_FULL_SYNC);
		drbd_md_sync(device);
		drbd_bm_set_all(device);

		rv = drbd_bm_write(device);

		if (!rv) {
			drbd_md_clear_flag(device, MDF_FULL_SYNC);
			drbd_md_sync(device);
		}

		put_ldev(device);
	}

	return rv;
}
/**
 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @device:	DRBD device.
 *
 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_clear_n_write(struct drbd_device *device)
{
	int rv = -EIO;

	drbd_resume_al(device);
	if (get_ldev_if_state(device, D_ATTACHING)) {
		drbd_bm_clear_all(device);
		rv = drbd_bm_write(device);
		put_ldev(device);
	}

	return rv;
}
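/*
 * Illustrative usage (a sketch, assuming the usual "invalidate" admin path):
 * these two functions are the io_fn arguments typically handed to
 * drbd_bitmap_io() or drbd_queue_bitmap_io(), e.g.
 *
 *	drbd_bitmap_io(device, &drbd_bmio_set_n_write,
 *		"set_n_write from invalidate", BM_LOCKED_MASK);
 */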
static int w_bitmap_io(struct drbd_work *w, int unused)
{
	struct drbd_device *device =
		container_of(w, struct drbd_device, bm_io_work.w);
	struct bm_io_work *work = &device->bm_io_work;
	int rv = -EIO;

	D_ASSERT(device, atomic_read(&device->ap_bio_cnt) == 0);

	if (get_ldev(device)) {
		drbd_bm_lock(device, work->why, work->flags);
		rv = work->io_fn(device);
		drbd_bm_unlock(device);
		put_ldev(device);
	}

	clear_bit_unlock(BITMAP_IO, &device->flags);
	wake_up(&device->misc_wait);

	if (work->done)
		work->done(device, rv);

	clear_bit(BITMAP_IO_QUEUED, &device->flags);
	work->why = NULL;
	work->flags = 0;

	return 0;
}
void drbd_ldev_destroy(struct drbd_device *device)
{
	lc_destroy(device->resync);
	device->resync = NULL;
	lc_destroy(device->act_log);
	device->act_log = NULL;
	__acquire(local);
	drbd_free_bc(device->ldev);
	device->ldev = NULL;
	__release(local);

	clear_bit(GO_DISKLESS, &device->flags);
}
static int w_go_diskless(struct drbd_work *w, int unused)
{
	struct drbd_device *device =
		container_of(w, struct drbd_device, go_diskless);

	D_ASSERT(device, device->state.disk == D_FAILED);
	/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
	 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
	 * the protected members anymore, though, so once put_ldev reaches zero
	 * again, it will be safe to free them. */

	/* Try to write changed bitmap pages, read errors may have just
	 * set some bits outside the area covered by the activity log.
	 *
	 * If we have an IO error during the bitmap writeout,
	 * we will want a full sync next time, just in case.
	 * (Do we want a specific meta data flag for this?)
	 *
	 * If that does not make it to stable storage either,
	 * we cannot do anything about that anymore.
	 *
	 * We still need to check if both bitmap and ldev are present, we may
	 * end up here after a failed attach, before ldev was even assigned.
	 */
	if (device->bitmap && device->ldev) {
		/* An interrupted resync or similar is allowed to recount bits
		 * while we detach.
		 * Any modifications would not be expected anymore, though.
		 */
		if (drbd_bitmap_io_from_worker(device, drbd_bm_write,
					"detach", BM_LOCKED_TEST_ALLOWED)) {
			if (test_bit(WAS_READ_ERROR, &device->flags)) {
				drbd_md_set_flag(device, MDF_FULL_SYNC);
				drbd_md_sync(device);
			}
		}
	}

	drbd_force_state(device, NS(disk, D_DISKLESS));
	return 0;
}
/**
 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
 * @device:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @done:	callback to be called after the bitmap IO was performed
 * @why:	Descriptive text of the reason for doing the IO
 *
 * While IO on the bitmap happens we freeze application IO, thus ensuring
 * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
 * called from worker context. It MUST NOT be used while a previous such
 * work is still pending!
 */
void drbd_queue_bitmap_io(struct drbd_device *device,
			  int (*io_fn)(struct drbd_device *),
			  void (*done)(struct drbd_device *, int),
			  char *why, enum bm_flag flags)
{
	D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);

	D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
	D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
	D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
	if (device->bm_io_work.why)
		drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
			why, device->bm_io_work.why);

	device->bm_io_work.io_fn = io_fn;
	device->bm_io_work.done = done;
	device->bm_io_work.why = why;
	device->bm_io_work.flags = flags;

	spin_lock_irq(&device->resource->req_lock);
	set_bit(BITMAP_IO, &device->flags);
	if (atomic_read(&device->ap_bio_cnt) == 0) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
			drbd_queue_work(&first_peer_device(device)->connection->sender_work,
					&device->bm_io_work.w);
	}
	spin_unlock_irq(&device->resource->req_lock);
}
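/*
 * Note on the deferred case above: if application IO is still in flight
 * (ap_bio_cnt != 0), only the BITMAP_IO flag is set here; the work item is
 * queued later, when the last pending application bio completes and drops
 * ap_bio_cnt to zero.
 */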
/**
 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
 * @device:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @why:	Descriptive text of the reason for doing the IO
 *
 * Freezes application IO while the actual IO operation runs. This
 * function MAY NOT be called from worker context.
 */
int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags)
{
	int rv;

	D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);

	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
		drbd_suspend_io(device);

	drbd_bm_lock(device, why, flags);
	rv = io_fn(device);
	drbd_bm_unlock(device);

	if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
		drbd_resume_io(device);

	return rv;
}
void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local)
{
	if ((device->ldev->md.flags & flag) != flag) {
		drbd_md_mark_dirty(device);
		device->ldev->md.flags |= flag;
	}
}

void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local)
{
	if ((device->ldev->md.flags & flag) != 0) {
		drbd_md_mark_dirty(device);
		device->ldev->md.flags &= ~flag;
	}
}
int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
{
	return (bdev->md.flags & flag) != 0;
}
static void md_sync_timer_fn(unsigned long data)
{
	struct drbd_device *device = (struct drbd_device *) data;

	/* must not double-queue! */
	if (list_empty(&device->md_sync_work.list))
		drbd_queue_work_front(&first_peer_device(device)->connection->sender_work,
				      &device->md_sync_work);
}

static int w_md_sync(struct drbd_work *w, int unused)
{
	struct drbd_device *device =
		container_of(w, struct drbd_device, md_sync_work);

	drbd_warn(device, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
#ifdef DEBUG
	drbd_warn(device, "last md_mark_dirty: %s:%u\n",
		  device->last_md_mark_dirty.func, device->last_md_mark_dirty.line);
#endif
	drbd_md_sync(device);
	return 0;
}
const char *cmdname(enum drbd_packet cmd)
{
	/* THINK may need to become several global tables
	 * when we want to support more than
	 * one PRO_VERSION */
	static const char *cmdnames[] = {
		[P_DATA]	        = "Data",
		[P_DATA_REPLY]	        = "DataReply",
		[P_RS_DATA_REPLY]	= "RSDataReply",
		[P_BARRIER]	        = "Barrier",
		[P_BITMAP]	        = "ReportBitMap",
		[P_BECOME_SYNC_TARGET]  = "BecomeSyncTarget",
		[P_BECOME_SYNC_SOURCE]  = "BecomeSyncSource",
		[P_UNPLUG_REMOTE]	= "UnplugRemote",
		[P_DATA_REQUEST]	= "DataRequest",
		[P_RS_DATA_REQUEST]     = "RSDataRequest",
		[P_SYNC_PARAM]	        = "SyncParam",
		[P_SYNC_PARAM89]	= "SyncParam89",
		[P_PROTOCOL]            = "ReportProtocol",
		[P_UUIDS]	        = "ReportUUIDs",
		[P_SIZES]	        = "ReportSizes",
		[P_STATE]	        = "ReportState",
		[P_SYNC_UUID]           = "ReportSyncUUID",
		[P_AUTH_CHALLENGE]      = "AuthChallenge",
		[P_AUTH_RESPONSE]	= "AuthResponse",
		[P_PING]		= "Ping",
		[P_PING_ACK]	        = "PingAck",
		[P_RECV_ACK]	        = "RecvAck",
		[P_WRITE_ACK]	        = "WriteAck",
		[P_RS_WRITE_ACK]	= "RSWriteAck",
		[P_SUPERSEDED]          = "Superseded",
		[P_NEG_ACK]	        = "NegAck",
		[P_NEG_DREPLY]	        = "NegDReply",
		[P_NEG_RS_DREPLY]	= "NegRSDReply",
		[P_BARRIER_ACK]	        = "BarrierAck",
		[P_STATE_CHG_REQ]       = "StateChgRequest",
		[P_STATE_CHG_REPLY]     = "StateChgReply",
		[P_OV_REQUEST]          = "OVRequest",
		[P_OV_REPLY]            = "OVReply",
		[P_OV_RESULT]           = "OVResult",
		[P_CSUM_RS_REQUEST]     = "CsumRSRequest",
		[P_RS_IS_IN_SYNC]	= "CsumRSIsInSync",
		[P_COMPRESSED_BITMAP]   = "CBitmap",
		[P_DELAY_PROBE]         = "DelayProbe",
		[P_OUT_OF_SYNC]		= "OutOfSync",
		[P_RS_CANCEL]		= "RSCancel",
		[P_CONN_ST_CHG_REQ]	= "conn_st_chg_req",
		[P_CONN_ST_CHG_REPLY]	= "conn_st_chg_reply",
		[P_RETRY_WRITE]		= "retry_write",
		[P_PROTOCOL_UPDATE]	= "protocol_update",

		/* enum drbd_packet, but not commands - obsoleted flags:
		 *	P_MAY_IGNORE
		 *	P_MAX_OPT_CMD
		 */
	};

	/* too big for the array: 0xfffX */
	if (cmd == P_INITIAL_META)
		return "InitialMeta";
	if (cmd == P_INITIAL_DATA)
		return "InitialData";
	if (cmd == P_CONNECTION_FEATURES)
		return "ConnectionFeatures";
	if (cmd >= ARRAY_SIZE(cmdnames))
		return "Unknown";
	return cmdnames[cmd];
}
/**
 * drbd_wait_misc  -  wait for a request to make progress
 * @device:	device associated with the request
 * @i:		the struct drbd_interval embedded in struct drbd_request or
 *		struct drbd_peer_request
 */
int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
{
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	long timeout;

	rcu_read_lock();
	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -ETIMEDOUT;
	}
	timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
	rcu_read_unlock();

	/* Indicate to wake up device->misc_wait on progress.  */
	i->waiting = true;
	prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
	spin_unlock_irq(&device->resource->req_lock);
	timeout = schedule_timeout(timeout);
	finish_wait(&device->misc_wait, &wait);
	spin_lock_irq(&device->resource->req_lock);
	if (!timeout || device->state.conn < C_CONNECTED)
		return -ETIMEDOUT;
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}
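/*
 * Timeout arithmetic: nc->timeout is configured in tenths of a second, so
 * nc->timeout * HZ / 10 converts it to jiffies; multiplying by ko_count
 * mirrors the knock-out ("ko-count") semantics of the net configuration,
 * and a ko_count of 0 means wait indefinitely.
 */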
#ifdef CONFIG_DRBD_FAULT_INJECTION
/* Fault insertion support including random number generator shamelessly
 * stolen from kernel/rcutorture.c */
struct fault_random_state {
	unsigned long state;
	unsigned long count;
};

#define FAULT_RANDOM_MULT 39916801  /* prime */
#define FAULT_RANDOM_ADD	479001701 /* prime */
#define FAULT_RANDOM_REFRESH 10000

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static unsigned long
_drbd_fault_random(struct fault_random_state *rsp)
{
	unsigned long refresh;

	if (!rsp->count--) {
		get_random_bytes(&refresh, sizeof(refresh));
		rsp->state += refresh;
		rsp->count = FAULT_RANDOM_REFRESH;
	}
	rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
	return swahw32(rsp->state);
}
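/*
 * The recurrence above is the classic LCG step, state' = state * a + c
 * (a = 39916801, c = 479001701, modulus implied by unsigned overflow).
 * swahw32() swaps the 16-bit halfwords of the low 32 bits, so callers see
 * the statistically stronger high-order bits in the low positions.
 */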
static char *
_drbd_fault_str(unsigned int type) {
	static char *_faults[] = {
		[DRBD_FAULT_MD_WR] = "Meta-data write",
		[DRBD_FAULT_MD_RD] = "Meta-data read",
		[DRBD_FAULT_RS_WR] = "Resync write",
		[DRBD_FAULT_RS_RD] = "Resync read",
		[DRBD_FAULT_DT_WR] = "Data write",
		[DRBD_FAULT_DT_RD] = "Data read",
		[DRBD_FAULT_DT_RA] = "Data read ahead",
		[DRBD_FAULT_BM_ALLOC] = "BM allocation",
		[DRBD_FAULT_AL_EE] = "EE allocation",
		[DRBD_FAULT_RECEIVE] = "receive data corruption",
	};

	return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
}
unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type)
{
	static struct fault_random_state rrs = {0, 0};

	unsigned int ret = (
		(fault_devs == 0 ||
			((1 << device_to_minor(device)) & fault_devs) != 0) &&
		(((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));

	if (ret) {
		fault_count++;

		if (__ratelimit(&drbd_ratelimit_state))
			drbd_warn(device, "***Simulating %s failure\n",
				_drbd_fault_str(type));
	}

	return ret;
}
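/*
 * Probability sketch: _drbd_fault_random() % 100 + 1 is uniform over
 * 1..100, so the comparison against fault_rate fires with probability
 * fault_rate percent; fault_devs acts as a bitmask selecting the minors
 * to target, with 0 meaning "all devices".
 */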
#endif	/* CONFIG_DRBD_FAULT_INJECTION */

const char *drbd_buildtag(void)
{
	/* DRBD built from external sources has here a reference to the
	 * git hash of the source code. */

	static char buildtag[38] = "\0uilt-in";

	if (buildtag[0] == 0) {
#ifdef MODULE
		sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
#else
		buildtag[0] = 'b';
#endif
	}

	return buildtag;
}
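/*
 * The "\0uilt-in" initializer is deliberate: the leading NUL marks the
 * buffer as not yet initialized.  On first call a modular build formats the
 * srcversion into it, while a built-in build merely patches the first byte
 * to 'b', turning the string into "built-in".
 */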
module_init(drbd_init)
module_exit(drbd_cleanup)

EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
EXPORT_SYMBOL(drbd_set_st_err_str);