drbd: Use RCU for the drbd_tconns list
drivers/block/drbd/drbd_main.c
/*
   drbd.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */

#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <net/sock.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */

#include "drbd_vli.h"

static DEFINE_MUTEX(drbd_main_mutex);
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);

int drbd_init(void);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
static int w_md_sync(struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
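/* For instance (illustrative values, not taken from this file): when DRBD
 * is built in, these parameters would appear on the kernel command line as
 * "drbd.minor_count=8 drbd.proc_details=1"; built as a module, the usual
 * "modprobe drbd minor_count=8" form applies. */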
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(proc_details, int, 0644);

#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif

/* module parameter, defined */
unsigned int minor_count = DRBD_MINOR_COUNT_DEF;
int disable_sendpage;
int allow_oos;
int proc_details;	/* Detail level in proc drbd */

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);

/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct idr minors;
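/* A locking note implied by this commit ("Use RCU for the drbd_tconns
 * list"): readers traverse drbd_tconns below under rcu_read_lock(), while
 * updates appear to be serialized by drbd_cfg_rwsem. */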
struct list_head drbd_tconns;  /* list of struct drbd_tconn */
DECLARE_RWSEM(drbd_cfg_rwsem);

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* peer requests */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
mempool_t *drbd_md_io_page_pool;
struct bio_set *drbd_md_io_bio_set;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a singly linked list, the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t   drbd_pp_lock;
int          drbd_pp_vacant;
wait_queue_head_t drbd_pp_wait;

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
	.owner =   THIS_MODULE,
	.open =    drbd_open,
	.release = drbd_release,
};

static void bio_destructor_drbd(struct bio *bio)
{
	bio_free(bio, drbd_md_io_bio_set);
}

struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
	struct bio *bio;

	if (!drbd_md_io_bio_set)
		return bio_alloc(gfp_mask, 1);

	bio = bio_alloc_bioset(gfp_mask, 1, drbd_md_io_bio_set);
	if (!bio)
		return NULL;
	bio->bi_destructor = bio_destructor_drbd;
	return bio;
}

#ifdef __CHECKER__
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function, sparse works.
 */
int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&mdev->local_cnt))
			wake_up(&mdev->misc_wait);
	}
	return io_allowed;
}

#endif
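/* Hedged usage sketch (mirroring callers further down in this file): a
 * successful get_ldev_if_state() must be paired with put_ldev(), which
 * drops the local_cnt reference taken above:
 *
 *	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
 *		// safe to dereference mdev->ldev in here
 *		put_ldev(mdev);
 *	}
 */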

/**
 * DOC: The transfer log
 *
 * The transfer log is a singly linked list of &struct drbd_tl_epoch objects.
 * mdev->tconn->newest_tle points to the head, mdev->tconn->oldest_tle points to the tail
 * of the list. There is always at least one &struct drbd_tl_epoch object.
 *
 * Each &struct drbd_tl_epoch has a circular doubly linked list of requests
 * attached.
 */
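/* Illustrative sketch (not part of the driver): per the DOC text above,
 * the epochs can be walked from oldest to newest via the ->next pointers:
 *
 *	struct drbd_tl_epoch *b;
 *	for (b = tconn->oldest_tle; b; b = b->next)
 *		;	// inspect b->br_number, b->n_writes, b->requests
 */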
static int tl_init(struct drbd_tconn *tconn)
{
	struct drbd_tl_epoch *b;

	/* during device minor initialization, we may well use GFP_KERNEL */
	b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
	if (!b)
		return 0;
	INIT_LIST_HEAD(&b->requests);
	INIT_LIST_HEAD(&b->w.list);
	b->next = NULL;
	b->br_number = 4711;
	b->n_writes = 0;
	b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */

	tconn->oldest_tle = b;
	tconn->newest_tle = b;
	INIT_LIST_HEAD(&tconn->out_of_sequence_requests);

	return 1;
}

static void tl_cleanup(struct drbd_tconn *tconn)
{
	if (tconn->oldest_tle != tconn->newest_tle)
		conn_err(tconn, "ASSERT FAILED: oldest_tle == newest_tle\n");
	if (!list_empty(&tconn->out_of_sequence_requests))
		conn_err(tconn, "ASSERT FAILED: list_empty(out_of_sequence_requests)\n");
	kfree(tconn->oldest_tle);
	tconn->oldest_tle = NULL;
	kfree(tconn->unused_spare_tle);
	tconn->unused_spare_tle = NULL;
}

/**
 * _tl_add_barrier() - Adds a barrier to the transfer log
 * @tconn:	DRBD connection.
 * @new:	Barrier to be added before the current head of the TL.
 *
 * The caller must hold the req_lock.
 */
void _tl_add_barrier(struct drbd_tconn *tconn, struct drbd_tl_epoch *new)
{
	struct drbd_tl_epoch *newest_before;

	INIT_LIST_HEAD(&new->requests);
	INIT_LIST_HEAD(&new->w.list);
	new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */
	new->next = NULL;
	new->n_writes = 0;

	newest_before = tconn->newest_tle;
	/* never send a barrier number == 0, because that is special-cased
	 * when using TCQ for our write ordering code */
	new->br_number = (newest_before->br_number+1) ?: 1;
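	/* note: GCC's "x ?: y" shorthand above expands to "x ? x : y", so on
	 * wraparound (br_number + 1 == 0) the new epoch gets number 1, not 0 */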
	if (tconn->newest_tle != new) {
		tconn->newest_tle->next = new;
		tconn->newest_tle = new;
	}
}

/**
 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
 * @tconn:	DRBD connection.
 * @barrier_nr: Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * &struct drbd_tl_epoch object, this function will cause a termination
 * of the connection.
 */
void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_conf *mdev;
	struct drbd_tl_epoch *b, *nob; /* next old barrier */
	struct list_head *le, *tle;
	struct drbd_request *r;

	spin_lock_irq(&tconn->req_lock);

	b = tconn->oldest_tle;

	/* first some paranoia code */
	if (b == NULL) {
		conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			 barrier_nr);
		goto bail;
	}
	if (b->br_number != barrier_nr) {
		conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
			 barrier_nr, b->br_number);
		goto bail;
	}
	if (b->n_writes != set_size) {
		conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			 barrier_nr, set_size, b->n_writes);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch */
	list_for_each_safe(le, tle, &b->requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		_req_mod(r, BARRIER_ACKED);
	}
	/* There could be requests on the list waiting for completion
	   of the write to the local disk. To avoid corruption of the
	   slab's data structures we have to remove the list's head.

	   Also there could have been a barrier ack out of sequence, overtaking
	   the write acks - which would be a bug and violating write ordering.
	   To not deadlock in case we lose connection while such requests are
	   still pending, we need some way to find them for the
	   _req_mod(CONNECTION_LOST_WHILE_PENDING).

	   These have been list_move'd to the out_of_sequence_requests list in
	   _req_mod(, BARRIER_ACKED) above.
	   */
	list_del_init(&b->requests);
	mdev = b->w.mdev;

	nob = b->next;
	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
		_tl_add_barrier(tconn, b);
		if (nob)
			tconn->oldest_tle = nob;
		/* if nob == NULL b was the only barrier, and becomes the new
		   barrier. Therefore tconn->oldest_tle already points to b */
	} else {
		D_ASSERT(nob != NULL);
		tconn->oldest_tle = nob;
		kfree(b);
	}

	spin_unlock_irq(&tconn->req_lock);
	dec_ap_pending(mdev);

	return;

bail:
	spin_unlock_irq(&tconn->req_lock);
	conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}


/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @tconn:	DRBD connection.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	struct drbd_tl_epoch *b, *tmp, **pn;
	struct list_head *le, *tle, carry_reads;
	struct drbd_request *req;
	int rv, n_writes, n_reads;

	b = tconn->oldest_tle;
	pn = &tconn->oldest_tle;
	while (b) {
		n_writes = 0;
		n_reads = 0;
		INIT_LIST_HEAD(&carry_reads);
		list_for_each_safe(le, tle, &b->requests) {
			req = list_entry(le, struct drbd_request, tl_requests);
			rv = _req_mod(req, what);

			n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
			n_reads  += (rv & MR_READ) >> MR_READ_SHIFT;
		}
		tmp = b->next;

		if (n_writes) {
			if (what == RESEND) {
				b->n_writes = n_writes;
				if (b->w.cb == NULL) {
					b->w.cb = w_send_barrier;
					inc_ap_pending(b->w.mdev);
					set_bit(CREATE_BARRIER, &b->w.mdev->flags);
				}

				drbd_queue_work(&tconn->data.work, &b->w);
			}
			pn = &b->next;
		} else {
			if (n_reads)
				list_add(&carry_reads, &b->requests);
			/* there could still be requests on that ring list,
			 * in case local io is still pending */
			list_del(&b->requests);

			/* dec_ap_pending corresponding to queue_barrier.
			 * the newest barrier may not have been queued yet,
			 * in which case w.cb is still NULL. */
			if (b->w.cb != NULL)
				dec_ap_pending(b->w.mdev);

			if (b == tconn->newest_tle) {
				/* recycle, but reinit! */
				if (tmp != NULL)
					conn_err(tconn, "ASSERT FAILED tmp == NULL\n");
				INIT_LIST_HEAD(&b->requests);
				list_splice(&carry_reads, &b->requests);
				INIT_LIST_HEAD(&b->w.list);
				b->w.cb = NULL;
				b->br_number = net_random();
				b->n_writes = 0;

				*pn = b;
				break;
			}
			*pn = tmp;
			kfree(b);
		}
		b = tmp;
		list_splice(&carry_reads, &b->requests);
	}
}


/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @tconn:	DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	struct list_head *le, *tle;
	struct drbd_request *r;
	int vnr;

	spin_lock_irq(&tconn->req_lock);

	_tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);

	/* we expect this list to be empty. */
	if (!list_empty(&tconn->out_of_sequence_requests))
		conn_err(tconn, "ASSERT FAILED list_empty(&out_of_sequence_requests)\n");

	/* but just in case, clean it up anyways! */
	list_for_each_safe(le, tle, &tconn->out_of_sequence_requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		/* It would be nice to complete outside of spinlock.
		 * But this is easier for now. */
		_req_mod(r, CONNECTION_LOST_WHILE_PENDING);
	}

	/* ensure bit indicating barrier is required is clear */
	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		clear_bit(CREATE_BARRIER, &mdev->flags);
	rcu_read_unlock();

	spin_unlock_irq(&tconn->req_lock);
}

void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
{
	spin_lock_irq(&tconn->req_lock);
	_tl_restart(tconn, what);
	spin_unlock_irq(&tconn->req_lock);
}

static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_tconn *tconn = thi->tconn;
	unsigned long flags;
	int retval;

	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
		 thi->name[0], thi->tconn->name);

restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "EXITING", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and the receiver thread will be "started".
	 * drbd_thread_start needs to set "RESTARTING" in that case.
	 * t_state check and assignment need to be within the same spinlock,
	 * so either thread_start sees EXITING, and can remap to RESTARTING,
	 * or thread_start sees NONE, and can proceed as normal.
	 */

	if (thi->t_state == RESTARTING) {
		conn_info(tconn, "Restarting %s thread\n", thi->name);
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = NONE;
	smp_mb();
	complete_all(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	conn_info(tconn, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */

	kref_put(&tconn->kref, &conn_destroy);
	module_put(THIS_MODULE);
	return retval;
}
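/* Thread state machine implied by drbd_thread_setup(), drbd_thread_start()
 * and _drbd_thread_stop() (sketch):
 *
 *   NONE -------> RUNNING      drbd_thread_start()
 *   RUNNING ----> EXITING      _drbd_thread_stop() (or RESTARTING, when a
 *                              restart was requested)
 *   EXITING ----> RESTARTING   drbd_thread_start() while still exiting
 *   RESTARTING -> RUNNING      drbd_thread_setup(), "restart" label above
 *   RUNNING ----> NONE         thread function returned
 */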

static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *), char *name)
{
	spin_lock_init(&thi->t_lock);
	thi->task    = NULL;
	thi->t_state = NONE;
	thi->function = func;
	thi->tconn = tconn;
	strncpy(thi->name, name, ARRAY_SIZE(thi->name));
}

int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_tconn *tconn = thi->tconn;
	struct task_struct *nt;
	unsigned long flags;

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case NONE:
		conn_info(tconn, "Starting %s thread (from %s [%d])\n",
			 thi->name, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return false;
		}

		kref_get(&thi->tconn->kref);

		init_completion(&thi->stop);
		thi->reset_cpu_mask = 1;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd_%c_%s", thi->name[0], thi->tconn->name);

		if (IS_ERR(nt)) {
			conn_err(tconn, "Couldn't start thread\n");

			kref_put(&tconn->kref, &conn_destroy);
			module_put(THIS_MODULE);
			return false;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case EXITING:
		thi->t_state = RESTARTING;
		conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
				thi->name, current->comm, current->pid);
		/* fall through */
	case RUNNING:
	case RESTARTING:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return true;
}


void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == NONE) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		smp_mb();
		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}

static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi =
		task == tconn->receiver.task ? &tconn->receiver :
		task == tconn->asender.task  ? &tconn->asender :
		task == tconn->worker.task   ? &tconn->worker : NULL;

	return thi;
}

char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
{
	struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
	return thi ? thi->name : task->comm;
}

int conn_lowest_minor(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr = 0, m;

	rcu_read_lock();
	mdev = idr_get_next(&tconn->volumes, &vnr);
	m = mdev ? mdev_to_minor(mdev) : -1;
	rcu_read_unlock();

	return m;
}

#ifdef CONFIG_SMP
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 * @tconn:	DRBD connection.
 *
 * Forces all threads of a connection onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
{
	int ord, cpu;

	/* user override. */
	if (cpumask_weight(tconn->cpu_mask))
		return;

	ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
	for_each_online_cpu(cpu) {
		if (ord-- == 0) {
			cpumask_set_cpu(cpu, tconn->cpu_mask);
			return;
		}
	}
	/* should not be reached */
	cpumask_setall(tconn->cpu_mask);
}
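/* Worked example (illustrative): with four online CPUs and a connection
 * whose lowest device minor is 5, ord = 5 % 4 = 1, so the loop above picks
 * the second online CPU; drbd_thread_current_set_cpu() below then pins each
 * of the connection's threads to that single-CPU mask. */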

/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi:	drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
	struct task_struct *p = current;

	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
}
#endif

/**
 * drbd_header_size  -  size of a packet header
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures.  (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_tconn *tconn)
{
	if (tconn->agreed_pro_version >= 100) {
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
		return sizeof(struct p_header100);
	} else {
		BUILD_BUG_ON(sizeof(struct p_header80) !=
			     sizeof(struct p_header95));
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
		return sizeof(struct p_header80);
	}
}

static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size);
	return sizeof(struct p_header80);
}

static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
	h->command = cpu_to_be16(cmd);
	h->length = cpu_to_be32(size);
	return sizeof(struct p_header95);
}

static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
				      int size, int vnr)
{
	h->magic = cpu_to_be32(DRBD_MAGIC_100);
	h->volume = cpu_to_be16(vnr);
	h->command = cpu_to_be16(cmd);
	h->length = cpu_to_be32(size);
	h->pad = 0;
	return sizeof(struct p_header100);
}

static unsigned int prepare_header(struct drbd_tconn *tconn, int vnr,
				   void *buffer, enum drbd_packet cmd, int size)
{
	if (tconn->agreed_pro_version >= 100)
		return prepare_header100(buffer, cmd, size, vnr);
	else if (tconn->agreed_pro_version >= 95 &&
		 size > DRBD_MAX_SIZE_H80_PACKET)
		return prepare_header95(buffer, cmd, size);
	else
		return prepare_header80(buffer, cmd, size);
}
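/* Dispatch summary for prepare_header() above: protocol >= 100 always uses
 * p_header100; protocols 95..99 switch to p_header95 only when @size
 * exceeds DRBD_MAX_SIZE_H80_PACKET (p_header80 carries the length in a
 * 16-bit field); everything else keeps the compact p_header80. */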

static void *__conn_prepare_command(struct drbd_tconn *tconn,
				    struct drbd_socket *sock)
{
	if (!sock->socket)
		return NULL;
	return sock->sbuf + drbd_header_size(tconn);
}

void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
{
	void *p;

	mutex_lock(&sock->mutex);
	p = __conn_prepare_command(tconn, sock);
	if (!p)
		mutex_unlock(&sock->mutex);

	return p;
}

void *drbd_prepare_command(struct drbd_conf *mdev, struct drbd_socket *sock)
{
	return conn_prepare_command(mdev->tconn, sock);
}

static int __send_command(struct drbd_tconn *tconn, int vnr,
			  struct drbd_socket *sock, enum drbd_packet cmd,
			  unsigned int header_size, void *data,
			  unsigned int size)
{
	int msg_flags;
	int err;

	/*
	 * Called with @data == NULL and the size of the data blocks in @size
	 * for commands that send data blocks.  For those commands, omit the
	 * MSG_MORE flag: this will increase the likelihood that data blocks
	 * which are page aligned on the sender will end up page aligned on the
	 * receiver.
	 */
	msg_flags = data ? MSG_MORE : 0;

	header_size += prepare_header(tconn, vnr, sock->sbuf, cmd,
				      header_size + size);
	err = drbd_send_all(tconn, sock->socket, sock->sbuf, header_size,
			    msg_flags);
	if (data && !err)
		err = drbd_send_all(tconn, sock->socket, data, size, 0);
	return err;
}

static int __conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
			       enum drbd_packet cmd, unsigned int header_size,
			       void *data, unsigned int size)
{
	return __send_command(tconn, 0, sock, cmd, header_size, data, size);
}

int conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __conn_send_command(tconn, sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_command(struct drbd_conf *mdev, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, header_size,
			     data, size);
	mutex_unlock(&sock->mutex);
	return err;
}
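/* Hedged usage sketch of the prepare/send pairing implemented above:
 * *_prepare_command() takes sock->mutex and returns a pointer into the
 * send buffer just past the header (or NULL, releasing the mutex again);
 * *_send_command() transmits and always drops the mutex:
 *
 *	struct p_barrier_ack *p = drbd_prepare_command(mdev, sock);
 *	if (!p)
 *		return -EIO;	// mutex was already released
 *	p->set_size = cpu_to_be32(set_size);
 *	drbd_send_command(mdev, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
 */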

int drbd_send_ping(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;

	sock = &tconn->meta;
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, P_PING, 0, NULL, 0);
}

int drbd_send_ping_ack(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;

	sock = &tconn->meta;
	if (!conn_prepare_command(tconn, sock))
		return -EIO;
	return conn_send_command(tconn, sock, P_PING_ACK, 0, NULL, 0);
}

int drbd_send_sync_param(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_rs_param_95 *p;
	int size;
	const int apv = mdev->tconn->agreed_pro_version;
	enum drbd_packet cmd;
	struct net_conf *nc;
	struct disk_conf *dc;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(nc->verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (get_ldev(mdev)) {
		dc = rcu_dereference(mdev->ldev->disk_conf);
		p->rate = cpu_to_be32(dc->resync_rate);
		p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
		p->c_delay_target = cpu_to_be32(dc->c_delay_target);
		p->c_fill_target = cpu_to_be32(dc->c_fill_target);
		p->c_max_rate = cpu_to_be32(dc->c_max_rate);
		put_ldev(mdev);
	} else {
		p->rate = cpu_to_be32(DRBD_RATE_DEF);
		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
	}

	if (apv >= 88)
		strcpy(p->verify_alg, nc->verify_alg);
	if (apv >= 89)
		strcpy(p->csums_alg, nc->csums_alg);
	rcu_read_unlock();

	return drbd_send_command(mdev, sock, cmd, size, NULL, 0);
}

int __drbd_send_protocol(struct drbd_tconn *tconn)
{
	struct drbd_socket *sock;
	struct p_protocol *p;
	struct net_conf *nc;
	int size, cf;

	sock = &tconn->data;
	p = __conn_prepare_command(tconn, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);

	if (nc->dry_run && tconn->agreed_pro_version < 92) {
		rcu_read_unlock();
		mutex_unlock(&sock->mutex);
		conn_err(tconn, "--dry-run is not supported by peer");
		return -EOPNOTSUPP;
	}

	size = sizeof(*p);
	if (tconn->agreed_pro_version >= 87)
		size += strlen(nc->integrity_alg) + 1;

	p->protocol      = cpu_to_be32(nc->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(nc->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(nc->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(nc->after_sb_2p);
	p->two_primaries = cpu_to_be32(nc->two_primaries);
	cf = 0;
	if (nc->want_lose)
		cf |= CF_WANT_LOSE;
	if (nc->dry_run)
		cf |= CF_DRY_RUN;
	p->conn_flags    = cpu_to_be32(cf);

	if (tconn->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, nc->integrity_alg);
	rcu_read_unlock();

	return __conn_send_command(tconn, sock, P_PROTOCOL, size, NULL, 0);
}

int drbd_send_protocol(struct drbd_tconn *tconn)
{
	int err;

	mutex_lock(&tconn->data.mutex);
	err = __drbd_send_protocol(tconn);
	mutex_unlock(&tconn->data.mutex);

	return err;
}

int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
{
	struct drbd_socket *sock;
	struct p_uuids *p;
	int i;

	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
		return 0;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p) {
		put_ldev(mdev);
		return -EIO;
	}
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p->uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;

	mdev->comm_bm_set = drbd_bm_total_weight(mdev);
	p->uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
	rcu_read_lock();
	uuid_flags |= rcu_dereference(mdev->tconn->net_conf)->want_lose ? 1 : 0;
	rcu_read_unlock();
	uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
	uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	put_ldev(mdev);
	return drbd_send_command(mdev, sock, P_UUIDS, sizeof(*p), NULL, 0);
}

int drbd_send_uuids(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 8);
}
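/* uuid_flags bits as assembled in _drbd_send_uuids() above:
 *   1 - net_conf->want_lose is set
 *   2 - CRASHED_PRIMARY is set on this device
 *   4 - new_state_tmp.disk == D_INCONSISTENT
 *   8 - skip the initial sync (drbd_send_uuids_skip_initial_sync())
 */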

void drbd_print_uuids(struct drbd_conf *mdev, const char *text)
{
	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		u64 *uuid = mdev->ldev->md.uuid;
		dev_info(DEV, "%s %016llX:%016llX:%016llX:%016llX\n",
		     text,
		     (unsigned long long)uuid[UI_CURRENT],
		     (unsigned long long)uuid[UI_BITMAP],
		     (unsigned long long)uuid[UI_HISTORY_START],
		     (unsigned long long)uuid[UI_HISTORY_END]);
		put_ldev(mdev);
	} else {
		dev_info(DEV, "%s effective data uuid: %016llX\n",
				text,
				(unsigned long long)mdev->ed_uuid);
	}
}

void drbd_gen_and_send_sync_uuid(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_rs_uuid *p;
	u64 uuid;

	D_ASSERT(mdev->state.disk == D_UP_TO_DATE);

	uuid = mdev->ldev->md.uuid[UI_BITMAP] + UUID_NEW_BM_OFFSET;
	drbd_uuid_set(mdev, UI_BITMAP, uuid);
	drbd_print_uuids(mdev, "updated sync UUID");
	drbd_md_sync(mdev);

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (p) {
		p->uuid = cpu_to_be64(uuid);
		drbd_send_command(mdev, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
	}
}

int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
{
	struct drbd_socket *sock;
	struct p_sizes *p;
	sector_t d_size, u_size;
	int q_order_type, max_bio_size;

	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		D_ASSERT(mdev->ldev->backing_bdev);
		d_size = drbd_get_max_capacity(mdev->ldev);
		rcu_read_lock();
		u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
		rcu_read_unlock();
		q_order_type = drbd_queue_order_type(mdev);
		max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
		put_ldev(mdev);
	} else {
		d_size = 0;
		u_size = 0;
		q_order_type = QUEUE_ORDERED_NONE;
		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
	}

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->d_size = cpu_to_be64(d_size);
	p->u_size = cpu_to_be64(u_size);
	p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
	p->max_bio_size = cpu_to_be32(max_bio_size);
	p->queue_order_type = cpu_to_be16(q_order_type);
	p->dds_flags = cpu_to_be16(flags);
	return drbd_send_command(mdev, sock, P_SIZES, sizeof(*p), NULL, 0);
}

/**
 * drbd_send_state() - Sends the drbd state to the peer
 * @mdev:	DRBD device.
 */
int drbd_send_state(struct drbd_conf *mdev)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
	return drbd_send_command(mdev, sock, P_STATE, sizeof(*p), NULL, 0);
}

int drbd_send_state_req(struct drbd_conf *mdev, union drbd_state mask, union drbd_state val)
{
	struct drbd_socket *sock;
	struct p_req_state *p;

	sock = &mdev->tconn->data;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return drbd_send_command(mdev, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);

}

int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val)
{
	enum drbd_packet cmd;
	struct drbd_socket *sock;
	struct p_req_state *p;

	cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
	sock = &tconn->data;
	p = conn_prepare_command(tconn, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
}

void drbd_send_sr_reply(struct drbd_conf *mdev, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;

	sock = &mdev->tconn->meta;
	p = drbd_prepare_command(mdev, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		drbd_send_command(mdev, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
	}
}

void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;
	enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;

	sock = &tconn->meta;
	p = conn_prepare_command(tconn, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0);
	}
}

static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
	BUG_ON(code & ~0xf);
	p->encoding = (p->encoding & ~0xf) | code;
}

static void dcbp_set_start(struct p_compressed_bm *p, int set)
{
	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}

static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
{
	BUG_ON(n & ~0x7);
	p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}
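/* Layout of p->encoding implied by the three helpers above:
 *   bit  7    - whether the first run is of set bits   (dcbp_set_start)
 *   bits 6..4 - pad bits in the last code byte         (dcbp_set_pad_bits)
 *   bits 3..0 - the drbd_bitmap_code in use            (dcbp_set_code)
 */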

int fill_bitmap_rle_bits(struct drbd_conf *mdev,
			 struct p_compressed_bm *p,
			 unsigned int size,
			 struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	unsigned long plain_bits;
	unsigned long tmp;
	unsigned long rl;
	unsigned len;
	unsigned toggle;
	int bits, use_rle;

	/* may we use this feature? */
	rcu_read_lock();
	use_rle = rcu_dereference(mdev->tconn->net_conf)->use_rle;
	rcu_read_unlock();
	if (!use_rle || mdev->tconn->agreed_pro_version < 90)
		return 0;

	if (c->bit_offset >= c->bm_bits)
		return 0; /* nothing to do. */

	/* use at most this many bytes */
	bitstream_init(&bs, p->code, size, 0);
	memset(p->code, 0, size);
	/* plain bits covered in this code string */
	plain_bits = 0;

	/* p->encoding & 0x80 stores whether the first run length is set.
	 * bit offset is implicit.
	 * start with toggle == 2 to be able to tell the first iteration */
	toggle = 2;

	/* see how many plain bits we can stuff into one packet
	 * using RLE and VLI. */
	do {
		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
				    : _drbd_bm_find_next(mdev, c->bit_offset);
		if (tmp == -1UL)
			tmp = c->bm_bits;
		rl = tmp - c->bit_offset;

		if (toggle == 2) { /* first iteration */
			if (rl == 0) {
				/* the first checked bit was set,
				 * store start value, */
				dcbp_set_start(p, 1);
				/* but skip encoding of zero run length */
				toggle = !toggle;
				continue;
			}
			dcbp_set_start(p, 0);
		}

		/* paranoia: catch zero runlength.
		 * can only happen if bitmap is modified while we scan it. */
		if (rl == 0) {
			dev_err(DEV, "unexpected zero runlength while encoding bitmap "
			    "t:%u bo:%lu\n", toggle, c->bit_offset);
			return -1;
		}

		bits = vli_encode_bits(&bs, rl);
		if (bits == -ENOBUFS) /* buffer full */
			break;
		if (bits <= 0) {
			dev_err(DEV, "error while encoding bitmap: %d\n", bits);
			return 0;
		}

		toggle = !toggle;
		plain_bits += rl;
		c->bit_offset = tmp;
	} while (c->bit_offset < c->bm_bits);

	len = bs.cur.b - p->code + !!bs.cur.bit;

	if (plain_bits < (len << 3)) {
		/* incompressible with this method.
		 * we need to rewind both word and bit position. */
		c->bit_offset -= plain_bits;
		bm_xfer_ctx_bit_to_word_offset(c);
		c->bit_offset = c->word_offset * BITS_PER_LONG;
		return 0;
	}

	/* RLE + VLI was able to compress it just fine.
	 * update c->word_offset. */
	bm_xfer_ctx_bit_to_word_offset(c);

	/* store pad_bits */
	dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);

	return len;
}
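/* Worked example (illustrative): for bitmap bits 0001110... the runs are
 * three clear bits, three set bits, one clear bit, and so on. The first
 * run is of clear bits, so dcbp_set_start(p, 0) is recorded and the run
 * lengths 3, 3, 1, ... are VLI-encoded into p->code. Had the first bit
 * been set, dcbp_set_start(p, 1) would be recorded and the encoding of the
 * leading zero-length run skipped, as in the "first iteration" branch. */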

/**
 * send_bitmap_rle_or_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
send_bitmap_rle_or_plain(struct drbd_conf *mdev, struct bm_xfer_ctx *c)
{
	struct drbd_socket *sock = &mdev->tconn->data;
	unsigned int header_size = drbd_header_size(mdev->tconn);
	struct p_compressed_bm *p = sock->sbuf + header_size;
	int len, err;

	len = fill_bitmap_rle_bits(mdev, p,
			DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
	if (len < 0)
		return -EIO;

	if (len) {
		dcbp_set_code(p, RLE_VLI_Bits);
		err = __send_command(mdev->tconn, mdev->vnr, sock,
				     P_COMPRESSED_BITMAP, sizeof(*p) + len,
				     NULL, 0);
		c->packets[0]++;
		c->bytes[0] += header_size + sizeof(*p) + len;

		if (c->bit_offset >= c->bm_bits)
			len = 0; /* DONE */
	} else {
		/* was not compressible.
		 * send a buffer full of plain text bits instead. */
		unsigned int data_size;
		unsigned long num_words;
		unsigned long *p = sock->sbuf + header_size;

		data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
		num_words = min_t(size_t, data_size / sizeof(*p),
				  c->bm_words - c->word_offset);
		len = num_words * sizeof(*p);
		if (len)
			drbd_bm_get_lel(mdev, c->word_offset, num_words, p);
		err = __send_command(mdev->tconn, mdev->vnr, sock, P_BITMAP, len, NULL, 0);
		c->word_offset += num_words;
		c->bit_offset = c->word_offset * BITS_PER_LONG;

		c->packets[1]++;
		c->bytes[1] += header_size + len;

		if (c->bit_offset > c->bm_bits)
			c->bit_offset = c->bm_bits;
	}
	if (!err) {
		if (len == 0) {
			INFO_bm_xfer_stats(mdev, "send", c);
			return 0;
		} else
			return 1;
	}
	return -EIO;
}

/* See the comment at receive_bitmap() */
static int _drbd_send_bitmap(struct drbd_conf *mdev)
{
	struct bm_xfer_ctx c;
	int err;

	if (!expect(mdev->bitmap))
		return false;

	if (get_ldev(mdev)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
			dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
			drbd_bm_set_all(mdev);
			if (drbd_bm_write(mdev)) {
				/* write_bm did fail! Leave full sync flag set in Meta P_DATA
				 * but otherwise process as per normal - need to tell other
				 * side that a full resync is required! */
				dev_err(DEV, "Failed to write bitmap to disk!\n");
			} else {
				drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
				drbd_md_sync(mdev);
			}
		}
		put_ldev(mdev);
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),
	};

	do {
		err = send_bitmap_rle_or_plain(mdev, &c);
	} while (err > 0);

	return err == 0;
}

int drbd_send_bitmap(struct drbd_conf *mdev)
{
	struct drbd_socket *sock = &mdev->tconn->data;
	int err = -1;

	mutex_lock(&sock->mutex);
	if (sock->socket)
		err = !_drbd_send_bitmap(mdev);
	mutex_unlock(&sock->mutex);
	return err;
}

void drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
{
	struct drbd_socket *sock;
	struct p_barrier_ack *p;

	if (mdev->state.conn < C_CONNECTED)
		return;

	sock = &mdev->tconn->meta;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return;
	p->barrier = barrier_nr;
	p->set_size = cpu_to_be32(set_size);
	drbd_send_command(mdev, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
}

/**
 * _drbd_send_ack() - Sends an ack packet
 * @mdev:	DRBD device.
 * @cmd:	Packet command code.
 * @sector:	sector, needs to be in big endian byte order
 * @blksize:	size in byte, needs to be in big endian byte order
 * @block_id:	Id, big endian byte order
 */
static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
			  u64 sector, u32 blksize, u64 block_id)
{
	struct drbd_socket *sock;
	struct p_block_ack *p;

	if (mdev->state.conn < C_CONNECTED)
		return -EIO;

	sock = &mdev->tconn->meta;
	p = drbd_prepare_command(mdev, sock);
	if (!p)
		return -EIO;
	p->sector = sector;
	p->block_id = block_id;
	p->blksize = blksize;
	p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
	return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
}

/* dp->sector and dp->block_id already/still in network byte order,
 * data_size is payload size according to dp->head,
 * and may need to be corrected for digest size. */
void drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
		      struct p_data *dp, int data_size)
{
	if (mdev->tconn->peer_integrity_tfm)
		data_size -= crypto_hash_digestsize(mdev->tconn->peer_integrity_tfm);
	_drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
		       dp->block_id);
}

void drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
		      struct p_block_req *rp)
{
	_drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
}

/**
 * drbd_send_ack() - Sends an ack packet
 * @mdev:	DRBD device
 * @cmd:	packet command code
 * @peer_req:	peer request
 */
int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
		  struct drbd_peer_request *peer_req)
{
	return _drbd_send_ack(mdev, cmd,
			      cpu_to_be64(peer_req->i.sector),
			      cpu_to_be32(peer_req->i.size),
			      peer_req->block_id);
}
1466
1467 /* This function misuses the block_id field to signal if the blocks
1468  * are is sync or not. */
1469 int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
1470                      sector_t sector, int blksize, u64 block_id)
1471 {
1472         return _drbd_send_ack(mdev, cmd,
1473                               cpu_to_be64(sector),
1474                               cpu_to_be32(blksize),
1475                               cpu_to_be64(block_id));
1476 }
1477
1478 int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
1479                        sector_t sector, int size, u64 block_id)
1480 {
1481         struct drbd_socket *sock;
1482         struct p_block_req *p;
1483
1484         sock = &mdev->tconn->data;
1485         p = drbd_prepare_command(mdev, sock);
1486         if (!p)
1487                 return -EIO;
1488         p->sector = cpu_to_be64(sector);
1489         p->block_id = block_id;
1490         p->blksize = cpu_to_be32(size);
1491         return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
1492 }
1493
1494 int drbd_send_drequest_csum(struct drbd_conf *mdev, sector_t sector, int size,
1495                             void *digest, int digest_size, enum drbd_packet cmd)
1496 {
1497         struct drbd_socket *sock;
1498         struct p_block_req *p;
1499
1500         /* FIXME: Put the digest into the preallocated socket buffer.  */
1501
1502         sock = &mdev->tconn->data;
1503         p = drbd_prepare_command(mdev, sock);
1504         if (!p)
1505                 return -EIO;
1506         p->sector = cpu_to_be64(sector);
1507         p->block_id = ID_SYNCER /* unused */;
1508         p->blksize = cpu_to_be32(size);
1509         return drbd_send_command(mdev, sock, cmd, sizeof(*p),
1510                                  digest, digest_size);
1511 }
1512
1513 int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
1514 {
1515         struct drbd_socket *sock;
1516         struct p_block_req *p;
1517
1518         sock = &mdev->tconn->data;
1519         p = drbd_prepare_command(mdev, sock);
1520         if (!p)
1521                 return -EIO;
1522         p->sector = cpu_to_be64(sector);
1523         p->block_id = ID_SYNCER /* unused */;
1524         p->blksize = cpu_to_be32(size);
1525         return drbd_send_command(mdev, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
1526 }
1527
1528 /* called on sndtimeo
1529  * returns false if we should retry,
1530  * true if we think the connection is dead
1531  */
1532 static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock)
1533 {
1534         int drop_it;
1535         /* long elapsed = (long)(jiffies - mdev->last_received); */
1536
1537         drop_it =   tconn->meta.socket == sock
1538                 || !tconn->asender.task
1539                 || get_t_state(&tconn->asender) != RUNNING
1540                 || tconn->cstate < C_WF_REPORT_PARAMS;
1541
1542         if (drop_it)
1543                 return true;
1544
1545         drop_it = !--tconn->ko_count;
1546         if (!drop_it) {
1547                 conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
1548                          current->comm, current->pid, tconn->ko_count);
1549                 request_ping(tconn);
1550         }
1551
1552         return drop_it; /* && (mdev->state == R_PRIMARY) */
1553 }
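/*
 * Note on the ko-count mechanism: ko_count is re-armed from
 * net_conf->ko_count each time we start sending on the data socket (see
 * drbd_send()).  Every send timeout decrements it; as long as it has not
 * reached zero we merely request a ping from the peer, only when it hits
 * zero do we declare the connection dead.
 */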
1554
1555 static void drbd_update_congested(struct drbd_tconn *tconn)
1556 {
1557         struct sock *sk = tconn->data.socket->sk;
1558         if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
1559                 set_bit(NET_CONGESTED, &tconn->flags);
1560 }
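/*
 * Note: the data socket counts as congested once more than 4/5 of its
 * send buffer is queued.  NET_CONGESTED is cleared again when a send
 * completes (see _drbd_send_page() and drbd_send()) and is reported to
 * the VM through drbd_congested().
 */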
1561
1562 /* The idea of sendpage seems to be to put some kind of reference
1563  * to the page into the skb, and to hand it over to the NIC. In
1564  * this process get_page() gets called.
1565  *
1566  * As soon as the page was really sent over the network put_page()
1567  * gets called by some part of the network layer. [ NIC driver? ]
1568  *
1569  * [ get_page() / put_page() increment/decrement the count. If count
1570  *   reaches 0 the page will be freed. ]
1571  *
1572  * This works nicely with pages from FSs.
1573  * But this means that in protocol A we might signal IO completion too early!
1574  *
1575  * In order not to corrupt data during a resync we must make sure
1576  * that we do not reuse our own buffer pages (EEs) too early, therefore
1577  * we have the net_ee list.
1578  *
1579  * XFS still seems to have problems: it submits pages with page_count == 0!
1580  * As a workaround, we disable sendpage on pages
1581  * with page_count == 0 or PageSlab.
1582  */
1583 static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
1584                               int offset, size_t size, unsigned msg_flags)
1585 {
1586         struct socket *socket;
1587         void *addr;
1588         int err;
1589
1590         socket = mdev->tconn->data.socket;
1591         addr = kmap(page) + offset;
1592         err = drbd_send_all(mdev->tconn, socket, addr, size, msg_flags);
1593         kunmap(page);
1594         if (!err)
1595                 mdev->send_cnt += size >> 9;
1596         return err;
1597 }
1598
1599 static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
1600                     int offset, size_t size, unsigned msg_flags)
1601 {
1602         struct socket *socket = mdev->tconn->data.socket;
1603         mm_segment_t oldfs = get_fs();
1604         int len = size;
1605         int err = -EIO;
1606
1607         /* e.g. XFS meta- & log-data is in slab pages, which have a
1608          * page_count of 0 and/or have PageSlab() set.
1609          * we cannot use sendpage for those, as it does get_page();
1610          * put_page(); and would thus either trigger a VM_BUG directly, or
1611          * __page_cache_release() a page that is actually still referenced
1612          * by someone, leading to some obscure delayed Oops somewhere else. */
1613         if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
1614                 return _drbd_no_send_page(mdev, page, offset, size, msg_flags);
1615
1616         msg_flags |= MSG_NOSIGNAL;
1617         drbd_update_congested(mdev->tconn);
1618         set_fs(KERNEL_DS);
1619         do {
1620                 int sent;
1621
1622                 sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
1623                 if (sent <= 0) {
1624                         if (sent == -EAGAIN) {
1625                                 if (we_should_drop_the_connection(mdev->tconn, socket))
1626                                         break;
1627                                 continue;
1628                         }
1629                         dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
1630                              __func__, (int)size, len, sent);
1631                         if (sent < 0)
1632                                 err = sent;
1633                         break;
1634                 }
1635                 len    -= sent;
1636                 offset += sent;
1637         } while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
1638         set_fs(oldfs);
1639         clear_bit(NET_CONGESTED, &mdev->tconn->flags);
1640
1641         if (len == 0) {
1642                 err = 0;
1643                 mdev->send_cnt += size >> 9;
1644         }
1645         return err;
1646 }
1647
1648 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
1649 {
1650         struct bio_vec *bvec;
1651         int i;
1652         /* hint all but last page with MSG_MORE */
1653         __bio_for_each_segment(bvec, bio, i, 0) {
1654                 int err;
1655
1656                 err = _drbd_no_send_page(mdev, bvec->bv_page,
1657                                          bvec->bv_offset, bvec->bv_len,
1658                                          i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
1659                 if (err)
1660                         return err;
1661         }
1662         return 0;
1663 }
1664
1665 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
1666 {
1667         struct bio_vec *bvec;
1668         int i;
1669         /* hint all but last page with MSG_MORE */
1670         __bio_for_each_segment(bvec, bio, i, 0) {
1671                 int err;
1672
1673                 err = _drbd_send_page(mdev, bvec->bv_page,
1674                                       bvec->bv_offset, bvec->bv_len,
1675                                       i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
1676                 if (err)
1677                         return err;
1678         }
1679         return 0;
1680 }
1681
1682 static int _drbd_send_zc_ee(struct drbd_conf *mdev,
1683                             struct drbd_peer_request *peer_req)
1684 {
1685         struct page *page = peer_req->pages;
1686         unsigned len = peer_req->i.size;
1687         int err;
1688
1689         /* hint all but last page with MSG_MORE */
1690         page_chain_for_each(page) {
1691                 unsigned l = min_t(unsigned, len, PAGE_SIZE);
1692
1693                 err = _drbd_send_page(mdev, page, 0, l,
1694                                       page_chain_next(page) ? MSG_MORE : 0);
1695                 if (err)
1696                         return err;
1697                 len -= l;
1698         }
1699         return 0;
1700 }
1701
1702 static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
1703 {
1704         if (mdev->tconn->agreed_pro_version >= 95)
1705                 return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
1706                         (bi_rw & REQ_FUA ? DP_FUA : 0) |
1707                         (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
1708                         (bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
1709         else
1710                 return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
1711 }
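/*
 * Example: with an agreed protocol version of 95 or newer, a
 * REQ_SYNC | REQ_FUA write goes out as DP_RW_SYNC | DP_FUA; older peers
 * only understand DP_RW_SYNC, all other flags are dropped on the wire.
 */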
1712
1713 /* Used to send write requests
1714  * R_PRIMARY -> Peer    (P_DATA)
1715  */
1716 int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
1717 {
1718         struct drbd_socket *sock;
1719         struct p_data *p;
1720         unsigned int dp_flags = 0;
1721         int dgs;
1722         int err;
1723
1724         dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_tfm) ?
1725                 crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
1726
1727         sock = &mdev->tconn->data;
1728         p = drbd_prepare_command(mdev, sock);
1729         if (!p)
1730                 return -EIO;
1731         p->sector = cpu_to_be64(req->i.sector);
1732         p->block_id = (unsigned long)req;
1733         p->seq_num = cpu_to_be32(req->seq_num = atomic_inc_return(&mdev->packet_seq));
1734         dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
1735         if (mdev->state.conn >= C_SYNC_SOURCE &&
1736             mdev->state.conn <= C_PAUSED_SYNC_T)
1737                 dp_flags |= DP_MAY_SET_IN_SYNC;
1738         if (mdev->tconn->agreed_pro_version >= 100) {
1739                 if (req->rq_state & RQ_EXP_RECEIVE_ACK)
1740                         dp_flags |= DP_SEND_RECEIVE_ACK;
1741                 if (req->rq_state & RQ_EXP_WRITE_ACK)
1742                         dp_flags |= DP_SEND_WRITE_ACK;
1743         }
1744         p->dp_flags = cpu_to_be32(dp_flags);
1745         if (dgs)
1746                 drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, p + 1);
1747         err = __send_command(mdev->tconn, mdev->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
1748         if (!err) {
1749                 /* For protocol A, we have to memcpy the payload into
1750                  * socket buffers, as we may complete right away
1751                  * as soon as we handed it over to tcp, at which point the data
1752                  * pages may become invalid.
1753                  *
1754                  * For data-integrity enabled, we copy it as well, so we can be
1755                  * sure that even if the bio pages may still be modified, it
1756                  * won't change the data on the wire, thus if the digest checks
1757                  * out ok after sending on this side, but does not fit on the
1758                  * receiving side, we sure have detected corruption elsewhere.
1759                  */
1760                 if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || dgs)
1761                         err = _drbd_send_bio(mdev, req->master_bio);
1762                 else
1763                         err = _drbd_send_zc_bio(mdev, req->master_bio);
1764
1765                 /* double check digest, sometimes buffers have been modified in flight. */
1766                 if (dgs > 0 && dgs <= 64) {
1767                         /* 64 byte, 512 bit, is the largest digest size
1768                          * currently supported in kernel crypto. */
1769                         unsigned char digest[64];
1770                         drbd_csum_bio(mdev, mdev->tconn->integrity_tfm, req->master_bio, digest);
1771                         if (memcmp(p + 1, digest, dgs)) {
1772                                 dev_warn(DEV,
1773                                         "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
1774                                         (unsigned long long)req->i.sector, req->i.size);
1775                         }
1776                 } /* else if (dgs > 64) {
1777                      ... Be noisy about digest too large ...
1778                 } */
1779         }
1780         mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */
1781
1782         return err;
1783 }
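/*
 * Note: drbd_send_dblock() chooses the payload path based on the
 * expected ack: if no ack is expected (protocol A) or a data-integrity
 * digest was computed, the bio pages are copied into the socket buffers
 * via _drbd_send_bio(); otherwise the zero-copy sendpage path
 * (_drbd_send_zc_bio()) is taken.
 */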
1784
1785 /* answer packet, used to send data back for read requests:
1786  *  Peer       -> (diskless) R_PRIMARY   (P_DATA_REPLY)
1787  *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
1788  */
1789 int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
1790                     struct drbd_peer_request *peer_req)
1791 {
1792         struct drbd_socket *sock;
1793         struct p_data *p;
1794         int err;
1795         int dgs;
1796
1797         dgs = (mdev->tconn->agreed_pro_version >= 87 && mdev->tconn->integrity_tfm) ?
1798                 crypto_hash_digestsize(mdev->tconn->integrity_tfm) : 0;
1799
1800         sock = &mdev->tconn->data;
1801         p = drbd_prepare_command(mdev, sock);
1802         if (!p)
1803                 return -EIO;
1804         p->sector = cpu_to_be64(peer_req->i.sector);
1805         p->block_id = peer_req->block_id;
1806         p->seq_num = 0;  /* unused */
1807         if (dgs)
1808                 drbd_csum_ee(mdev, mdev->tconn->integrity_tfm, peer_req, p + 1);
1809         err = __send_command(mdev->tconn, mdev->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
1810         if (!err)
1811                 err = _drbd_send_zc_ee(mdev, peer_req);
1812         mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */
1813
1814         return err;
1815 }
1816
1817 int drbd_send_out_of_sync(struct drbd_conf *mdev, struct drbd_request *req)
1818 {
1819         struct drbd_socket *sock;
1820         struct p_block_desc *p;
1821
1822         sock = &mdev->tconn->data;
1823         p = drbd_prepare_command(mdev, sock);
1824         if (!p)
1825                 return -EIO;
1826         p->sector = cpu_to_be64(req->i.sector);
1827         p->blksize = cpu_to_be32(req->i.size);
1828         return drbd_send_command(mdev, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
1829 }
1830
1831 /*
1832   drbd_send distinguishes two cases:
1833
1834   Packets sent via the data socket "sock"
1835   and packets sent via the meta data socket "msock"
1836
1837                     sock                      msock
1838   -----------------+-------------------------+------------------------------
1839   timeout           conf.timeout / 2          conf.timeout / 2
1840   timeout action    send a ping via msock     Abort communication
1841                                               and close all sockets
1842 */
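/*
 * Example (assuming a configured network timeout of six seconds): each
 * sendmsg() on either socket times out after three seconds.  A timeout
 * on "sock" requests a ping and retries up to ko-count times (see
 * we_should_drop_the_connection()); a timeout on "msock" tears the
 * connection down right away.
 */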
1843
1844 /*
1845  * you must have down()ed the appropriate [m]sock_mutex elsewhere!
1846  */
1847 int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
1848               void *buf, size_t size, unsigned msg_flags)
1849 {
1850         struct kvec iov;
1851         struct msghdr msg;
1852         int rv, sent = 0;
1853
1854         if (!sock)
1855                 return -EBADR;
1856
1857         /* THINK  if (signal_pending) return ... ? */
1858
1859         iov.iov_base = buf;
1860         iov.iov_len  = size;
1861
1862         msg.msg_name       = NULL;
1863         msg.msg_namelen    = 0;
1864         msg.msg_control    = NULL;
1865         msg.msg_controllen = 0;
1866         msg.msg_flags      = msg_flags | MSG_NOSIGNAL;
1867
1868         if (sock == tconn->data.socket) {
1869                 rcu_read_lock();
1870                 tconn->ko_count = rcu_dereference(tconn->net_conf)->ko_count;
1871                 rcu_read_unlock();
1872                 drbd_update_congested(tconn);
1873         }
1874         do {
1875                 /* STRANGE
1876                  * tcp_sendmsg does _not_ use its size parameter at all ?
1877                  *
1878                  * -EAGAIN on timeout, -EINTR on signal.
1879                  */
1880 /* THINK
1881  * do we need to block DRBD_SIG if sock == &meta.socket ??
1882  * otherwise wake_asender() might interrupt some send_*Ack !
1883  */
1884                 rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
1885                 if (rv == -EAGAIN) {
1886                         if (we_should_drop_the_connection(tconn, sock))
1887                                 break;
1888                         else
1889                                 continue;
1890                 }
1891                 if (rv == -EINTR) {
1892                         flush_signals(current);
1893                         rv = 0;
1894                 }
1895                 if (rv < 0)
1896                         break;
1897                 sent += rv;
1898                 iov.iov_base += rv;
1899                 iov.iov_len  -= rv;
1900         } while (sent < size);
1901
1902         if (sock == tconn->data.socket)
1903                 clear_bit(NET_CONGESTED, &tconn->flags);
1904
1905         if (rv <= 0) {
1906                 if (rv != -EAGAIN) {
1907                         conn_err(tconn, "%s_sendmsg returned %d\n",
1908                                  sock == tconn->meta.socket ? "msock" : "sock",
1909                                  rv);
1910                         conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD);
1911                 } else
1912                         conn_request_state(tconn, NS(conn, C_TIMEOUT), CS_HARD);
1913         }
1914
1915         return sent;
1916 }
1917
1918 /**
1919  * drbd_send_all  -  Send an entire buffer
1920  *
1921  * Returns 0 upon success and a negative error value otherwise.
1922  */
1923 int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer,
1924                   size_t size, unsigned msg_flags)
1925 {
1926         int err;
1927
1928         err = drbd_send(tconn, sock, buffer, size, msg_flags);
1929         if (err < 0)
1930                 return err;
1931         if (err != size)
1932                 return -EIO;
1933         return 0;
1934 }
1935
1936 static int drbd_open(struct block_device *bdev, fmode_t mode)
1937 {
1938         struct drbd_conf *mdev = bdev->bd_disk->private_data;
1939         unsigned long flags;
1940         int rv = 0;
1941
1942         mutex_lock(&drbd_main_mutex);
1943         spin_lock_irqsave(&mdev->tconn->req_lock, flags);
1944         /* to have a stable mdev->state.role
1945          * and no race with updating open_cnt */
1946
1947         if (mdev->state.role != R_PRIMARY) {
1948                 if (mode & FMODE_WRITE)
1949                         rv = -EROFS;
1950                 else if (!allow_oos)
1951                         rv = -EMEDIUMTYPE;
1952         }
1953
1954         if (!rv)
1955                 mdev->open_cnt++;
1956         spin_unlock_irqrestore(&mdev->tconn->req_lock, flags);
1957         mutex_unlock(&drbd_main_mutex);
1958
1959         return rv;
1960 }
1961
1962 static int drbd_release(struct gendisk *gd, fmode_t mode)
1963 {
1964         struct drbd_conf *mdev = gd->private_data;
1965         mutex_lock(&drbd_main_mutex);
1966         mdev->open_cnt--;
1967         mutex_unlock(&drbd_main_mutex);
1968         return 0;
1969 }
1970
1971 static void drbd_set_defaults(struct drbd_conf *mdev)
1972 {
1973         /* Beware! The actual layout differs
1974          * between big endian and little endian */
1975         mdev->state = (union drbd_dev_state) {
1976                 { .role = R_SECONDARY,
1977                   .peer = R_UNKNOWN,
1978                   .conn = C_STANDALONE,
1979                   .disk = D_DISKLESS,
1980                   .pdsk = D_UNKNOWN,
1981                 } };
1982 }
1983
1984 void drbd_init_set_defaults(struct drbd_conf *mdev)
1985 {
1986         /* the memset(,0,) did most of this.
1987          * note: only assignments, no allocation in here */
1988
1989         drbd_set_defaults(mdev);
1990
1991         atomic_set(&mdev->ap_bio_cnt, 0);
1992         atomic_set(&mdev->ap_pending_cnt, 0);
1993         atomic_set(&mdev->rs_pending_cnt, 0);
1994         atomic_set(&mdev->unacked_cnt, 0);
1995         atomic_set(&mdev->local_cnt, 0);
1996         atomic_set(&mdev->pp_in_use_by_net, 0);
1997         atomic_set(&mdev->rs_sect_in, 0);
1998         atomic_set(&mdev->rs_sect_ev, 0);
1999         atomic_set(&mdev->ap_in_flight, 0);
2000
2001         mutex_init(&mdev->md_io_mutex);
2002         mutex_init(&mdev->own_state_mutex);
2003         mdev->state_mutex = &mdev->own_state_mutex;
2004
2005         spin_lock_init(&mdev->al_lock);
2006         spin_lock_init(&mdev->peer_seq_lock);
2007         spin_lock_init(&mdev->epoch_lock);
2008
2009         INIT_LIST_HEAD(&mdev->active_ee);
2010         INIT_LIST_HEAD(&mdev->sync_ee);
2011         INIT_LIST_HEAD(&mdev->done_ee);
2012         INIT_LIST_HEAD(&mdev->read_ee);
2013         INIT_LIST_HEAD(&mdev->net_ee);
2014         INIT_LIST_HEAD(&mdev->resync_reads);
2015         INIT_LIST_HEAD(&mdev->resync_work.list);
2016         INIT_LIST_HEAD(&mdev->unplug_work.list);
2017         INIT_LIST_HEAD(&mdev->go_diskless.list);
2018         INIT_LIST_HEAD(&mdev->md_sync_work.list);
2019         INIT_LIST_HEAD(&mdev->start_resync_work.list);
2020         INIT_LIST_HEAD(&mdev->bm_io_work.w.list);
2021
2022         mdev->resync_work.cb  = w_resync_timer;
2023         mdev->unplug_work.cb  = w_send_write_hint;
2024         mdev->go_diskless.cb  = w_go_diskless;
2025         mdev->md_sync_work.cb = w_md_sync;
2026         mdev->bm_io_work.w.cb = w_bitmap_io;
2027         mdev->start_resync_work.cb = w_start_resync;
2028
2029         mdev->resync_work.mdev  = mdev;
2030         mdev->unplug_work.mdev  = mdev;
2031         mdev->go_diskless.mdev  = mdev;
2032         mdev->md_sync_work.mdev = mdev;
2033         mdev->bm_io_work.w.mdev = mdev;
2034         mdev->start_resync_work.mdev = mdev;
2035
2036         init_timer(&mdev->resync_timer);
2037         init_timer(&mdev->md_sync_timer);
2038         init_timer(&mdev->start_resync_timer);
2039         init_timer(&mdev->request_timer);
2040         mdev->resync_timer.function = resync_timer_fn;
2041         mdev->resync_timer.data = (unsigned long) mdev;
2042         mdev->md_sync_timer.function = md_sync_timer_fn;
2043         mdev->md_sync_timer.data = (unsigned long) mdev;
2044         mdev->start_resync_timer.function = start_resync_timer_fn;
2045         mdev->start_resync_timer.data = (unsigned long) mdev;
2046         mdev->request_timer.function = request_timer_fn;
2047         mdev->request_timer.data = (unsigned long) mdev;
2048
2049         init_waitqueue_head(&mdev->misc_wait);
2050         init_waitqueue_head(&mdev->state_wait);
2051         init_waitqueue_head(&mdev->ee_wait);
2052         init_waitqueue_head(&mdev->al_wait);
2053         init_waitqueue_head(&mdev->seq_wait);
2054
2055         mdev->write_ordering = WO_bdev_flush;
2056         mdev->resync_wenr = LC_FREE;
2057         mdev->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
2058         mdev->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
2059 }
2060
2061 void drbd_mdev_cleanup(struct drbd_conf *mdev)
2062 {
2063         int i;
2064         if (mdev->tconn->receiver.t_state != NONE)
2065                 dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
2066                                 mdev->tconn->receiver.t_state);
2067
2068         /* no need to lock it, I'm the only thread alive */
2069         if (atomic_read(&mdev->current_epoch->epoch_size) !=  0)
2070                 dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
2071         mdev->al_writ_cnt  =
2072         mdev->bm_writ_cnt  =
2073         mdev->read_cnt     =
2074         mdev->recv_cnt     =
2075         mdev->send_cnt     =
2076         mdev->writ_cnt     =
2077         mdev->p_size       =
2078         mdev->rs_start     =
2079         mdev->rs_total     =
2080         mdev->rs_failed    = 0;
2081         mdev->rs_last_events = 0;
2082         mdev->rs_last_sect_ev = 0;
2083         for (i = 0; i < DRBD_SYNC_MARKS; i++) {
2084                 mdev->rs_mark_left[i] = 0;
2085                 mdev->rs_mark_time[i] = 0;
2086         }
2087         D_ASSERT(mdev->tconn->net_conf == NULL);
2088
2089         drbd_set_my_capacity(mdev, 0);
2090         if (mdev->bitmap) {
2091                 /* maybe never allocated. */
2092                 drbd_bm_resize(mdev, 0, 1);
2093                 drbd_bm_cleanup(mdev);
2094         }
2095
2096         drbd_free_bc(mdev->ldev);
2097         mdev->ldev = NULL;
2098
2099         clear_bit(AL_SUSPENDED, &mdev->flags);
2100
2101         D_ASSERT(list_empty(&mdev->active_ee));
2102         D_ASSERT(list_empty(&mdev->sync_ee));
2103         D_ASSERT(list_empty(&mdev->done_ee));
2104         D_ASSERT(list_empty(&mdev->read_ee));
2105         D_ASSERT(list_empty(&mdev->net_ee));
2106         D_ASSERT(list_empty(&mdev->resync_reads));
2107         D_ASSERT(list_empty(&mdev->tconn->data.work.q));
2108         D_ASSERT(list_empty(&mdev->tconn->meta.work.q));
2109         D_ASSERT(list_empty(&mdev->resync_work.list));
2110         D_ASSERT(list_empty(&mdev->unplug_work.list));
2111         D_ASSERT(list_empty(&mdev->go_diskless.list));
2112
2113         drbd_set_defaults(mdev);
2114 }
2115
2116
2117 static void drbd_destroy_mempools(void)
2118 {
2119         struct page *page;
2120
2121         while (drbd_pp_pool) {
2122                 page = drbd_pp_pool;
2123                 drbd_pp_pool = (struct page *)page_private(page);
2124                 __free_page(page);
2125                 drbd_pp_vacant--;
2126         }
2127
2128         /* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */
2129
2130         if (drbd_md_io_bio_set)
2131                 bioset_free(drbd_md_io_bio_set);
2132         if (drbd_md_io_page_pool)
2133                 mempool_destroy(drbd_md_io_page_pool);
2134         if (drbd_ee_mempool)
2135                 mempool_destroy(drbd_ee_mempool);
2136         if (drbd_request_mempool)
2137                 mempool_destroy(drbd_request_mempool);
2138         if (drbd_ee_cache)
2139                 kmem_cache_destroy(drbd_ee_cache);
2140         if (drbd_request_cache)
2141                 kmem_cache_destroy(drbd_request_cache);
2142         if (drbd_bm_ext_cache)
2143                 kmem_cache_destroy(drbd_bm_ext_cache);
2144         if (drbd_al_ext_cache)
2145                 kmem_cache_destroy(drbd_al_ext_cache);
2146
2147         drbd_md_io_bio_set   = NULL;
2148         drbd_md_io_page_pool = NULL;
2149         drbd_ee_mempool      = NULL;
2150         drbd_request_mempool = NULL;
2151         drbd_ee_cache        = NULL;
2152         drbd_request_cache   = NULL;
2153         drbd_bm_ext_cache    = NULL;
2154         drbd_al_ext_cache    = NULL;
2155
2156         return;
2157 }
2158
2159 static int drbd_create_mempools(void)
2160 {
2161         struct page *page;
2162         const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * minor_count;
2163         int i;
2164
2165         /* prepare our caches and mempools */
2166         drbd_request_mempool = NULL;
2167         drbd_ee_cache        = NULL;
2168         drbd_request_cache   = NULL;
2169         drbd_bm_ext_cache    = NULL;
2170         drbd_al_ext_cache    = NULL;
2171         drbd_pp_pool         = NULL;
2172         drbd_md_io_page_pool = NULL;
2173         drbd_md_io_bio_set   = NULL;
2174
2175         /* caches */
2176         drbd_request_cache = kmem_cache_create(
2177                 "drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
2178         if (drbd_request_cache == NULL)
2179                 goto Enomem;
2180
2181         drbd_ee_cache = kmem_cache_create(
2182                 "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
2183         if (drbd_ee_cache == NULL)
2184                 goto Enomem;
2185
2186         drbd_bm_ext_cache = kmem_cache_create(
2187                 "drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
2188         if (drbd_bm_ext_cache == NULL)
2189                 goto Enomem;
2190
2191         drbd_al_ext_cache = kmem_cache_create(
2192                 "drbd_al", sizeof(struct lc_element), 0, 0, NULL);
2193         if (drbd_al_ext_cache == NULL)
2194                 goto Enomem;
2195
2196         /* mempools */
2197         drbd_md_io_bio_set = bioset_create(DRBD_MIN_POOL_PAGES, 0);
2198         if (drbd_md_io_bio_set == NULL)
2199                 goto Enomem;
2200
2201         drbd_md_io_page_pool = mempool_create_page_pool(DRBD_MIN_POOL_PAGES, 0);
2202         if (drbd_md_io_page_pool == NULL)
2203                 goto Enomem;
2204
2205         drbd_request_mempool = mempool_create(number,
2206                 mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
2207         if (drbd_request_mempool == NULL)
2208                 goto Enomem;
2209
2210         drbd_ee_mempool = mempool_create(number,
2211                 mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
2212         if (drbd_ee_mempool == NULL)
2213                 goto Enomem;
2214
2215         /* drbd's page pool */
2216         spin_lock_init(&drbd_pp_lock);
2217
2218         for (i = 0; i < number; i++) {
2219                 page = alloc_page(GFP_HIGHUSER);
2220                 if (!page)
2221                         goto Enomem;
2222                 set_page_private(page, (unsigned long)drbd_pp_pool);
2223                 drbd_pp_pool = page;
2224         }
2225         drbd_pp_vacant = number;
2226
2227         return 0;
2228
2229 Enomem:
2230         drbd_destroy_mempools(); /* in case we allocated some */
2231         return -ENOMEM;
2232 }
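/*
 * Note: drbd_pp_pool is not a mempool but a plain LIFO of pre-allocated
 * pages, singly linked through page_private().  It is sized to hold one
 * maximum-sized bio (DRBD_MAX_BIO_SIZE / PAGE_SIZE pages) per configured
 * minor; drbd_destroy_mempools() walks the same chain to free it again.
 */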
2233
2234 static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
2235         void *unused)
2236 {
2237         /* just so we have it.  you never know what interesting things we
2238          * might want to do here some day...
2239          */
2240
2241         return NOTIFY_DONE;
2242 }
2243
2244 static struct notifier_block drbd_notifier = {
2245         .notifier_call = drbd_notify_sys,
2246 };
2247
2248 static void drbd_release_all_peer_reqs(struct drbd_conf *mdev)
2249 {
2250         int rr;
2251
2252         rr = drbd_free_peer_reqs(mdev, &mdev->active_ee);
2253         if (rr)
2254                 dev_err(DEV, "%d EEs in active list found!\n", rr);
2255
2256         rr = drbd_free_peer_reqs(mdev, &mdev->sync_ee);
2257         if (rr)
2258                 dev_err(DEV, "%d EEs in sync list found!\n", rr);
2259
2260         rr = drbd_free_peer_reqs(mdev, &mdev->read_ee);
2261         if (rr)
2262                 dev_err(DEV, "%d EEs in read list found!\n", rr);
2263
2264         rr = drbd_free_peer_reqs(mdev, &mdev->done_ee);
2265         if (rr)
2266                 dev_err(DEV, "%d EEs in done list found!\n", rr);
2267
2268         rr = drbd_free_peer_reqs(mdev, &mdev->net_ee);
2269         if (rr)
2270                 dev_err(DEV, "%d EEs in net list found!\n", rr);
2271 }
2272
2273 /* caution. no locking. */
2274 void drbd_minor_destroy(struct kref *kref)
2275 {
2276         struct drbd_conf *mdev = container_of(kref, struct drbd_conf, kref);
2277         struct drbd_tconn *tconn = mdev->tconn;
2278
2279         /* paranoia asserts */
2280         D_ASSERT(mdev->open_cnt == 0);
2281         D_ASSERT(list_empty(&mdev->tconn->data.work.q));
2282         /* end paranoia asserts */
2283
2284         /* cleanup stuff that may have been allocated during
2285          * device (re-)configuration or state changes */
2286
2287         if (mdev->this_bdev)
2288                 bdput(mdev->this_bdev);
2289
2290         drbd_free_bc(mdev->ldev);
2291         mdev->ldev = NULL;
2292
2293         drbd_release_all_peer_reqs(mdev);
2294
2295         lc_destroy(mdev->act_log);
2296         lc_destroy(mdev->resync);
2297
2298         kfree(mdev->p_uuid);
2299         /* mdev->p_uuid = NULL; */
2300
2301         kfree(mdev->current_epoch);
2302         if (mdev->bitmap) /* should no longer be there. */
2303                 drbd_bm_cleanup(mdev);
2304         __free_page(mdev->md_io_page);
2305         put_disk(mdev->vdisk);
2306         blk_cleanup_queue(mdev->rq_queue);
2307         kfree(mdev->rs_plan_s);
2308         kfree(mdev);
2309
2310         kref_put(&tconn->kref, &conn_destroy);
2311 }
2312
2313 static void drbd_cleanup(void)
2314 {
2315         unsigned int i;
2316         struct drbd_conf *mdev;
2317         struct drbd_tconn *tconn, *tmp;
2318
2319         unregister_reboot_notifier(&drbd_notifier);
2320
2321         /* first remove proc,
2322  * drbdsetup uses its presence to detect
2323  * whether DRBD is loaded.
2324  * If we got stuck in proc removal
2325  * while netlink was already deregistered,
2326  * some drbdsetup commands could wait forever
2327  * for an answer.
2328          */
2329         if (drbd_proc)
2330                 remove_proc_entry("drbd", NULL);
2331
2332         drbd_genl_unregister();
2333
2334         down_write(&drbd_cfg_rwsem);
2335         idr_for_each_entry(&minors, mdev, i) {
2336                 idr_remove(&minors, mdev_to_minor(mdev));
2337                 idr_remove(&mdev->tconn->volumes, mdev->vnr);
2338                 del_gendisk(mdev->vdisk);
2339                 synchronize_rcu();
2340                 kref_put(&mdev->kref, &drbd_minor_destroy);
2341         }
2342
2343         list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
2344                 list_del_rcu(&tconn->all_tconn);
2345                 synchronize_rcu();
2346                 kref_put(&tconn->kref, &conn_destroy);
2347         }
2348         up_write(&drbd_cfg_rwsem);
2349
2350         drbd_destroy_mempools();
2351         unregister_blkdev(DRBD_MAJOR, "drbd");
2352
2353         idr_destroy(&minors);
2354
2355         printk(KERN_INFO "drbd: module cleanup done.\n");
2356 }
2357
2358 /**
2359  * drbd_congested() - Callback for pdflush
2360  * @congested_data:     User data
2361  * @bdi_bits:           Bits pdflush is currently interested in
2362  *
2363  * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
2364  */
2365 static int drbd_congested(void *congested_data, int bdi_bits)
2366 {
2367         struct drbd_conf *mdev = congested_data;
2368         struct request_queue *q;
2369         char reason = '-';
2370         int r = 0;
2371
2372         if (!may_inc_ap_bio(mdev)) {
2373                 /* DRBD has frozen IO */
2374                 r = bdi_bits;
2375                 reason = 'd';
2376                 goto out;
2377         }
2378
2379         if (get_ldev(mdev)) {
2380                 q = bdev_get_queue(mdev->ldev->backing_bdev);
2381                 r = bdi_congested(&q->backing_dev_info, bdi_bits);
2382                 put_ldev(mdev);
2383                 if (r)
2384                         reason = 'b';
2385         }
2386
2387         if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->tconn->flags)) {
2388                 r |= (1 << BDI_async_congested);
2389                 reason = reason == 'b' ? 'a' : 'n';
2390         }
2391
2392 out:
2393         mdev->congestion_reason = reason;
2394         return r;
2395 }
2396
2397 static void drbd_init_workqueue(struct drbd_work_queue* wq)
2398 {
2399         sema_init(&wq->s, 0);
2400         spin_lock_init(&wq->q_lock);
2401         INIT_LIST_HEAD(&wq->q);
2402 }
2403
2404 struct drbd_tconn *conn_get_by_name(const char *name)
2405 {
2406         struct drbd_tconn *tconn;
2407
2408         if (!name || !name[0])
2409                 return NULL;
2410
2411         down_read(&drbd_cfg_rwsem);
2412         list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) {
2413                 if (!strcmp(tconn->name, name)) {
2414                         kref_get(&tconn->kref);
2415                         goto found;
2416                 }
2417         }
2418         tconn = NULL;
2419 found:
2420         up_read(&drbd_cfg_rwsem);
2421         return tconn;
2422 }
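/*
 * Hypothetical usage sketch: the tconn returned by conn_get_by_name()
 * carries an extra reference that the caller must drop again:
 *
 *	struct drbd_tconn *tconn = conn_get_by_name("r0");
 *	if (tconn) {
 *		...
 *		kref_put(&tconn->kref, &conn_destroy);
 *	}
 */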
2423
2424 static int drbd_alloc_socket(struct drbd_socket *socket)
2425 {
2426         socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
2427         if (!socket->rbuf)
2428                 return -ENOMEM;
2429         socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
2430         if (!socket->sbuf)
2431                 return -ENOMEM;
2432         return 0;
2433 }
2434
2435 static void drbd_free_socket(struct drbd_socket *socket)
2436 {
2437         free_page((unsigned long) socket->sbuf);
2438         free_page((unsigned long) socket->rbuf);
2439 }
2440
2441 void conn_free_crypto(struct drbd_tconn *tconn)
2442 {
2443         drbd_free_sock(tconn);
2444
2445         crypto_free_hash(tconn->csums_tfm);
2446         crypto_free_hash(tconn->verify_tfm);
2447         crypto_free_hash(tconn->cram_hmac_tfm);
2448         crypto_free_hash(tconn->integrity_tfm);
2449         crypto_free_hash(tconn->peer_integrity_tfm);
2450         kfree(tconn->int_dig_in);
2451         kfree(tconn->int_dig_vv);
2452
2453         tconn->csums_tfm = NULL;
2454         tconn->verify_tfm = NULL;
2455         tconn->cram_hmac_tfm = NULL;
2456         tconn->integrity_tfm = NULL;
2457         tconn->peer_integrity_tfm = NULL;
2458         tconn->int_dig_in = NULL;
2459         tconn->int_dig_vv = NULL;
2460 }
2461
2462 /* caller must be under genl_lock() */
2463 struct drbd_tconn *conn_create(const char *name)
2464 {
2465         struct drbd_tconn *tconn;
2466
2467         tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL);
2468         if (!tconn)
2469                 return NULL;
2470
2471         tconn->name = kstrdup(name, GFP_KERNEL);
2472         if (!tconn->name)
2473                 goto fail;
2474
2475         if (drbd_alloc_socket(&tconn->data))
2476                 goto fail;
2477         if (drbd_alloc_socket(&tconn->meta))
2478                 goto fail;
2479
2480         if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL))
2481                 goto fail;
2482
2483         if (!tl_init(tconn))
2484                 goto fail;
2485
2486         tconn->cstate = C_STANDALONE;
2487         mutex_init(&tconn->cstate_mutex);
2488         spin_lock_init(&tconn->req_lock);
2489         mutex_init(&tconn->conf_update);
2490         init_waitqueue_head(&tconn->ping_wait);
2491         idr_init(&tconn->volumes);
2492
2493         drbd_init_workqueue(&tconn->data.work);
2494         mutex_init(&tconn->data.mutex);
2495
2496         drbd_init_workqueue(&tconn->meta.work);
2497         mutex_init(&tconn->meta.mutex);
2498
2499         drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver");
2500         drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker");
2501         drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender");
2502
2503         drbd_set_res_opts_defaults(&tconn->res_opts);
2504
2505         down_write(&drbd_cfg_rwsem);
2506         kref_init(&tconn->kref);
2507         list_add_tail_rcu(&tconn->all_tconn, &drbd_tconns);
2508         up_write(&drbd_cfg_rwsem);
2509
2510         return tconn;
2511
2512 fail:
2513         tl_cleanup(tconn);
2514         free_cpumask_var(tconn->cpu_mask);
2515         drbd_free_socket(&tconn->meta);
2516         drbd_free_socket(&tconn->data);
2517         kfree(tconn->name);
2518         kfree(tconn);
2519
2520         return NULL;
2521 }
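/*
 * Note: drbd_tconns is an RCU-protected list.  Writers such as
 * conn_create() and drbd_cleanup() hold drbd_cfg_rwsem for writing and
 * use list_add_tail_rcu()/list_del_rcu() plus synchronize_rcu() before
 * freeing; readers traverse it with list_for_each_entry_rcu() (see
 * conn_get_by_name()).
 */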
2522
2523 void conn_destroy(struct kref *kref)
2524 {
2525         struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
2526
2527         idr_destroy(&tconn->volumes);
2528
2529         free_cpumask_var(tconn->cpu_mask);
2530         drbd_free_socket(&tconn->meta);
2531         drbd_free_socket(&tconn->data);
2532         kfree(tconn->name);
2533         kfree(tconn->int_dig_in);
2534         kfree(tconn->int_dig_vv);
2535         kfree(tconn);
2536 }
2537
2538 enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr)
2539 {
2540         struct drbd_conf *mdev;
2541         struct gendisk *disk;
2542         struct request_queue *q;
2543         int vnr_got = vnr;
2544         int minor_got = minor;
2545         enum drbd_ret_code err = ERR_NOMEM;
2546
2547         mdev = minor_to_mdev(minor);
2548         if (mdev)
2549                 return ERR_MINOR_EXISTS;
2550
2551         /* GFP_KERNEL, we are outside of all write-out paths */
2552         mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
2553         if (!mdev)
2554                 return ERR_NOMEM;
2555
2556         kref_get(&tconn->kref);
2557         mdev->tconn = tconn;
2558
2559         mdev->minor = minor;
2560         mdev->vnr = vnr;
2561
2562         drbd_init_set_defaults(mdev);
2563
2564         q = blk_alloc_queue(GFP_KERNEL);
2565         if (!q)
2566                 goto out_no_q;
2567         mdev->rq_queue = q;
2568         q->queuedata   = mdev;
2569
2570         disk = alloc_disk(1);
2571         if (!disk)
2572                 goto out_no_disk;
2573         mdev->vdisk = disk;
2574
2575         set_disk_ro(disk, true);
2576
2577         disk->queue = q;
2578         disk->major = DRBD_MAJOR;
2579         disk->first_minor = minor;
2580         disk->fops = &drbd_ops;
2581         sprintf(disk->disk_name, "drbd%d", minor);
2582         disk->private_data = mdev;
2583
2584         mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
2585         /* we have no partitions. we contain only ourselves. */
2586         mdev->this_bdev->bd_contains = mdev->this_bdev;
2587
2588         q->backing_dev_info.congested_fn = drbd_congested;
2589         q->backing_dev_info.congested_data = mdev;
2590
2591         blk_queue_make_request(q, drbd_make_request);
2592         /* Setting max_hw_sectors to an odd value of 8 KiB here
2593            triggers a max_bio_size message upon first attach or connect. */
2594         blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
2595         blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
2596         blk_queue_merge_bvec(q, drbd_merge_bvec);
2597         q->queue_lock = &mdev->tconn->req_lock; /* reuse req_lock as the queue lock */
2598
2599         mdev->md_io_page = alloc_page(GFP_KERNEL);
2600         if (!mdev->md_io_page)
2601                 goto out_no_io_page;
2602
2603         if (drbd_bm_init(mdev))
2604                 goto out_no_bitmap;
2605         mdev->read_requests = RB_ROOT;
2606         mdev->write_requests = RB_ROOT;
2607
2608         mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
2609         if (!mdev->current_epoch)
2610                 goto out_no_epoch;
2611
2612         INIT_LIST_HEAD(&mdev->current_epoch->list);
2613         mdev->epochs = 1;
2614
2615         if (!idr_pre_get(&minors, GFP_KERNEL))
2616                 goto out_no_minor_idr;
2617         if (idr_get_new_above(&minors, mdev, minor, &minor_got))
2618                 goto out_no_minor_idr;
2619         if (minor_got != minor) {
2620                 err = ERR_MINOR_EXISTS;
2621                 drbd_msg_put_info("requested minor exists already");
2622                 goto out_idr_remove_minor;
2623         }
2624
2625         if (!idr_pre_get(&tconn->volumes, GFP_KERNEL))
2626                 goto out_idr_remove_minor;
2627         if (idr_get_new_above(&tconn->volumes, mdev, vnr, &vnr_got))
2628                 goto out_idr_remove_minor;
2629         if (vnr_got != vnr) {
2630                 err = ERR_INVALID_REQUEST;
2631                 drbd_msg_put_info("requested volume exists already");
2632                 goto out_idr_remove_vol;
2633         }
2634         add_disk(disk);
2635         kref_init(&mdev->kref); /* one ref for both idrs and the add_disk */
2636
2637         /* inherit the connection state */
2638         mdev->state.conn = tconn->cstate;
2639         if (mdev->state.conn == C_WF_REPORT_PARAMS)
2640                 drbd_connected(vnr, mdev, tconn);
2641
2642         return NO_ERROR;
2643
2644 out_idr_remove_vol:
2645         idr_remove(&tconn->volumes, vnr_got);
2646 out_idr_remove_minor:
2647         idr_remove(&minors, minor_got);
2648         synchronize_rcu();
2649 out_no_minor_idr:
2650         kfree(mdev->current_epoch);
2651 out_no_epoch:
2652         drbd_bm_cleanup(mdev);
2653 out_no_bitmap:
2654         __free_page(mdev->md_io_page);
2655 out_no_io_page:
2656         put_disk(disk);
2657 out_no_disk:
2658         blk_cleanup_queue(q);
2659 out_no_q:
2660         kfree(mdev);
2661         kref_put(&tconn->kref, &conn_destroy);
2662         return err;
2663 }
2664
2665 int __init drbd_init(void)
2666 {
2667         int err;
2668
2669         if (minor_count < DRBD_MINOR_COUNT_MIN || minor_count > DRBD_MINOR_COUNT_MAX) {
2670                 printk(KERN_ERR
2671                        "drbd: invalid minor_count (%d)\n", minor_count);
2672 #ifdef MODULE
2673                 return -EINVAL;
2674 #else
2675                 minor_count = 8;
2676 #endif
2677         }
2678
2679         err = register_blkdev(DRBD_MAJOR, "drbd");
2680         if (err) {
2681                 printk(KERN_ERR
2682                        "drbd: unable to register block device major %d\n",
2683                        DRBD_MAJOR);
2684                 return err;
2685         }
2686
2687         err = drbd_genl_register();
2688         if (err) {
2689                 printk(KERN_ERR "drbd: unable to register generic netlink family\n");
2690                 goto fail;
2691         }
2692
2693
2694         register_reboot_notifier(&drbd_notifier);
2695
2696         /*
2697          * allocate all necessary structs
2698          */
2699         err = -ENOMEM;
2700
2701         init_waitqueue_head(&drbd_pp_wait);
2702
2703         drbd_proc = NULL; /* play safe for drbd_cleanup */
2704         idr_init(&minors);
2705
2706         err = drbd_create_mempools();
2707         if (err)
2708                 goto fail;
2709
2710         drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
2711         if (!drbd_proc) {
2712                 printk(KERN_ERR "drbd: unable to register proc file\n");
2713                 goto fail;
2714         }
2715
2716         rwlock_init(&global_state_lock);
2717         INIT_LIST_HEAD(&drbd_tconns);
2718
2719         printk(KERN_INFO "drbd: initialized. "
2720                "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
2721                API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
2722         printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
2723         printk(KERN_INFO "drbd: registered as block device major %d\n",
2724                 DRBD_MAJOR);
2725
2726         return 0; /* Success! */
2727
2728 fail:
2729         drbd_cleanup();
2730         if (err == -ENOMEM)
2731                 /* currently always the case */
2732                 printk(KERN_ERR "drbd: ran out of memory\n");
2733         else
2734                 printk(KERN_ERR "drbd: initialization failure\n");
2735         return err;
2736 }
2737
2738 void drbd_free_bc(struct drbd_backing_dev *ldev)
2739 {
2740         if (ldev == NULL)
2741                 return;
2742
2743         blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2744         blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2745
2746         kfree(ldev);
2747 }
2748
2749 void drbd_free_sock(struct drbd_tconn *tconn)
2750 {
2751         if (tconn->data.socket) {
2752                 mutex_lock(&tconn->data.mutex);
2753                 kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR);
2754                 sock_release(tconn->data.socket);
2755                 tconn->data.socket = NULL;
2756                 mutex_unlock(&tconn->data.mutex);
2757         }
2758         if (tconn->meta.socket) {
2759                 mutex_lock(&tconn->meta.mutex);
2760                 kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR);
2761                 sock_release(tconn->meta.socket);
2762                 tconn->meta.socket = NULL;
2763                 mutex_unlock(&tconn->meta.mutex);
2764         }
2765 }
2766
2767 /* meta data management */
2768
2769 struct meta_data_on_disk {
2770         u64 la_size;           /* last agreed size. */
2771         u64 uuid[UI_SIZE];   /* UUIDs. */
2772         u64 device_uuid;
2773         u64 reserved_u64_1;
2774         u32 flags;             /* MDF */
2775         u32 magic;
2776         u32 md_size_sect;
2777         u32 al_offset;         /* offset to this block */
2778         u32 al_nr_extents;     /* important for restoring the AL */
2779               /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
2780         u32 bm_offset;         /* offset to the bitmap, from here */
2781         u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
2782         u32 la_peer_max_bio_size;   /* last peer max_bio_size */
2783         u32 reserved_u32[3];
2784
2785 } __packed;
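/*
 * Note: all multi-byte fields of struct meta_data_on_disk are kept in
 * big endian byte order on disk; the whole struct has to fit into the
 * single 512-byte sector that drbd_md_sync() and drbd_md_read() access.
 */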
2786
2787 /**
2788  * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
2789  * @mdev:       DRBD device.
2790  */
2791 void drbd_md_sync(struct drbd_conf *mdev)
2792 {
2793         struct meta_data_on_disk *buffer;
2794         sector_t sector;
2795         int i;
2796
2797         del_timer(&mdev->md_sync_timer);
2798         /* timer may be rearmed by drbd_md_mark_dirty() now. */
2799         if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
2800                 return;
2801
2802         /* We use D_FAILED here and not D_ATTACHING because we try to write
2803          * metadata even if we detach due to a disk failure! */
2804         if (!get_ldev_if_state(mdev, D_FAILED))
2805                 return;
2806
2807         mutex_lock(&mdev->md_io_mutex);
2808         buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
2809         memset(buffer, 0, 512);
2810
2811         buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
2812         for (i = UI_CURRENT; i < UI_SIZE; i++)
2813                 buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
2814         buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
2815         buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);
2816
2817         buffer->md_size_sect  = cpu_to_be32(mdev->ldev->md.md_size_sect);
2818         buffer->al_offset     = cpu_to_be32(mdev->ldev->md.al_offset);
2819         buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
2820         buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
2821         buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);
2822
2823         buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);
2824         buffer->la_peer_max_bio_size = cpu_to_be32(mdev->peer_max_bio_size);
2825
2826         D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
2827         sector = mdev->ldev->md.md_offset;
2828
2829         if (drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
2830                 /* this was a try anyway ... */
2831                 dev_err(DEV, "meta data update failed!\n");
2832                 drbd_chk_io_error(mdev, 1, true);
2833         }
2834
2835         /* Update mdev->ldev->md.la_size_sect,
2836          * since we updated it on metadata. */
2837         mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);
2838
2839         mutex_unlock(&mdev->md_io_mutex);
2840         put_ldev(mdev);
2841 }
2842
2843 /**
2844  * drbd_md_read() - Reads in the meta data super block
2845  * @mdev:       DRBD device.
2846  * @bdev:       Device from which the meta data should be read in.
2847  *
2848  * Return 0 (NO_ERROR) on success, and an enum drbd_ret_code in case
2849  * something goes wrong.  Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
2850  */
2851 int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
2852 {
2853         struct meta_data_on_disk *buffer;
2854         int i, rv = NO_ERROR;
2855
2856         if (!get_ldev_if_state(mdev, D_ATTACHING))
2857                 return ERR_IO_MD_DISK;
2858
2859         mutex_lock(&mdev->md_io_mutex);
2860         buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
2861
2862         if (drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
2863                 /* NOTE: can't do normal error processing here as this is
2864                    called BEFORE disk is attached */
2865                 dev_err(DEV, "Error while reading metadata.\n");
2866                 rv = ERR_IO_MD_DISK;
2867                 goto err;
2868         }
2869
2870         if (buffer->magic != cpu_to_be32(DRBD_MD_MAGIC)) {
2871                 dev_err(DEV, "Error while reading metadata, magic not found.\n");
2872                 rv = ERR_MD_INVALID;
2873                 goto err;
2874         }
2875         if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
2876                 dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
2877                     be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
2878                 rv = ERR_MD_INVALID;
2879                 goto err;
2880         }
2881         if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
2882                 dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
2883                     be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
2884                 rv = ERR_MD_INVALID;
2885                 goto err;
2886         }
2887         if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
2888                 dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
2889                     be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
2890                 rv = ERR_MD_INVALID;
2891                 goto err;
2892         }
2893
2894         if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
2895                 dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
2896                     be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
2897                 rv = ERR_MD_INVALID;
2898                 goto err;
2899         }
2900
2901         bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
2902         for (i = UI_CURRENT; i < UI_SIZE; i++)
2903                 bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
2904         bdev->md.flags = be32_to_cpu(buffer->flags);
2905         bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);
2906
2907         spin_lock_irq(&mdev->tconn->req_lock);
2908         if (mdev->state.conn < C_CONNECTED) {
2909                 int peer;
2910                 peer = be32_to_cpu(buffer->la_peer_max_bio_size);
2911                 peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
2912                 mdev->peer_max_bio_size = peer;
2913         }
2914         spin_unlock_irq(&mdev->tconn->req_lock);
2915
2916         mutex_lock(&mdev->tconn->conf_update);
2917         /* This block wants to get removed... */
2918         bdev->disk_conf->al_extents = be32_to_cpu(buffer->al_nr_extents);
2919         if (bdev->disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
2920                 bdev->disk_conf->al_extents = DRBD_AL_EXTENTS_DEF;
2921         mutex_unlock(&mdev->tconn->conf_update);
2922
2923  err:
2924         mutex_unlock(&mdev->md_io_mutex);
2925         put_ldev(mdev);
2926
2927         return rv;
2928 }
2929
2930 /**
2931  * drbd_md_mark_dirty() - Mark meta data super block as dirty
2932  * @mdev:       DRBD device.
2933  *
2934  * Call this function if you change anything that should be written to
2935  * the meta-data super block. This function sets MD_DIRTY, and arms a
2936  * timer that ensures drbd_md_sync() gets called within five seconds.
2937  */
#ifdef DEBUG
void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
{
        if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
                mod_timer(&mdev->md_sync_timer, jiffies + HZ);
                mdev->last_md_mark_dirty.line = line;
                mdev->last_md_mark_dirty.func = func;
        }
}
#else
void drbd_md_mark_dirty(struct drbd_conf *mdev)
{
        if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
                mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
}
#endif
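
/*
 * Illustrative usage sketch (comment only, not driver code): any path
 * that mutates on-disk meta data state is expected to mark the super
 * block dirty right after the change, as _drbd_uuid_set() below does:
 *
 *	mdev->ldev->md.uuid[idx] = val;
 *	drbd_md_mark_dirty(mdev);
 *
 * If nobody calls drbd_md_sync() explicitly, the md_sync_timer armed
 * above fires and the worker performs the sync (see w_md_sync()).
 */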

static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
{
        int i;

        for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
                mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
}

void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
        if (idx == UI_CURRENT) {
                if (mdev->state.role == R_PRIMARY)
                        val |= 1;
                else
                        val &= ~((u64)1);

                drbd_set_ed_uuid(mdev, val);
        }

        mdev->ldev->md.uuid[idx] = val;
        drbd_md_mark_dirty(mdev);
}

void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
        if (mdev->ldev->md.uuid[idx]) {
                drbd_uuid_move_history(mdev);
                mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
        }
        _drbd_uuid_set(mdev, idx, val);
}

/**
 * drbd_uuid_new_current() - Creates a new current UUID
 * @mdev:       DRBD device.
 *
 * Creates a new current UUID, and rotates the old current UUID into
 * the bitmap slot. Causes an incremental resync upon next connect.
 */
void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
{
        u64 val;
        unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];

        if (bm_uuid)
                dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);

        mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];

        get_random_bytes(&val, sizeof(u64));
        _drbd_uuid_set(mdev, UI_CURRENT, val);
        drbd_print_uuids(mdev, "new current UUID");
        /* get it to stable storage _now_ */
        drbd_md_sync(mdev);
}
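
/*
 * Illustrative rotation sketch (comment only): suppose, while
 * disconnected, the UUID slots hold
 *
 *	UI_CURRENT = C, UI_BITMAP = 0, UI_HISTORY_START = H
 *
 * After drbd_uuid_new_current() they hold
 *
 *	UI_CURRENT = <random>, UI_BITMAP = C, UI_HISTORY_START = H
 *
 * On the next connect the peer still presents C; matching it against
 * our bitmap UUID identifies the common ancestor, and the bitmap then
 * describes exactly the blocks that need the incremental resync.
 */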

void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
{
        if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
                return;

        if (val == 0) {
                drbd_uuid_move_history(mdev);
                mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
                mdev->ldev->md.uuid[UI_BITMAP] = 0;
        } else {
                unsigned long long bm_uuid = mdev->ldev->md.uuid[UI_BITMAP];
                if (bm_uuid)
                        dev_warn(DEV, "bm UUID was already set: %llX\n", bm_uuid);

                mdev->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
        }
        drbd_md_mark_dirty(mdev);
}

/**
 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev:       DRBD device.
 *
 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_set_n_write(struct drbd_conf *mdev)
{
        int rv = -EIO;

        if (get_ldev_if_state(mdev, D_ATTACHING)) {
                drbd_md_set_flag(mdev, MDF_FULL_SYNC);
                drbd_md_sync(mdev);
                drbd_bm_set_all(mdev);

                rv = drbd_bm_write(mdev);

                if (!rv) {
                        drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
                        drbd_md_sync(mdev);
                }

                put_ldev(mdev);
        }

        return rv;
}
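
/*
 * Hedged usage sketch: drbd_bmio_set_n_write() is meant to be passed
 * as the io_fn of the bitmap IO helpers, e.g. from an attach path
 * (illustrative call; the done callback and argument strings may
 * differ at the real call sites):
 *
 *	drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync,
 *			     "set_n_write from attaching", BM_LOCKED_MASK);
 */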

/**
 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev:       DRBD device.
 *
 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
{
        int rv = -EIO;

        drbd_resume_al(mdev);
        if (get_ldev_if_state(mdev, D_ATTACHING)) {
                drbd_bm_clear_all(mdev);
                rv = drbd_bm_write(mdev);
                put_ldev(mdev);
        }

        return rv;
}

static int w_bitmap_io(struct drbd_work *w, int unused)
{
        struct bm_io_work *work = container_of(w, struct bm_io_work, w);
        struct drbd_conf *mdev = w->mdev;
        int rv = -EIO;

        D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);

        if (get_ldev(mdev)) {
                drbd_bm_lock(mdev, work->why, work->flags);
                rv = work->io_fn(mdev);
                drbd_bm_unlock(mdev);
                put_ldev(mdev);
        }

        clear_bit_unlock(BITMAP_IO, &mdev->flags);
        wake_up(&mdev->misc_wait);

        if (work->done)
                work->done(mdev, rv);

        clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
        work->why = NULL;
        work->flags = 0;

        return 0;
}

void drbd_ldev_destroy(struct drbd_conf *mdev)
{
        lc_destroy(mdev->resync);
        mdev->resync = NULL;
        lc_destroy(mdev->act_log);
        mdev->act_log = NULL;
        __no_warn(local,
                drbd_free_bc(mdev->ldev);
                mdev->ldev = NULL;);

        clear_bit(GO_DISKLESS, &mdev->flags);
}

static int w_go_diskless(struct drbd_work *w, int unused)
{
        struct drbd_conf *mdev = w->mdev;

        D_ASSERT(mdev->state.disk == D_FAILED);
        /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
         * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
         * the protected members anymore, though, so once put_ldev reaches zero
         * again, it will be safe to free them. */
        drbd_force_state(mdev, NS(disk, D_DISKLESS));
        return 0;
}

void drbd_go_diskless(struct drbd_conf *mdev)
{
        D_ASSERT(mdev->state.disk == D_FAILED);
        if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
                drbd_queue_work(&mdev->tconn->data.work, &mdev->go_diskless);
}

/**
 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
 * @mdev:       DRBD device.
 * @io_fn:      IO callback to be called when bitmap IO is possible
 * @done:       callback to be called after the bitmap IO was performed
 * @why:        Descriptive text of the reason for doing the IO
 * @flags:      Bitmap locking flags from &enum bm_flag
 *
 * While IO on the bitmap is in progress, application IO is frozen; this
 * ensures that drbd_set_out_of_sync() can not be called. This function
 * MAY ONLY be called from worker context. It MUST NOT be used while a
 * previous such work is still pending!
 */
void drbd_queue_bitmap_io(struct drbd_conf *mdev,
                          int (*io_fn)(struct drbd_conf *),
                          void (*done)(struct drbd_conf *, int),
                          char *why, enum bm_flag flags)
{
        D_ASSERT(current == mdev->tconn->worker.task);

        D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
        D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
        D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
        if (mdev->bm_io_work.why)
                dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
                        why, mdev->bm_io_work.why);

        mdev->bm_io_work.io_fn = io_fn;
        mdev->bm_io_work.done = done;
        mdev->bm_io_work.why = why;
        mdev->bm_io_work.flags = flags;

        spin_lock_irq(&mdev->tconn->req_lock);
        set_bit(BITMAP_IO, &mdev->flags);
        if (atomic_read(&mdev->ap_bio_cnt) == 0) {
                if (!test_and_set_bit(BITMAP_IO_QUEUED, &mdev->flags))
                        drbd_queue_work(&mdev->tconn->data.work, &mdev->bm_io_work.w);
        }
        spin_unlock_irq(&mdev->tconn->req_lock);
}
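
/*
 * Note (informational sketch): if application IO is still in flight at
 * the point above, only BITMAP_IO gets set; the work is expected to be
 * queued later, once ap_bio_cnt drops to zero (see the dec_ap_bio()
 * path), so the io_fn still runs with application IO fully drained.
 */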

/**
 * drbd_bitmap_io() - Does an IO operation on the whole bitmap
 * @mdev:       DRBD device.
 * @io_fn:      IO callback to be called when bitmap IO is possible
 * @why:        Descriptive text of the reason for doing the IO
 * @flags:      Bitmap locking flags from &enum bm_flag
 *
 * Freezes application IO while the actual IO operation runs. This
 * function MAY NOT be called from worker context.
 */
int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *),
                char *why, enum bm_flag flags)
{
        int rv;

        D_ASSERT(current != mdev->tconn->worker.task);

        if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
                drbd_suspend_io(mdev);

        drbd_bm_lock(mdev, why, flags);
        rv = io_fn(mdev);
        drbd_bm_unlock(mdev);

        if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
                drbd_resume_io(mdev);

        return rv;
}
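
/*
 * Hedged usage sketch (illustrative, not a real call site): from
 * non-worker context, an invalidate-style operation could run as
 *
 *	int rv = drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
 *				"set_n_write from invalidate", BM_LOCKED_MASK);
 *
 * The BM_LOCKED_* flags decide whether application IO is suspended
 * around io_fn (see the BM_LOCKED_SET_ALLOWED checks above).
 */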

void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
        if ((mdev->ldev->md.flags & flag) != flag) {
                drbd_md_mark_dirty(mdev);
                mdev->ldev->md.flags |= flag;
        }
}

void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
        if ((mdev->ldev->md.flags & flag) != 0) {
                drbd_md_mark_dirty(mdev);
                mdev->ldev->md.flags &= ~flag;
        }
}

int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
{
        return (bdev->md.flags & flag) != 0;
}

static void md_sync_timer_fn(unsigned long data)
{
        struct drbd_conf *mdev = (struct drbd_conf *) data;

        drbd_queue_work_front(&mdev->tconn->data.work, &mdev->md_sync_work);
}

static int w_md_sync(struct drbd_work *w, int unused)
{
        struct drbd_conf *mdev = w->mdev;

        dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
#ifdef DEBUG
        dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
                mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
#endif
        drbd_md_sync(mdev);
        return 0;
}

const char *cmdname(enum drbd_packet cmd)
{
        /* THINK may need to become several global tables
         * when we want to support more than
         * one PRO_VERSION */
        static const char *cmdnames[] = {
                [P_DATA]                = "Data",
                [P_DATA_REPLY]          = "DataReply",
                [P_RS_DATA_REPLY]       = "RSDataReply",
                [P_BARRIER]             = "Barrier",
                [P_BITMAP]              = "ReportBitMap",
                [P_BECOME_SYNC_TARGET]  = "BecomeSyncTarget",
                [P_BECOME_SYNC_SOURCE]  = "BecomeSyncSource",
                [P_UNPLUG_REMOTE]       = "UnplugRemote",
                [P_DATA_REQUEST]        = "DataRequest",
                [P_RS_DATA_REQUEST]     = "RSDataRequest",
                [P_SYNC_PARAM]          = "SyncParam",
                [P_SYNC_PARAM89]        = "SyncParam89",
                [P_PROTOCOL]            = "ReportProtocol",
                [P_UUIDS]               = "ReportUUIDs",
                [P_SIZES]               = "ReportSizes",
                [P_STATE]               = "ReportState",
                [P_SYNC_UUID]           = "ReportSyncUUID",
                [P_AUTH_CHALLENGE]      = "AuthChallenge",
                [P_AUTH_RESPONSE]       = "AuthResponse",
                [P_PING]                = "Ping",
                [P_PING_ACK]            = "PingAck",
                [P_RECV_ACK]            = "RecvAck",
                [P_WRITE_ACK]           = "WriteAck",
                [P_RS_WRITE_ACK]        = "RSWriteAck",
                [P_DISCARD_WRITE]       = "DiscardWrite",
                [P_NEG_ACK]             = "NegAck",
                [P_NEG_DREPLY]          = "NegDReply",
                [P_NEG_RS_DREPLY]       = "NegRSDReply",
                [P_BARRIER_ACK]         = "BarrierAck",
                [P_STATE_CHG_REQ]       = "StateChgRequest",
                [P_STATE_CHG_REPLY]     = "StateChgReply",
                [P_OV_REQUEST]          = "OVRequest",
                [P_OV_REPLY]            = "OVReply",
                [P_OV_RESULT]           = "OVResult",
                [P_CSUM_RS_REQUEST]     = "CsumRSRequest",
                [P_RS_IS_IN_SYNC]       = "CsumRSIsInSync",
                [P_COMPRESSED_BITMAP]   = "CBitmap",
                [P_DELAY_PROBE]         = "DelayProbe",
                [P_OUT_OF_SYNC]         = "OutOfSync",
                [P_RETRY_WRITE]         = "RetryWrite",
                [P_RS_CANCEL]           = "RSCancel",
                [P_CONN_ST_CHG_REQ]     = "conn_st_chg_req",
                [P_CONN_ST_CHG_REPLY]   = "conn_st_chg_reply",

                /* enum drbd_packet, but not commands - obsoleted flags:
                 *      P_MAY_IGNORE
                 *      P_MAX_OPT_CMD
                 */
        };

        /* too big for the array: 0xfffX */
        if (cmd == P_INITIAL_META)
                return "InitialMeta";
        if (cmd == P_INITIAL_DATA)
                return "InitialData";
        if (cmd == P_CONNECTION_FEATURES)
                return "ConnectionFeatures";
        if (cmd >= ARRAY_SIZE(cmdnames))
                return "Unknown";
        return cmdnames[cmd];
}
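
/*
 * Typical (illustrative) use: turning a received packet type into
 * readable log output, e.g. in a receiver error path:
 *
 *	dev_err(DEV, "unexpected packet type %s\n", cmdname(pi.cmd));
 */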

/**
 * drbd_wait_misc  -  wait for a request to make progress
 * @mdev:       device associated with the request
 * @i:          the struct drbd_interval embedded in struct drbd_request or
 *              struct drbd_peer_request
 *
 * Must be called with the req_lock held; the lock is dropped while
 * sleeping and re-acquired before returning.
 */
int drbd_wait_misc(struct drbd_conf *mdev, struct drbd_interval *i)
{
        struct net_conf *nc;
        DEFINE_WAIT(wait);
        long timeout;

        rcu_read_lock();
        nc = rcu_dereference(mdev->tconn->net_conf);
        if (!nc) {
                rcu_read_unlock();
                return -ETIMEDOUT;
        }
        timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
        rcu_read_unlock();

        /* Indicate to wake up mdev->misc_wait on progress.  */
        i->waiting = true;
        prepare_to_wait(&mdev->misc_wait, &wait, TASK_INTERRUPTIBLE);
        spin_unlock_irq(&mdev->tconn->req_lock);
        timeout = schedule_timeout(timeout);
        finish_wait(&mdev->misc_wait, &wait);
        spin_lock_irq(&mdev->tconn->req_lock);
        if (!timeout || mdev->state.conn < C_CONNECTED)
                return -ETIMEDOUT;
        if (signal_pending(current))
                return -ERESTARTSYS;
        return 0;
}
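
/*
 * Hedged caller sketch: callers hold the req_lock and typically retry
 * in a loop, since the lock is dropped while waiting:
 *
 *	spin_lock_irq(&mdev->tconn->req_lock);
 *	while (<interval i still busy>) {
 *		err = drbd_wait_misc(mdev, i);
 *		if (err)
 *			break;
 *	}
 *	spin_unlock_irq(&mdev->tconn->req_lock);
 */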

#ifdef CONFIG_DRBD_FAULT_INJECTION
/* Fault insertion support including random number generator shamelessly
 * stolen from kernel/rcutorture.c */
struct fault_random_state {
        unsigned long state;
        unsigned long count;
};

#define FAULT_RANDOM_MULT    39916801  /* prime */
#define FAULT_RANDOM_ADD    479001701  /* prime */
#define FAULT_RANDOM_REFRESH    10000

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static unsigned long
_drbd_fault_random(struct fault_random_state *rsp)
{
        long refresh;

        if (!rsp->count--) {
                get_random_bytes(&refresh, sizeof(refresh));
                rsp->state += refresh;
                rsp->count = FAULT_RANDOM_REFRESH;
        }
        rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
        return swahw32(rsp->state);
}

static char *
_drbd_fault_str(unsigned int type)
{
        static char *_faults[] = {
                [DRBD_FAULT_MD_WR] = "Meta-data write",
                [DRBD_FAULT_MD_RD] = "Meta-data read",
                [DRBD_FAULT_RS_WR] = "Resync write",
                [DRBD_FAULT_RS_RD] = "Resync read",
                [DRBD_FAULT_DT_WR] = "Data write",
                [DRBD_FAULT_DT_RD] = "Data read",
                [DRBD_FAULT_DT_RA] = "Data read ahead",
                [DRBD_FAULT_BM_ALLOC] = "BM allocation",
                [DRBD_FAULT_AL_EE] = "EE allocation",
                [DRBD_FAULT_RECEIVE] = "receive data corruption",
        };

        return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
}

unsigned int
_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
{
        static struct fault_random_state rrs = {0, 0};

        unsigned int ret = (
                (fault_devs == 0 ||
                        ((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
                (((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));

        if (ret) {
                fault_count++;

                if (__ratelimit(&drbd_ratelimit_state))
                        dev_warn(DEV, "***Simulating %s failure\n",
                                _drbd_fault_str(type));
        }

        return ret;
}
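
/*
 * Illustrative parameter sketch (assuming the usual drbd fault
 * injection module parameters): with fault_rate=10 and fault_devs=0x5,
 * roughly 10% of eligible requests fail, but only on minors 0 and 2:
 *
 *	modprobe drbd fault_rate=10 fault_devs=5 enable_faults=<type mask>
 *
 * fault_devs == 0 selects all devices; the DRBD_FAULT_* names in
 * _drbd_fault_str() above describe what each armed fault type hits.
 */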
#endif

const char *drbd_buildtag(void)
{
        /* When DRBD is built from external sources, this holds a
           reference to the git hash of the source code. */

        static char buildtag[38] = "\0uilt-in";

        if (buildtag[0] == 0) {
#ifdef CONFIG_MODULES
                if (THIS_MODULE != NULL)
                        sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
                else
#endif
                        buildtag[0] = 'b';
        }

        return buildtag;
}

module_init(drbd_init)
module_exit(drbd_cleanup)

EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
EXPORT_SYMBOL(drbd_set_st_err_str);