drbd: drbd_adm_get_status needs to show some more detail
[firefly-linux-kernel-4.4.55.git] drivers/block/drbd/drbd_nl.c
1 /*
2    drbd_nl.c
3
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10    drbd is free software; you can redistribute it and/or modify
11    it under the terms of the GNU General Public License as published by
12    the Free Software Foundation; either version 2, or (at your option)
13    any later version.
14
15    drbd is distributed in the hope that it will be useful,
16    but WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18    GNU General Public License for more details.
19
20    You should have received a copy of the GNU General Public License
21    along with drbd; see the file COPYING.  If not, write to
22    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23
24  */
25
26 #include <linux/module.h>
27 #include <linux/drbd.h>
28 #include <linux/in.h>
29 #include <linux/fs.h>
30 #include <linux/file.h>
31 #include <linux/slab.h>
32 #include <linux/blkpg.h>
33 #include <linux/cpumask.h>
34 #include "drbd_int.h"
35 #include "drbd_req.h"
36 #include "drbd_wrappers.h"
37 #include <asm/unaligned.h>
38 #include <linux/drbd_limits.h>
39 #include <linux/kthread.h>
40
41 #include <net/genetlink.h>
42
43 /* .doit */
44 // int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
45 // int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
46
47 int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
48 int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);
49
50 int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info);
51 int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info);
52
53 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
54 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
55 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
56 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
57 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
58 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
59 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
60 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
61 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
62 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
63 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
64 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
65 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
66 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
67 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
68 int drbd_adm_syncer(struct sk_buff *skb, struct genl_info *info);
69 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
70 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
71 /* .dumpit */
72 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
73
74 #include <linux/drbd_genl_api.h>
75 #include <linux/genl_magic_func.h>
76
77 /* used with blkdev_get_by_path() to claim our meta data device(s) */
78 static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
79
80 /* Configuration is strictly serialized, because generic netlink message
81  * processing is strictly serialized by the genl_lock().
82  * Which means we can use one static global drbd_config_context struct.
83  */
84 static struct drbd_config_context {
85         /* assigned from drbd_genlmsghdr */
86         unsigned int minor;
87         /* assigned from request attributes, if present */
88         unsigned int volume;
89 #define VOLUME_UNSPECIFIED              (-1U)
90         /* pointer into the request skb,
91          * limited lifetime! */
92         char *conn_name;
93
94         /* reply buffer */
95         struct sk_buff *reply_skb;
96         /* pointer into reply buffer */
97         struct drbd_genlmsghdr *reply_dh;
98         /* resolved from attributes, if possible */
99         struct drbd_conf *mdev;
100         struct drbd_tconn *tconn;
101 } adm_ctx;
102
103 static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
104 {
105         genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
106         if (genlmsg_reply(skb, info))
107                 printk(KERN_ERR "drbd: error sending genl reply\n");
108 }
109
110 /* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
111  * reason it could fail would be lack of space in the skb, and 4k are available. */
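/* A typical caller attaches a human readable reason and then returns an ERR_*
 * code, e.g. drbd_msg_put_info("unknown minor") in drbd_adm_prepare() below. */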
112 int drbd_msg_put_info(const char *info)
113 {
114         struct sk_buff *skb = adm_ctx.reply_skb;
115         struct nlattr *nla;
116         int err = -EMSGSIZE;
117
118         if (!info || !info[0])
119                 return 0;
120
121         nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
122         if (!nla)
123                 return err;
124
125         err = nla_put_string(skb, T_info_text, info);
126         if (err) {
127                 nla_nest_cancel(skb, nla);
128                 return err;
129         } else
130                 nla_nest_end(skb, nla);
131         return 0;
132 }
133
134 /* This would be a good candidate for a "pre_doit" hook,
135  * and per-family private info->pointers.
136  * But we need to stay compatible with older kernels.
137  * If it returns successfully, adm_ctx members are valid.
138  */
139 #define DRBD_ADM_NEED_MINOR     1
140 #define DRBD_ADM_NEED_CONN      2
141 static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
142                 unsigned flags)
143 {
144         struct drbd_genlmsghdr *d_in = info->userhdr;
145         const u8 cmd = info->genlhdr->cmd;
146         int err;
147
148         memset(&adm_ctx, 0, sizeof(adm_ctx));
149
150         /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
151         if (cmd != DRBD_ADM_GET_STATUS
152         && security_netlink_recv(skb, CAP_SYS_ADMIN))
153                return -EPERM;
154
155         adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
156         if (!adm_ctx.reply_skb)
157                 goto fail;
158
159         adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
160                                         info, &drbd_genl_family, 0, cmd);
161         /* putting a few bytes into a fresh skb of >= 4k will always succeed,
162          * but check anyway */
163         if (!adm_ctx.reply_dh)
164                 goto fail;
165
166         adm_ctx.reply_dh->minor = d_in->minor;
167         adm_ctx.reply_dh->ret_code = NO_ERROR;
168
169         if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
170                 struct nlattr *nla;
171                 /* parse and validate only */
172                 err = drbd_cfg_context_from_attrs(NULL, info->attrs);
173                 if (err)
174                         goto fail;
175
176                 /* It was present, and valid,
177                  * copy it over to the reply skb. */
178                 err = nla_put_nohdr(adm_ctx.reply_skb,
179                                 info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
180                                 info->attrs[DRBD_NLA_CFG_CONTEXT]);
181                 if (err)
182                         goto fail;
183
184                 /* and assign stuff to the global adm_ctx */
185                 nla = nested_attr_tb[__nla_type(T_ctx_volume)];
186                 adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
187                 nla = nested_attr_tb[__nla_type(T_ctx_conn_name)];
188                 if (nla)
189                         adm_ctx.conn_name = nla_data(nla);
190         } else
191                 adm_ctx.volume = VOLUME_UNSPECIFIED;
192
193         adm_ctx.minor = d_in->minor;
194         adm_ctx.mdev = minor_to_mdev(d_in->minor);
195         adm_ctx.tconn = conn_by_name(adm_ctx.conn_name);
196
197         if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
198                 drbd_msg_put_info("unknown minor");
199                 return ERR_MINOR_INVALID;
200         }
201         if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
202                 drbd_msg_put_info("unknown connection");
203                 return ERR_INVALID_REQUEST;
204         }
205
206         /* some more paranoia, if the request was over-determined */
207         if (adm_ctx.mdev &&
208             adm_ctx.volume != VOLUME_UNSPECIFIED &&
209             adm_ctx.volume != adm_ctx.mdev->vnr) {
210                 pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
211                                 adm_ctx.minor, adm_ctx.volume,
212                                 adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
213                 drbd_msg_put_info("over-determined configuration context mismatch");
214                 return ERR_INVALID_REQUEST;
215         }
216         if (adm_ctx.mdev && adm_ctx.tconn &&
217             adm_ctx.mdev->tconn != adm_ctx.tconn) {
218                 pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
219                                 adm_ctx.minor, adm_ctx.conn_name, adm_ctx.mdev->tconn->name);
220                 drbd_msg_put_info("over-determined configuration context mismatch");
221                 return ERR_INVALID_REQUEST;
222         }
223         return NO_ERROR;
224
225 fail:
226         nlmsg_free(adm_ctx.reply_skb);
227         adm_ctx.reply_skb = NULL;
228         return -ENOMEM;
229 }
230
231 static int drbd_adm_finish(struct genl_info *info, int retcode)
232 {
233         struct nlattr *nla;
234         const char *conn_name = NULL;
235
236         if (!adm_ctx.reply_skb)
237                 return -ENOMEM;
238
239         adm_ctx.reply_dh->ret_code = retcode;
240
241         nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
242         if (nla) {
243                 nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
244                 if (nla)
245                         conn_name = nla_data(nla);
246         }
247
248         drbd_adm_send_reply(adm_ctx.reply_skb, info);
249         return 0;
250 }
251
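/* Run the configured user mode helper (typically /sbin/drbdadm) for this minor,
 * e.g. "fence-peer minor-0"; DRBD_PEER_AF and DRBD_PEER_ADDRESS are exported
 * when a peer address is configured (the example values are illustrative). */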
252 int drbd_khelper(struct drbd_conf *mdev, char *cmd)
253 {
254         char *envp[] = { "HOME=/",
255                         "TERM=linux",
256                         "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
257                         NULL, /* Will be set to address family */
258                         NULL, /* Will be set to address */
259                         NULL };
260         char mb[12], af[20], ad[60], *afs;
261         char *argv[] = {usermode_helper, cmd, mb, NULL };
262         struct sib_info sib;
263         int ret;
264
265         snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
266
267         if (get_net_conf(mdev->tconn)) {
268                 switch (((struct sockaddr *)mdev->tconn->net_conf->peer_addr)->sa_family) {
269                 case AF_INET6:
270                         afs = "ipv6";
271                         snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
272                                  &((struct sockaddr_in6 *)mdev->tconn->net_conf->peer_addr)->sin6_addr);
273                         break;
274                 case AF_INET:
275                         afs = "ipv4";
276                         snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
277                                  &((struct sockaddr_in *)mdev->tconn->net_conf->peer_addr)->sin_addr);
278                         break;
279                 default:
280                         afs = "ssocks";
281                         snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
282                                  &((struct sockaddr_in *)mdev->tconn->net_conf->peer_addr)->sin_addr);
283                 }
284                 snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
285                 envp[3]=af;
286                 envp[4]=ad;
287                 put_net_conf(mdev->tconn);
288         }
289
290         /* The helper may take some time.
291          * write out any unsynced meta data changes now */
292         drbd_md_sync(mdev);
293
294         dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
295         sib.sib_reason = SIB_HELPER_PRE;
296         sib.helper_name = cmd;
297         drbd_bcast_event(mdev, &sib);
298         ret = call_usermodehelper(usermode_helper, argv, envp, 1);
299         if (ret)
300                 dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
301                                 usermode_helper, cmd, mb,
302                                 (ret >> 8) & 0xff, ret);
303         else
304                 dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
305                                 usermode_helper, cmd, mb,
306                                 (ret >> 8) & 0xff, ret);
307         sib.sib_reason = SIB_HELPER_POST;
308         sib.helper_exit_code = ret;
309         drbd_bcast_event(mdev, &sib);
310
311         if (ret < 0) /* Ignore any ERRNOs we got. */
312                 ret = 0;
313
314         return ret;
315 }
316
317 enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
318 {
319         char *ex_to_string;
320         int r;
321         enum drbd_disk_state nps;
322         enum drbd_fencing_p fp;
323
324         D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
325
326         if (get_ldev_if_state(mdev, D_CONSISTENT)) {
327                 fp = mdev->ldev->dc.fencing;
328                 put_ldev(mdev);
329         } else {
330                 dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
331                 nps = mdev->state.pdsk;
332                 goto out;
333         }
334
335         r = drbd_khelper(mdev, "fence-peer");
336
337         switch ((r>>8) & 0xff) {
338         case 3: /* peer is inconsistent */
339                 ex_to_string = "peer is inconsistent or worse";
340                 nps = D_INCONSISTENT;
341                 break;
342         case 4: /* peer got outdated, or was already outdated */
343                 ex_to_string = "peer was fenced";
344                 nps = D_OUTDATED;
345                 break;
346         case 5: /* peer was down */
347                 if (mdev->state.disk == D_UP_TO_DATE) {
348                         /* we will(have) create(d) a new UUID anyways... */
349                         ex_to_string = "peer is unreachable, assumed to be dead";
350                         nps = D_OUTDATED;
351                 } else {
352                         ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
353                         nps = mdev->state.pdsk;
354                 }
355                 break;
356         case 6: /* Peer is primary, voluntarily outdate myself.
357                  * This is useful when an unconnected R_SECONDARY is asked to
358                  * become R_PRIMARY, but finds the other peer being active. */
359                 ex_to_string = "peer is active";
360                 dev_warn(DEV, "Peer is primary, outdating myself.\n");
361                 nps = D_UNKNOWN;
362                 _drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE);
363                 break;
364         case 7:
365                 if (fp != FP_STONITH)
366                         dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n");
367                 ex_to_string = "peer was stonithed";
368                 nps = D_OUTDATED;
369                 break;
370         default:
371                 /* The script is broken ... */
372                 nps = D_UNKNOWN;
373                 dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
374                 return nps;
375         }
376
377         dev_info(DEV, "fence-peer helper returned %d (%s)\n",
378                         (r>>8) & 0xff, ex_to_string);
379
380 out:
381         if (mdev->state.susp_fen && nps >= D_UNKNOWN) {
382                 /* The handler was not successful... unfreeze here, the
383                    state engine can not unfreeze... */
384                 _drbd_request_state(mdev, NS(susp_fen, 0), CS_VERBOSE);
385         }
386
387         return nps;
388 }
389
390 static int _try_outdate_peer_async(void *data)
391 {
392         struct drbd_conf *mdev = (struct drbd_conf *)data;
393         enum drbd_disk_state nps;
394         union drbd_state ns;
395
396         nps = drbd_try_outdate_peer(mdev);
397
398         /* Not using
399            drbd_request_state(mdev, NS(pdsk, nps));
400            here, because we might have been able to re-establish the connection
401            in the meantime. This can only partially be solved in the state
402            engine's is_valid_state() and is_valid_state_transition()
403            functions.
404
405            nps can be D_INCONSISTENT, D_OUTDATED or D_UNKNOWN.
406            pdsk == D_INCONSISTENT while conn >= C_CONNECTED is valid,
407            therefore we have to have the pre state change check here.
408         */
409         spin_lock_irq(&mdev->tconn->req_lock);
410         ns = mdev->state;
411         if (ns.conn < C_WF_REPORT_PARAMS) {
412                 ns.pdsk = nps;
413                 _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
414         }
415         spin_unlock_irq(&mdev->tconn->req_lock);
416
417         return 0;
418 }
419
420 void drbd_try_outdate_peer_async(struct drbd_conf *mdev)
421 {
422         struct task_struct *opa;
423
424         opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev));
425         if (IS_ERR(opa))
426                 dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n");
427 }
428
429 enum drbd_state_rv
430 drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
431 {
432         const int max_tries = 4;
433         enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
434         int try = 0;
435         int forced = 0;
436         union drbd_state mask, val;
437         enum drbd_disk_state nps;
438
439         if (new_role == R_PRIMARY)
440                 request_ping(mdev->tconn); /* Detect a dead peer ASAP */
441
442         mutex_lock(mdev->state_mutex);
443
444         mask.i = 0; mask.role = R_MASK;
445         val.i  = 0; val.role  = new_role;
446
447         while (try++ < max_tries) {
448                 rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);
449
450                 /* in case we first succeeded in outdating the peer,
451                  * but now suddenly could establish a connection */
452                 if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
453                         val.pdsk = 0;
454                         mask.pdsk = 0;
455                         continue;
456                 }
457
458                 if (rv == SS_NO_UP_TO_DATE_DISK && force &&
459                     (mdev->state.disk < D_UP_TO_DATE &&
460                      mdev->state.disk >= D_INCONSISTENT)) {
461                         mask.disk = D_MASK;
462                         val.disk  = D_UP_TO_DATE;
463                         forced = 1;
464                         continue;
465                 }
466
467                 if (rv == SS_NO_UP_TO_DATE_DISK &&
468                     mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
469                         D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
470                         nps = drbd_try_outdate_peer(mdev);
471
472                         if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
473                                 val.disk = D_UP_TO_DATE;
474                                 mask.disk = D_MASK;
475                         }
476
477                         val.pdsk = nps;
478                         mask.pdsk = D_MASK;
479
480                         continue;
481                 }
482
483                 if (rv == SS_NOTHING_TO_DO)
484                         goto out;
485                 if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
486                         nps = drbd_try_outdate_peer(mdev);
487
488                         if (force && nps > D_OUTDATED) {
489                                 dev_warn(DEV, "Forced into split brain situation!\n");
490                                 nps = D_OUTDATED;
491                         }
492
493                         mask.pdsk = D_MASK;
494                         val.pdsk  = nps;
495
496                         continue;
497                 }
498                 if (rv == SS_TWO_PRIMARIES) {
499                         /* Maybe the peer is detected as dead very soon...
500                            retry at most once more in this case. */
501                         schedule_timeout_interruptible((mdev->tconn->net_conf->ping_timeo+1)*HZ/10);
502                         if (try < max_tries)
503                                 try = max_tries - 1;
504                         continue;
505                 }
506                 if (rv < SS_SUCCESS) {
507                         rv = _drbd_request_state(mdev, mask, val,
508                                                 CS_VERBOSE + CS_WAIT_COMPLETE);
509                         if (rv < SS_SUCCESS)
510                                 goto out;
511                 }
512                 break;
513         }
514
515         if (rv < SS_SUCCESS)
516                 goto out;
517
518         if (forced)
519                 dev_warn(DEV, "Forced to consider local data as UpToDate!\n");
520
521         /* Wait until nothing is on the fly :) */
522         wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
523
524         if (new_role == R_SECONDARY) {
525                 set_disk_ro(mdev->vdisk, true);
526                 if (get_ldev(mdev)) {
527                         mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
528                         put_ldev(mdev);
529                 }
530         } else {
531                 if (get_net_conf(mdev->tconn)) {
532                         mdev->tconn->net_conf->want_lose = 0;
533                         put_net_conf(mdev->tconn);
534                 }
535                 set_disk_ro(mdev->vdisk, false);
536                 if (get_ldev(mdev)) {
537                         if (((mdev->state.conn < C_CONNECTED ||
538                                mdev->state.pdsk <= D_FAILED)
539                               && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
540                                 drbd_uuid_new_current(mdev);
541
542                         mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
543                         put_ldev(mdev);
544                 }
545         }
546
547         /* writeout of activity log covered areas of the bitmap
548          * to stable storage is already done in the after-state-change work */
549
550         if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
551                 /* if this was forced, we should consider sync */
552                 if (forced)
553                         drbd_send_uuids(mdev);
554                 drbd_send_state(mdev);
555         }
556
557         drbd_md_sync(mdev);
558
559         kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
560 out:
561         mutex_unlock(mdev->state_mutex);
562         return rv;
563 }
564
565 static const char *from_attrs_err_to_txt(int err)
566 {
567         return  err == -ENOMSG ? "required attribute missing" :
568                 err == -EOPNOTSUPP ? "unknown mandatory attribute" :
569                 "invalid attribute value";
570 }
571
572 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
573 {
574         struct set_role_parms parms;
575         int err;
576         enum drbd_ret_code retcode;
577
578         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
579         if (!adm_ctx.reply_skb)
580                 return retcode;
581         if (retcode != NO_ERROR)
582                 goto out;
583
584         memset(&parms, 0, sizeof(parms));
585         if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
586                 err = set_role_parms_from_attrs(&parms, info->attrs);
587                 if (err) {
588                         retcode = ERR_MANDATORY_TAG;
589                         drbd_msg_put_info(from_attrs_err_to_txt(err));
590                         goto out;
591                 }
592         }
593
594         if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
595                 retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
596         else
597                 retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
598 out:
599         drbd_adm_finish(info, retcode);
600         return 0;
601 }
602
603 /* initializes the md.*_offset members, so we are able to find
604  * the on disk meta data */
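/* For internal and flexible-internal meta data the super block is located via
 * drbd_md_ss__() at the end of the device; the activity log and bitmap are then
 * addressed by negative sector offsets in front of it (see the
 * DRBD_MD_INDEX_INTERNAL / DRBD_MD_INDEX_FLEX_INT cases below). */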
605 static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
606                                        struct drbd_backing_dev *bdev)
607 {
608         sector_t md_size_sect = 0;
609         switch (bdev->dc.meta_dev_idx) {
610         default:
611                 /* v07 style fixed size indexed meta data */
612                 bdev->md.md_size_sect = MD_RESERVED_SECT;
613                 bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
614                 bdev->md.al_offset = MD_AL_OFFSET;
615                 bdev->md.bm_offset = MD_BM_OFFSET;
616                 break;
617         case DRBD_MD_INDEX_FLEX_EXT:
618                 /* just occupy the full device; unit: sectors */
619                 bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
620                 bdev->md.md_offset = 0;
621                 bdev->md.al_offset = MD_AL_OFFSET;
622                 bdev->md.bm_offset = MD_BM_OFFSET;
623                 break;
624         case DRBD_MD_INDEX_INTERNAL:
625         case DRBD_MD_INDEX_FLEX_INT:
626                 bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
627                 /* al size is still fixed */
628                 bdev->md.al_offset = -MD_AL_SECTORS;
629                 /* we need (slightly less than) ~ this many bitmap sectors: */
630                 md_size_sect = drbd_get_capacity(bdev->backing_bdev);
631                 md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
632                 md_size_sect = BM_SECT_TO_EXT(md_size_sect);
633                 md_size_sect = ALIGN(md_size_sect, 8);
634
635                 /* plus the "drbd meta data super block",
636                  * and the activity log; */
637                 md_size_sect += MD_BM_OFFSET;
638
639                 bdev->md.md_size_sect = md_size_sect;
640                 /* bitmap offset is adjusted by 'super' block size */
641                 bdev->md.bm_offset   = -md_size_sect + MD_AL_OFFSET;
642                 break;
643         }
644 }
645
646 /* input size is expected to be in KB */
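/* e.g. ppsize(buf, 4) gives "4 KB", ppsize(buf, 1048576) gives "1024 MB"
 * (illustrative values; buf must hold at least 9 bytes, see below). */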
647 char *ppsize(char *buf, unsigned long long size)
648 {
649         /* Needs 9 bytes at max including trailing NUL:
650          * -1ULL ==> "16384 EB" */
651         static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
652         int base = 0;
653         while (size >= 10000 && base < sizeof(units)-1) {
654                 /* shift + round */
655                 size = (size >> 10) + !!(size & (1<<9));
656                 base++;
657         }
658         sprintf(buf, "%u %cB", (unsigned)size, units[base]);
659
660         return buf;
661 }
662
663 /* there is still a theoretical deadlock when called from receiver
664  * on a D_INCONSISTENT R_PRIMARY:
665  *  remote READ does inc_ap_bio, receiver would need to receive answer
666  *  packet from remote to dec_ap_bio again.
667  *  receiver receive_sizes(), comes here,
668  *  waits for ap_bio_cnt == 0. -> deadlock.
669  * but this cannot happen, actually, because:
670  *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
671  *  (not connected, or bad/no disk on peer):
672  *  see drbd_fail_request_early, ap_bio_cnt is zero.
673  *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
674  *  peer may not initiate a resize.
675  */
676 /* Note these are not to be confused with
677  * drbd_adm_suspend_io/drbd_adm_resume_io,
678  * which are (sub) state changes triggered by admin (drbdsetup),
679  * and can be long lived.
680  * This changes an mdev->flag, is triggered by drbd internals,
681  * and should be short-lived. */
682 void drbd_suspend_io(struct drbd_conf *mdev)
683 {
684         set_bit(SUSPEND_IO, &mdev->flags);
685         if (is_susp(mdev->state))
686                 return;
687         wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
688 }
689
690 void drbd_resume_io(struct drbd_conf *mdev)
691 {
692         clear_bit(SUSPEND_IO, &mdev->flags);
693         wake_up(&mdev->misc_wait);
694 }
695
696 /**
697  * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
698  * @mdev:       DRBD device.
699  *
700  * Returns 0 on success, negative return values indicate errors.
701  * You should call drbd_md_sync() after calling this function.
702  */
703 enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
704 {
705         sector_t prev_first_sect, prev_size; /* previous meta location */
706         sector_t la_size;
707         sector_t size;
708         char ppb[10];
709
710         int md_moved, la_size_changed;
711         enum determine_dev_size rv = unchanged;
712
713         /* race:
714          * application request passes inc_ap_bio,
715          * but then cannot get an AL-reference.
716          * this function later may wait on ap_bio_cnt == 0. -> deadlock.
717          *
718          * to avoid that:
719          * Suspend IO right here.
720          * still lock the act_log to not trigger ASSERTs there.
721          */
722         drbd_suspend_io(mdev);
723
724         /* no wait necessary anymore, actually we could assert that */
725         wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
726
727         prev_first_sect = drbd_md_first_sector(mdev->ldev);
728         prev_size = mdev->ldev->md.md_size_sect;
729         la_size = mdev->ldev->md.la_size_sect;
730
731         /* TODO: should only be some assert here, not (re)init... */
732         drbd_md_set_sector_offsets(mdev, mdev->ldev);
733
734         size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);
735
736         if (drbd_get_capacity(mdev->this_bdev) != size ||
737             drbd_bm_capacity(mdev) != size) {
738                 int err;
739                 err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
740                 if (unlikely(err)) {
741                         /* currently there is only one error: ENOMEM! */
742                         size = drbd_bm_capacity(mdev)>>1;
743                         if (size == 0) {
744                                 dev_err(DEV, "OUT OF MEMORY! "
745                                     "Could not allocate bitmap!\n");
746                         } else {
747                                 dev_err(DEV, "BM resizing failed. "
748                                     "Leaving size unchanged at size = %lu KB\n",
749                                     (unsigned long)size);
750                         }
751                         rv = dev_size_error;
752                 }
753                 /* racy, see comments above. */
754                 drbd_set_my_capacity(mdev, size);
755                 mdev->ldev->md.la_size_sect = size;
756                 dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
757                      (unsigned long long)size>>1);
758         }
759         if (rv == dev_size_error)
760                 goto out;
761
762         la_size_changed = (la_size != mdev->ldev->md.la_size_sect);
763
764         md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
765                 || prev_size       != mdev->ldev->md.md_size_sect;
766
767         if (la_size_changed || md_moved) {
768                 int err;
769
770                 drbd_al_shrink(mdev); /* All extents inactive. */
771                 dev_info(DEV, "Writing the whole bitmap, %s\n",
772                          la_size_changed && md_moved ? "size changed and md moved" :
773                          la_size_changed ? "size changed" : "md moved");
774                 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
775                 err = drbd_bitmap_io(mdev, &drbd_bm_write,
776                                 "size changed", BM_LOCKED_MASK);
777                 if (err) {
778                         rv = dev_size_error;
779                         goto out;
780                 }
781                 drbd_md_mark_dirty(mdev);
782         }
783
784         if (size > la_size)
785                 rv = grew;
786         if (size < la_size)
787                 rv = shrunk;
788 out:
789         lc_unlock(mdev->act_log);
790         wake_up(&mdev->al_wait);
791         drbd_resume_io(mdev);
792
793         return rv;
794 }
795
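/* Pick the new device size: essentially min(local capacity, peer capacity),
 * falling back to the last agreed size when one side is unknown; a user
 * requested size is honoured when it does not exceed what is available. */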
796 sector_t
797 drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
798 {
799         sector_t p_size = mdev->p_size;   /* partner's disk size. */
800         sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
801         sector_t m_size; /* my size */
802         sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
803         sector_t size = 0;
804
805         m_size = drbd_get_max_capacity(bdev);
806
807         if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
808                 dev_warn(DEV, "Resize while not connected was forced by the user!\n");
809                 p_size = m_size;
810         }
811
812         if (p_size && m_size) {
813                 size = min_t(sector_t, p_size, m_size);
814         } else {
815                 if (la_size) {
816                         size = la_size;
817                         if (m_size && m_size < size)
818                                 size = m_size;
819                         if (p_size && p_size < size)
820                                 size = p_size;
821                 } else {
822                         if (m_size)
823                                 size = m_size;
824                         if (p_size)
825                                 size = p_size;
826                 }
827         }
828
829         if (size == 0)
830                 dev_err(DEV, "Both nodes diskless!\n");
831
832         if (u_size) {
833                 if (u_size > size)
834                         dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
835                             (unsigned long)u_size>>1, (unsigned long)size>>1);
836                 else
837                         size = u_size;
838         }
839
840         return size;
841 }
842
843 /**
844  * drbd_check_al_size() - Ensures that the AL is of the right size
845  * @mdev:       DRBD device.
846  *
847  * Returns -EBUSY if the current AL LRU is still in use, -ENOMEM when allocation
848  * failed, and 0 on success. You should call drbd_md_sync() after calling
849  * this function.
850  */
851 static int drbd_check_al_size(struct drbd_conf *mdev)
852 {
853         struct lru_cache *n, *t;
854         struct lc_element *e;
855         unsigned int in_use;
856         int i;
857
858         if (!expect(mdev->sync_conf.al_extents >= DRBD_AL_EXTENTS_MIN))
859                 mdev->sync_conf.al_extents = DRBD_AL_EXTENTS_MIN;
860
861         if (mdev->act_log &&
862             mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
863                 return 0;
864
865         in_use = 0;
866         t = mdev->act_log;
867         n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
868                 mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);
869
870         if (n == NULL) {
871                 dev_err(DEV, "Cannot allocate act_log lru!\n");
872                 return -ENOMEM;
873         }
874         spin_lock_irq(&mdev->al_lock);
875         if (t) {
876                 for (i = 0; i < t->nr_elements; i++) {
877                         e = lc_element_by_index(t, i);
878                         if (e->refcnt)
879                                 dev_err(DEV, "refcnt(%d)==%d\n",
880                                     e->lc_number, e->refcnt);
881                         in_use += e->refcnt;
882                 }
883         }
884         if (!in_use)
885                 mdev->act_log = n;
886         spin_unlock_irq(&mdev->al_lock);
887         if (in_use) {
888                 dev_err(DEV, "Activity log still in use!\n");
889                 lc_destroy(n);
890                 return -EBUSY;
891         } else {
892                 if (t)
893                         lc_destroy(t);
894         }
895         drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
896         return 0;
897 }
898
899 static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
900 {
901         struct request_queue * const q = mdev->rq_queue;
902         int max_hw_sectors = max_bio_size >> 9;
903         int max_segments = 0;
904
905         if (get_ldev_if_state(mdev, D_ATTACHING)) {
906                 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
907
908                 max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
909                 max_segments = mdev->ldev->dc.max_bio_bvecs;
910                 put_ldev(mdev);
911         }
912
913         blk_queue_logical_block_size(q, 512);
914         blk_queue_max_hw_sectors(q, max_hw_sectors);
915         /* This is the workaround for "bio would need to, but cannot, be split" */
916         blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
917         blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
918
919         if (get_ldev_if_state(mdev, D_ATTACHING)) {
920                 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
921
922                 blk_queue_stack_limits(q, b);
923
924                 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
925                         dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
926                                  q->backing_dev_info.ra_pages,
927                                  b->backing_dev_info.ra_pages);
928                         q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
929                 }
930                 put_ldev(mdev);
931         }
932 }
933
934 void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
935 {
936         int now, new, local, peer;
937
938         now = queue_max_hw_sectors(mdev->rq_queue) << 9;
939         local = mdev->local_max_bio_size; /* possibly the last known value, from volatile memory */
940         peer = mdev->peer_max_bio_size; /* possibly the last known value, from meta data */
941
942         if (get_ldev_if_state(mdev, D_ATTACHING)) {
943                 local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
944                 mdev->local_max_bio_size = local;
945                 put_ldev(mdev);
946         }
947
948         /* We may ignore peer limits if the peer is modern enough.
949            Starting with 8.3.8, the peer can use multiple
950            BIOs for a single peer_request */
951         if (mdev->state.conn >= C_CONNECTED) {
952                 if (mdev->tconn->agreed_pro_version < 94)
953                         peer = mdev->peer_max_bio_size;
954                 else if (mdev->tconn->agreed_pro_version == 94)
955                         peer = DRBD_MAX_SIZE_H80_PACKET;
956                 else /* drbd 8.3.8 onwards */
957                         peer = DRBD_MAX_BIO_SIZE;
958         }
959
960         new = min_t(int, local, peer);
961
962         if (mdev->state.role == R_PRIMARY && new < now)
963                 dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);
964
965         if (new != now)
966                 dev_info(DEV, "max BIO size = %u\n", new);
967
968         drbd_setup_queue_param(mdev, new);
969 }
970
971 /* serialize deconfig (worker exiting, doing cleanup)
972  * and reconfig (drbdsetup disk, drbdsetup net)
973  *
974  * Wait for a potentially exiting worker, then restart it,
975  * or start a new one.  Flush any pending work, there may still be an
976  * after_state_change queued.
977  */
978 static void conn_reconfig_start(struct drbd_tconn *tconn)
979 {
980         wait_event(tconn->ping_wait, !test_and_set_bit(CONFIG_PENDING, &tconn->flags));
981         wait_event(tconn->ping_wait, !test_bit(OBJECT_DYING, &tconn->flags));
982         drbd_thread_start(&tconn->worker);
983         conn_flush_workqueue(tconn);
984 }
985
986 /* if still unconfigured, stops worker again.
987  * if configured now, clears CONFIG_PENDING.
988  * wakes potential waiters */
989 static void conn_reconfig_done(struct drbd_tconn *tconn)
990 {
991         spin_lock_irq(&tconn->req_lock);
992         if (conn_all_vols_unconf(tconn)) {
993                 set_bit(OBJECT_DYING, &tconn->flags);
994                 drbd_thread_stop_nowait(&tconn->worker);
995         } else
996                 clear_bit(CONFIG_PENDING, &tconn->flags);
997         spin_unlock_irq(&tconn->req_lock);
998         wake_up(&tconn->ping_wait);
999 }
1000
1001 /* Make sure IO is suspended before calling this function. */
1002 static void drbd_suspend_al(struct drbd_conf *mdev)
1003 {
1004         int s = 0;
1005
1006         if (!lc_try_lock(mdev->act_log)) {
1007                 dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
1008                 return;
1009         }
1010
1011         drbd_al_shrink(mdev);
1012         spin_lock_irq(&mdev->tconn->req_lock);
1013         if (mdev->state.conn < C_CONNECTED)
1014                 s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
1015         spin_unlock_irq(&mdev->tconn->req_lock);
1016         lc_unlock(mdev->act_log);
1017
1018         if (s)
1019                 dev_info(DEV, "Suspended AL updates\n");
1020 }
1021
1022 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1023 {
1024         struct drbd_conf *mdev;
1025         int err;
1026         enum drbd_ret_code retcode;
1027         enum determine_dev_size dd;
1028         sector_t max_possible_sectors;
1029         sector_t min_md_device_sectors;
1030         struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
1031         struct block_device *bdev;
1032         struct lru_cache *resync_lru = NULL;
1033         union drbd_state ns, os;
1034         enum drbd_state_rv rv;
1035         int cp_discovered = 0;
1036
1037         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1038         if (!adm_ctx.reply_skb)
1039                 return retcode;
1040         if (retcode != NO_ERROR)
1041                 goto fail;
1042
1043         mdev = adm_ctx.mdev;
1044         conn_reconfig_start(mdev->tconn);
1045
1046         /* if you want to reconfigure, please tear down first */
1047         if (mdev->state.disk > D_DISKLESS) {
1048                 retcode = ERR_DISK_CONFIGURED;
1049                 goto fail;
1050         }
1051         /* It may just now have detached because of IO error.  Make sure
1052          * drbd_ldev_destroy is done already, we may end up here very fast,
1053          * e.g. if someone calls attach from the on-io-error handler,
1054          * to realize a "hot spare" feature (not that I'd recommend that) */
1055         wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
1056
1057         /* allocation not in the IO path, drbdsetup context */
1058         nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1059         if (!nbc) {
1060                 retcode = ERR_NOMEM;
1061                 goto fail;
1062         }
1063
1064         nbc->dc.disk_size     = DRBD_DISK_SIZE_SECT_DEF;
1065         nbc->dc.on_io_error   = DRBD_ON_IO_ERROR_DEF;
1066         nbc->dc.fencing       = DRBD_FENCING_DEF;
1067         nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;
1068
1069         err = disk_conf_from_attrs(&nbc->dc, info->attrs);
1070         if (err) {
1071                 retcode = ERR_MANDATORY_TAG;
1072                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1073                 goto fail;
1074         }
1075
1076         if ((int)nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
1077                 retcode = ERR_MD_IDX_INVALID;
1078                 goto fail;
1079         }
1080
1081         if (get_net_conf(mdev->tconn)) {
1082                 int prot = mdev->tconn->net_conf->wire_protocol;
1083                 put_net_conf(mdev->tconn);
1084                 if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
1085                         retcode = ERR_STONITH_AND_PROT_A;
1086                         goto fail;
1087                 }
1088         }
1089
1090         bdev = blkdev_get_by_path(nbc->dc.backing_dev,
1091                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
1092         if (IS_ERR(bdev)) {
1093                 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
1094                         PTR_ERR(bdev));
1095                 retcode = ERR_OPEN_DISK;
1096                 goto fail;
1097         }
1098         nbc->backing_bdev = bdev;
1099
1100         /*
1101          * meta_dev_idx >= 0: external fixed size, possibly multiple
1102          * drbd sharing one meta device.  TODO in that case, paranoia
1103          * check that [md_bdev, meta_dev_idx] is not yet used by some
1104          * other drbd minor!  (if you use drbd.conf + drbdadm, that
1105          * should check it for you already; but if you don't, or
1106          * someone fooled it, we need to double check here)
1107          */
1108         bdev = blkdev_get_by_path(nbc->dc.meta_dev,
1109                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL,
1110                                   ((int)nbc->dc.meta_dev_idx < 0) ?
1111                                   (void *)mdev : (void *)drbd_m_holder);
1112         if (IS_ERR(bdev)) {
1113                 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
1114                         PTR_ERR(bdev));
1115                 retcode = ERR_OPEN_MD_DISK;
1116                 goto fail;
1117         }
1118         nbc->md_bdev = bdev;
1119
1120         if ((nbc->backing_bdev == nbc->md_bdev) !=
1121             (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1122              nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
1123                 retcode = ERR_MD_IDX_INVALID;
1124                 goto fail;
1125         }
1126
1127         resync_lru = lc_create("resync", drbd_bm_ext_cache,
1128                         1, 61, sizeof(struct bm_extent),
1129                         offsetof(struct bm_extent, lce));
1130         if (!resync_lru) {
1131                 retcode = ERR_NOMEM;
1132                 goto fail;
1133         }
1134
1135         /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
1136         drbd_md_set_sector_offsets(mdev, nbc);
1137
1138         if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
1139                 dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
1140                         (unsigned long long) drbd_get_max_capacity(nbc),
1141                         (unsigned long long) nbc->dc.disk_size);
1142                 retcode = ERR_DISK_TO_SMALL;
1143                 goto fail;
1144         }
1145
1146         if ((int)nbc->dc.meta_dev_idx < 0) {
1147                 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1148                 /* at least one MB, otherwise it does not make sense */
1149                 min_md_device_sectors = (2<<10);
1150         } else {
1151                 max_possible_sectors = DRBD_MAX_SECTORS;
1152                 min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
1153         }
1154
1155         if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
1156                 retcode = ERR_MD_DISK_TO_SMALL;
1157                 dev_warn(DEV, "refusing attach: md-device too small, "
1158                      "at least %llu sectors needed for this meta-disk type\n",
1159                      (unsigned long long) min_md_device_sectors);
1160                 goto fail;
1161         }
1162
1163         /* Make sure the new disk is big enough
1164          * (we may currently be R_PRIMARY with no local disk...) */
1165         if (drbd_get_max_capacity(nbc) <
1166             drbd_get_capacity(mdev->this_bdev)) {
1167                 retcode = ERR_DISK_TO_SMALL;
1168                 goto fail;
1169         }
1170
1171         nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1172
1173         if (nbc->known_size > max_possible_sectors) {
1174                 dev_warn(DEV, "==> truncating very big lower level device "
1175                         "to currently maximum possible %llu sectors <==\n",
1176                         (unsigned long long) max_possible_sectors);
1177                 if ((int)nbc->dc.meta_dev_idx >= 0)
1178                         dev_warn(DEV, "==>> using internal or flexible "
1179                                       "meta data may help <<==\n");
1180         }
1181
1182         drbd_suspend_io(mdev);
1183         /* also wait for the last barrier ack. */
1184         wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || is_susp(mdev->state));
1185         /* and for any other previously queued work */
1186         drbd_flush_workqueue(mdev);
1187
1188         rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
1189         retcode = rv;  /* FIXME: Type mismatch. */
1190         drbd_resume_io(mdev);
1191         if (rv < SS_SUCCESS)
1192                 goto fail;
1193
1194         if (!get_ldev_if_state(mdev, D_ATTACHING))
1195                 goto force_diskless;
1196
1197         drbd_md_set_sector_offsets(mdev, nbc);
1198
1199         if (!mdev->bitmap) {
1200                 if (drbd_bm_init(mdev)) {
1201                         retcode = ERR_NOMEM;
1202                         goto force_diskless_dec;
1203                 }
1204         }
1205
1206         retcode = drbd_md_read(mdev, nbc);
1207         if (retcode != NO_ERROR)
1208                 goto force_diskless_dec;
1209
1210         if (mdev->state.conn < C_CONNECTED &&
1211             mdev->state.role == R_PRIMARY &&
1212             (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1213                 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
1214                     (unsigned long long)mdev->ed_uuid);
1215                 retcode = ERR_DATA_NOT_CURRENT;
1216                 goto force_diskless_dec;
1217         }
1218
1219         /* Since we are diskless, fix the activity log first... */
1220         if (drbd_check_al_size(mdev)) {
1221                 retcode = ERR_NOMEM;
1222                 goto force_diskless_dec;
1223         }
1224
1225         /* Prevent shrinking of consistent devices ! */
1226         if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
1227             drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
1228                 dev_warn(DEV, "refusing to truncate a consistent device\n");
1229                 retcode = ERR_DISK_TO_SMALL;
1230                 goto force_diskless_dec;
1231         }
1232
1233         if (!drbd_al_read_log(mdev, nbc)) {
1234                 retcode = ERR_IO_MD_DISK;
1235                 goto force_diskless_dec;
1236         }
1237
1238         /* Reset the "barriers don't work" bits here, then force meta data to
1239          * be written, to ensure we determine if barriers are supported. */
1240         if (nbc->dc.no_md_flush)
1241                 set_bit(MD_NO_FUA, &mdev->flags);
1242         else
1243                 clear_bit(MD_NO_FUA, &mdev->flags);
1244
1245         /* Point of no return reached.
1246          * Devices and memory are no longer released by error cleanup below.
1247          * now mdev takes over responsibility, and the state engine should
1248          * clean it up somewhere.  */
1249         D_ASSERT(mdev->ldev == NULL);
1250         mdev->ldev = nbc;
1251         mdev->resync = resync_lru;
1252         nbc = NULL;
1253         resync_lru = NULL;
1254
1255         mdev->write_ordering = WO_bdev_flush;
1256         drbd_bump_write_ordering(mdev, WO_bdev_flush);
1257
1258         if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
1259                 set_bit(CRASHED_PRIMARY, &mdev->flags);
1260         else
1261                 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1262
1263         if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1264             !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
1265                 set_bit(CRASHED_PRIMARY, &mdev->flags);
1266                 cp_discovered = 1;
1267         }
1268
1269         mdev->send_cnt = 0;
1270         mdev->recv_cnt = 0;
1271         mdev->read_cnt = 0;
1272         mdev->writ_cnt = 0;
1273
1274         drbd_reconsider_max_bio_size(mdev);
1275
1276         /* If I am currently not R_PRIMARY,
1277          * but meta data primary indicator is set,
1278          * I just now recover from a hard crash,
1279          * and have been R_PRIMARY before that crash.
1280          *
1281          * Now, if I had no connection before that crash
1282          * (have been degraded R_PRIMARY), chances are that
1283          * I won't find my peer now either.
1284          *
1285          * In that case, and _only_ in that case,
1286          * we use the degr-wfc-timeout instead of the default,
1287          * so we can automatically recover from a crash of a
1288          * degraded but active "cluster" after a certain timeout.
1289          */
1290         clear_bit(USE_DEGR_WFC_T, &mdev->flags);
1291         if (mdev->state.role != R_PRIMARY &&
1292              drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1293             !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1294                 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1295
1296         dd = drbd_determine_dev_size(mdev, 0);
1297         if (dd == dev_size_error) {
1298                 retcode = ERR_NOMEM_BITMAP;
1299                 goto force_diskless_dec;
1300         } else if (dd == grew)
1301                 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
1302
1303         if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
1304                 dev_info(DEV, "Assuming that all blocks are out of sync "
1305                      "(aka FullSync)\n");
1306                 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
1307                         "set_n_write from attaching", BM_LOCKED_MASK)) {
1308                         retcode = ERR_IO_MD_DISK;
1309                         goto force_diskless_dec;
1310                 }
1311         } else {
1312                 if (drbd_bitmap_io(mdev, &drbd_bm_read,
1313                         "read from attaching", BM_LOCKED_MASK) < 0) {
1314                         retcode = ERR_IO_MD_DISK;
1315                         goto force_diskless_dec;
1316                 }
1317         }
1318
1319         if (cp_discovered) {
1320                 drbd_al_apply_to_bm(mdev);
1321                 if (drbd_bitmap_io(mdev, &drbd_bm_write,
1322                         "crashed primary apply AL", BM_LOCKED_MASK)) {
1323                         retcode = ERR_IO_MD_DISK;
1324                         goto force_diskless_dec;
1325                 }
1326         }
1327
1328         if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
1329                 drbd_suspend_al(mdev); /* IO is still suspended here... */
1330
1331         spin_lock_irq(&mdev->tconn->req_lock);
1332         os = mdev->state;
1333         ns.i = os.i;
1334         /* If MDF_CONSISTENT is not set go into inconsistent state,
1335            otherwise investigate MDF_WasUpToDate...
1336            If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
1337            otherwise into D_CONSISTENT state.
1338         */
1339         if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
1340                 if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
1341                         ns.disk = D_CONSISTENT;
1342                 else
1343                         ns.disk = D_OUTDATED;
1344         } else {
1345                 ns.disk = D_INCONSISTENT;
1346         }
1347
1348         if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
1349                 ns.pdsk = D_OUTDATED;
1350
1351         if (ns.disk == D_CONSISTENT &&
1352             (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
1353                 ns.disk = D_UP_TO_DATE;
1354
1355         /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
1356            MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
1357            this point, because drbd_request_state() modifies these
1358            flags. */
1359
1360         /* In case we are C_CONNECTED, postpone any decision on the new disk
1361            state until after the negotiation phase. */
1362         if (mdev->state.conn == C_CONNECTED) {
1363                 mdev->new_state_tmp.i = ns.i;
1364                 ns.i = os.i;
1365                 ns.disk = D_NEGOTIATING;
1366
1367                 /* We expect to receive up-to-date UUIDs soon.
1368                    To avoid a race in receive_state, free p_uuid while
1369                    holding req_lock. I.e. atomic with the state change */
1370                 kfree(mdev->p_uuid);
1371                 mdev->p_uuid = NULL;
1372         }
1373
1374         rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
1375         ns = mdev->state;
1376         spin_unlock_irq(&mdev->tconn->req_lock);
1377
1378         if (rv < SS_SUCCESS)
1379                 goto force_diskless_dec;
1380
1381         if (mdev->state.role == R_PRIMARY)
1382                 mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
1383         else
1384                 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
1385
1386         drbd_md_mark_dirty(mdev);
1387         drbd_md_sync(mdev);
1388
1389         kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1390         put_ldev(mdev);
1391         conn_reconfig_done(mdev->tconn);
1392         drbd_adm_finish(info, retcode);
1393         return 0;
1394
1395  force_diskless_dec:
1396         put_ldev(mdev);
1397  force_diskless:
1398         drbd_force_state(mdev, NS(disk, D_FAILED));
1399         drbd_md_sync(mdev);
1400         conn_reconfig_done(mdev->tconn);
1401  fail:
1402         if (nbc) {
1403                 if (nbc->backing_bdev)
1404                         blkdev_put(nbc->backing_bdev,
1405                                    FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1406                 if (nbc->md_bdev)
1407                         blkdev_put(nbc->md_bdev,
1408                                    FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1409                 kfree(nbc);
1410         }
1411         lc_destroy(resync_lru);
1412
1413         drbd_adm_finish(info, retcode);
1414         return 0;
1415 }
1416
1417 /* Detaching the disk is a process in multiple stages.  First we need to lock
1418  * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
1419  * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1420  * internal references as well.
1421  * Only then have we finally detached. */
1422 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
1423 {
1424         struct drbd_conf *mdev;
1425         enum drbd_ret_code retcode;
1426
1427         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1428         if (!adm_ctx.reply_skb)
1429                 return retcode;
1430         if (retcode != NO_ERROR)
1431                 goto out;
1432
1433         mdev = adm_ctx.mdev;
1434         drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
1435         retcode = drbd_request_state(mdev, NS(disk, D_DISKLESS));
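        /* If the state change succeeded, disk is D_DISKLESS now and we wait
         * until the last internal reference (local_cnt) has been put.
         * If it failed, disk != D_DISKLESS and the condition is already true. */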
1436         wait_event(mdev->misc_wait,
1437                         mdev->state.disk != D_DISKLESS ||
1438                         !atomic_read(&mdev->local_cnt));
1439         drbd_resume_io(mdev);
1440 out:
1441         drbd_adm_finish(info, retcode);
1442         return 0;
1443 }
1444
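/* Configure the network side of a connection:  refuse if a net_conf is
 * already installed, fill in the defaults, parse the netlink attributes and
 * sanity check them against every volume, make sure neither the local nor
 * the peer address is already in use by another connection, allocate the
 * cram-hmac and data-integrity transforms, and finally install everything
 * under req_lock and request the C_UNCONNECTED state. */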
1445 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
1446 {
1447         char hmac_name[CRYPTO_MAX_ALG_NAME];
1448         struct drbd_conf *mdev;
1449         struct net_conf *new_conf = NULL;
1450         struct crypto_hash *tfm = NULL;
1451         struct crypto_hash *integrity_w_tfm = NULL;
1452         struct crypto_hash *integrity_r_tfm = NULL;
1453         void *int_dig_out = NULL;
1454         void *int_dig_in = NULL;
1455         void *int_dig_vv = NULL;
1456         struct drbd_tconn *oconn;
1457         struct drbd_tconn *tconn;
1458         struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
1459         enum drbd_ret_code retcode;
1460         int i;
1461         int err;
1462
1463         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
1464         if (!adm_ctx.reply_skb)
1465                 return retcode;
1466         if (retcode != NO_ERROR)
1467                 goto out;
1468
1469         tconn = adm_ctx.tconn;
1470         conn_reconfig_start(tconn);
1471
1472         if (tconn->cstate > C_STANDALONE) {
1473                 retcode = ERR_NET_CONFIGURED;
1474                 goto fail;
1475         }
1476
1477         /* allocation not in the IO path, cqueue thread context */
1478         new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
1479         if (!new_conf) {
1480                 retcode = ERR_NOMEM;
1481                 goto fail;
1482         }
1483
1484         new_conf->timeout          = DRBD_TIMEOUT_DEF;
1485         new_conf->try_connect_int  = DRBD_CONNECT_INT_DEF;
1486         new_conf->ping_int         = DRBD_PING_INT_DEF;
1487         new_conf->max_epoch_size   = DRBD_MAX_EPOCH_SIZE_DEF;
1488         new_conf->max_buffers      = DRBD_MAX_BUFFERS_DEF;
1489         new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
1490         new_conf->sndbuf_size      = DRBD_SNDBUF_SIZE_DEF;
1491         new_conf->rcvbuf_size      = DRBD_RCVBUF_SIZE_DEF;
1492         new_conf->ko_count         = DRBD_KO_COUNT_DEF;
1493         new_conf->after_sb_0p      = DRBD_AFTER_SB_0P_DEF;
1494         new_conf->after_sb_1p      = DRBD_AFTER_SB_1P_DEF;
1495         new_conf->after_sb_2p      = DRBD_AFTER_SB_2P_DEF;
1496         new_conf->want_lose        = 0;
1497         new_conf->two_primaries    = 0;
1498         new_conf->wire_protocol    = DRBD_PROT_C;
1499         new_conf->ping_timeo       = DRBD_PING_TIMEO_DEF;
1500         new_conf->rr_conflict      = DRBD_RR_CONFLICT_DEF;
1501         new_conf->on_congestion    = DRBD_ON_CONGESTION_DEF;
1502         new_conf->cong_extents     = DRBD_CONG_EXTENTS_DEF;
1503
1504         err = net_conf_from_attrs(new_conf, info->attrs);
1505         if (err) {
1506                 retcode = ERR_MANDATORY_TAG;
1507                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1508                 goto fail;
1509         }
1510
1511         if (new_conf->two_primaries
1512             && (new_conf->wire_protocol != DRBD_PROT_C)) {
1513                 retcode = ERR_NOT_PROTO_C;
1514                 goto fail;
1515         }
1516
1517         idr_for_each_entry(&tconn->volumes, mdev, i) {
1518                 if (get_ldev(mdev)) {
1519                         enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
1520                         put_ldev(mdev);
1521                         if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
1522                                 retcode = ERR_STONITH_AND_PROT_A;
1523                                 goto fail;
1524                         }
1525                 }
1526                 if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
1527                         retcode = ERR_DISCARD;
1528                         goto fail;
1529                 }
1530                 if (!mdev->bitmap) {
1531                         if (drbd_bm_init(mdev)) {
1532                                 retcode = ERR_NOMEM;
1533                                 goto fail;
1534                         }
1535                 }
1536         }
1537
1538         if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) {
1539                 retcode = ERR_CONG_NOT_PROTO_A;
1540                 goto fail;
1541         }
1542
1543         retcode = NO_ERROR;
1544
1545         new_my_addr = (struct sockaddr *)&new_conf->my_addr;
1546         new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
1547
1548         /* No need to take drbd_cfg_mutex here.  All reconfiguration is
1549          * strictly serialized on genl_lock(). We are protected against
1550          * concurrent reconfiguration/addition/deletion */
1551         list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
1552                 if (oconn == tconn)
1553                         continue;
1554                 if (get_net_conf(oconn)) {
1555                         taken_addr = (struct sockaddr *)&oconn->net_conf->my_addr;
1556                         if (new_conf->my_addr_len == oconn->net_conf->my_addr_len &&
1557                             !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
1558                                 retcode = ERR_LOCAL_ADDR;
1559
1560                         taken_addr = (struct sockaddr *)&oconn->net_conf->peer_addr;
1561                         if (new_conf->peer_addr_len == oconn->net_conf->peer_addr_len &&
1562                             !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
1563                                 retcode = ERR_PEER_ADDR;
1564
1565                         put_net_conf(oconn);
1566                         if (retcode != NO_ERROR)
1567                                 goto fail;
1568                 }
1569         }
1570
1571         if (new_conf->cram_hmac_alg[0] != 0) {
1572                 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
1573                         new_conf->cram_hmac_alg);
1574                 tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
1575                 if (IS_ERR(tfm)) {
1576                         tfm = NULL;
1577                         retcode = ERR_AUTH_ALG;
1578                         goto fail;
1579                 }
1580
1581                 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
1582                         retcode = ERR_AUTH_ALG_ND;
1583                         goto fail;
1584                 }
1585         }
1586
1587         if (new_conf->integrity_alg[0]) {
1588                 integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
1589                 if (IS_ERR(integrity_w_tfm)) {
1590                         integrity_w_tfm = NULL;
1591                         retcode = ERR_INTEGRITY_ALG;
1592                         goto fail;
1593                 }
1594
1595                 if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
1596                         retcode = ERR_INTEGRITY_ALG_ND;
1597                         goto fail;
1598                 }
1599
1600                 integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
1601                 if (IS_ERR(integrity_r_tfm)) {
1602                         integrity_r_tfm = NULL;
1603                         retcode = ERR_INTEGRITY_ALG;
1604                         goto fail;
1605                 }
1606         }
1607
1608         ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
1609
1610         /* allocation not in the IO path, cqueue thread context */
1611         if (integrity_w_tfm) {
1612                 i = crypto_hash_digestsize(integrity_w_tfm);
1613                 int_dig_out = kmalloc(i, GFP_KERNEL);
1614                 if (!int_dig_out) {
1615                         retcode = ERR_NOMEM;
1616                         goto fail;
1617                 }
1618                 int_dig_in = kmalloc(i, GFP_KERNEL);
1619                 if (!int_dig_in) {
1620                         retcode = ERR_NOMEM;
1621                         goto fail;
1622                 }
1623                 int_dig_vv = kmalloc(i, GFP_KERNEL);
1624                 if (!int_dig_vv) {
1625                         retcode = ERR_NOMEM;
1626                         goto fail;
1627                 }
1628         }
1629
1630         conn_flush_workqueue(tconn);
1631         spin_lock_irq(&tconn->req_lock);
1632         if (tconn->net_conf != NULL) {
1633                 retcode = ERR_NET_CONFIGURED;
1634                 spin_unlock_irq(&tconn->req_lock);
1635                 goto fail;
1636         }
1637         tconn->net_conf = new_conf;
1638
1639         crypto_free_hash(tconn->cram_hmac_tfm);
1640         tconn->cram_hmac_tfm = tfm;
1641
1642         crypto_free_hash(tconn->integrity_w_tfm);
1643         tconn->integrity_w_tfm = integrity_w_tfm;
1644
1645         crypto_free_hash(tconn->integrity_r_tfm);
1646         tconn->integrity_r_tfm = integrity_r_tfm;
1647
1648         kfree(tconn->int_dig_out);
1649         kfree(tconn->int_dig_in);
1650         kfree(tconn->int_dig_vv);
1651         tconn->int_dig_out = int_dig_out;
1652         tconn->int_dig_in  = int_dig_in;
1653         tconn->int_dig_vv  = int_dig_vv;
1654         retcode = _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
1655         spin_unlock_irq(&tconn->req_lock);
1656
1657         idr_for_each_entry(&tconn->volumes, mdev, i) {
1658                 mdev->send_cnt = 0;
1659                 mdev->recv_cnt = 0;
1660                 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1661         }
1662         conn_reconfig_done(tconn);
1663         drbd_adm_finish(info, retcode);
1664         return 0;
1665
1666 fail:
1667         kfree(int_dig_out);
1668         kfree(int_dig_in);
1669         kfree(int_dig_vv);
1670         crypto_free_hash(tfm);
1671         crypto_free_hash(integrity_w_tfm);
1672         crypto_free_hash(integrity_r_tfm);
1673         kfree(new_conf);
1674
1675         conn_reconfig_done(tconn);
1676 out:
1677         drbd_adm_finish(info, retcode);
1678         return 0;
1679 }
1680
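/* Tear down the network configuration of a connection.  A forced disconnect
 * goes straight to C_DISCONNECTING (CS_HARD).  Otherwise the state request
 * may have to be repeated with the peer's disk, or our own, marked
 * D_OUTDATED, depending on what the state checking code objects to;
 * afterwards we wait until the connection has left C_DISCONNECTING. */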
1681 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
1682 {
1683         struct disconnect_parms parms;
1684         struct drbd_tconn *tconn;
1685         enum drbd_ret_code retcode;
1686         int err;
1687
1688         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
1689         if (!adm_ctx.reply_skb)
1690                 return retcode;
1691         if (retcode != NO_ERROR)
1692                 goto fail;
1693
1694         tconn = adm_ctx.tconn;
1695         memset(&parms, 0, sizeof(parms));
1696         if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
1697                 err = disconnect_parms_from_attrs(&parms, info->attrs);
1698                 if (err) {
1699                         retcode = ERR_MANDATORY_TAG;
1700                         drbd_msg_put_info(from_attrs_err_to_txt(err));
1701                         goto fail;
1702                 }
1703         }
1704
1705         if (parms.force_disconnect) {
1706                 spin_lock_irq(&tconn->req_lock);
1707                 if (tconn->cstate >= C_WF_CONNECTION)
1708                         _conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
1709                 spin_unlock_irq(&tconn->req_lock);
1710                 goto done;
1711         }
1712
1713         retcode = conn_request_state(tconn, NS(conn, C_DISCONNECTING), 0);
1714
1715         if (retcode == SS_NOTHING_TO_DO)
1716                 goto done;
1717         else if (retcode == SS_ALREADY_STANDALONE)
1718                 goto done;
1719         else if (retcode == SS_PRIMARY_NOP) {
1720                 /* Our state checking code wants to see the peer outdated. */
1721                 retcode = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
1722                                                         pdsk, D_OUTDATED), CS_VERBOSE);
1723         } else if (retcode == SS_CW_FAILED_BY_PEER) {
1724                 /* The peer probably wants to see us outdated. */
1725                 retcode = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
1726                                                         disk, D_OUTDATED), 0);
1727                 if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
1728                         conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
1729                         retcode = SS_SUCCESS;
1730                 }
1731         }
1732
1733         if (retcode < SS_SUCCESS)
1734                 goto fail;
1735
1736         if (wait_event_interruptible(tconn->ping_wait,
1737                                      tconn->cstate != C_DISCONNECTING)) {
1738                 /* Do not test for tconn->cstate == C_STANDALONE, since
1739                    someone else might connect us in the meantime! */
1740                 retcode = ERR_INTR;
1741                 goto fail;
1742         }
1743
1744  done:
1745         retcode = NO_ERROR;
1746  fail:
1747         drbd_adm_finish(info, retcode);
1748         return 0;
1749 }
1750
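/* Decide who resyncs the newly grown area:  if the roles differ, the Primary
 * becomes sync source; with equal roles the DISCARD_CONCURRENT flag breaks
 * the tie.  The designated sync target just waits for the sync UUID
 * (C_WF_SYNC_UUID). */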
1751 void resync_after_online_grow(struct drbd_conf *mdev)
1752 {
1753         int iass; /* I am sync source */
1754
1755         dev_info(DEV, "Resync of new storage after online grow\n");
1756         if (mdev->state.role != mdev->state.peer)
1757                 iass = (mdev->state.role == R_PRIMARY);
1758         else
1759                 iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
1760
1761         if (iass)
1762                 drbd_start_resync(mdev, C_SYNC_SOURCE);
1763         else
1764                 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
1765 }
1766
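/* Handle an explicit resize request.  Refused while a resync is running,
 * while both nodes are Secondary, without a local disk, or when no_resync
 * is requested against a peer that does not speak protocol version 93 yet.
 * On success the new uuids and sizes are sent to the peer; a grow while
 * connected sets RESIZE_PENDING (see resync_after_online_grow() above). */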
1767 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
1768 {
1769         struct resize_parms rs;
1770         struct drbd_conf *mdev;
1771         enum drbd_ret_code retcode;
1772         enum determine_dev_size dd;
1773         enum dds_flags ddsf;
1774         int err;
1775
1776         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1777         if (!adm_ctx.reply_skb)
1778                 return retcode;
1779         if (retcode != NO_ERROR)
1780                 goto fail;
1781
1782         memset(&rs, 0, sizeof(struct resize_parms));
1783         if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
1784                 err = resize_parms_from_attrs(&rs, info->attrs);
1785                 if (err) {
1786                         retcode = ERR_MANDATORY_TAG;
1787                         drbd_msg_put_info(from_attrs_err_to_txt(err));
1788                         goto fail;
1789                 }
1790         }
1791
1792         mdev = adm_ctx.mdev;
1793         if (mdev->state.conn > C_CONNECTED) {
1794                 retcode = ERR_RESIZE_RESYNC;
1795                 goto fail;
1796         }
1797
1798         if (mdev->state.role == R_SECONDARY &&
1799             mdev->state.peer == R_SECONDARY) {
1800                 retcode = ERR_NO_PRIMARY;
1801                 goto fail;
1802         }
1803
1804         if (!get_ldev(mdev)) {
1805                 retcode = ERR_NO_DISK;
1806                 goto fail;
1807         }
1808
1809         if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
1810                 retcode = ERR_NEED_APV_93;
1811                 goto fail;
1812         }
1813
1814         if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
1815                 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
1816
1817         mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
1818         ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
1819         dd = drbd_determine_dev_size(mdev, ddsf);
1820         drbd_md_sync(mdev);
1821         put_ldev(mdev);
1822         if (dd == dev_size_error) {
1823                 retcode = ERR_NOMEM_BITMAP;
1824                 goto fail;
1825         }
1826
1827         if (mdev->state.conn == C_CONNECTED) {
1828                 if (dd == grew)
1829                         set_bit(RESIZE_PENDING, &mdev->flags);
1830
1831                 drbd_send_uuids(mdev);
1832                 drbd_send_sizes(mdev, 1, ddsf);
1833         }
1834
1835  fail:
1836         drbd_adm_finish(info, retcode);
1837         return 0;
1838 }
1839
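/* Update the syncer configuration:  resync rate, al-extents, verify and
 * csums algorithms, cpu mask and the resync controller settings.  A new
 * csums (verify) transform is only swapped in while no resync (online
 * verify) is running, the activity log is resized under al_wait, and if we
 * are connected the peer gets the new parameters via drbd_send_sync_param(). */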
1840 int drbd_adm_syncer(struct sk_buff *skb, struct genl_info *info)
1841 {
1842         struct drbd_conf *mdev;
1843         enum drbd_ret_code retcode;
1844         int err;
1845         int ovr; /* online verify running */
1846         int rsr; /* re-sync running */
1847         struct crypto_hash *verify_tfm = NULL;
1848         struct crypto_hash *csums_tfm = NULL;
1849         struct syncer_conf sc;
1850         cpumask_var_t new_cpu_mask;
1851         int *rs_plan_s = NULL;
1852         int fifo_size;
1853
1854         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1855         if (!adm_ctx.reply_skb)
1856                 return retcode;
1857         if (retcode != NO_ERROR)
1858                 goto fail;
1859         mdev = adm_ctx.mdev;
1860
1861         if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
1862                 retcode = ERR_NOMEM;
1863                 drbd_msg_put_info("unable to allocate cpumask");
1864                 goto fail;
1865         }
1866
1867         if (((struct drbd_genlmsghdr*)info->userhdr)->flags
1868                         & DRBD_GENL_F_SET_DEFAULTS) {
1869                 memset(&sc, 0, sizeof(struct syncer_conf));
1870                 sc.rate       = DRBD_RATE_DEF;
1871                 sc.after      = DRBD_AFTER_DEF;
1872                 sc.al_extents = DRBD_AL_EXTENTS_DEF;
1873                 sc.on_no_data  = DRBD_ON_NO_DATA_DEF;
1874                 sc.c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF;
1875                 sc.c_delay_target = DRBD_C_DELAY_TARGET_DEF;
1876                 sc.c_fill_target = DRBD_C_FILL_TARGET_DEF;
1877                 sc.c_max_rate = DRBD_C_MAX_RATE_DEF;
1878                 sc.c_min_rate = DRBD_C_MIN_RATE_DEF;
1879         } else
1880                 memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));
1881
1882         err = syncer_conf_from_attrs(&sc, info->attrs);
1883         if (err) {
1884                 retcode = ERR_MANDATORY_TAG;
1885                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1886                 goto fail;
1887         }
1888
1889         /* re-sync running */
1890         rsr = (mdev->state.conn == C_SYNC_SOURCE ||
1891                 mdev->state.conn == C_SYNC_TARGET ||
1892                 mdev->state.conn == C_PAUSED_SYNC_S ||
1893                 mdev->state.conn == C_PAUSED_SYNC_T);
1894
1895         if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
1896                 retcode = ERR_CSUMS_RESYNC_RUNNING;
1897                 goto fail;
1898         }
1899
1900         if (!rsr && sc.csums_alg[0]) {
1901                 csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
1902                 if (IS_ERR(csums_tfm)) {
1903                         csums_tfm = NULL;
1904                         retcode = ERR_CSUMS_ALG;
1905                         goto fail;
1906                 }
1907
1908                 if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
1909                         retcode = ERR_CSUMS_ALG_ND;
1910                         goto fail;
1911                 }
1912         }
1913
1914         /* online verify running */
1915         ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);
1916
1917         if (ovr) {
1918                 if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
1919                         retcode = ERR_VERIFY_RUNNING;
1920                         goto fail;
1921                 }
1922         }
1923
1924         if (!ovr && sc.verify_alg[0]) {
1925                 verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
1926                 if (IS_ERR(verify_tfm)) {
1927                         verify_tfm = NULL;
1928                         retcode = ERR_VERIFY_ALG;
1929                         goto fail;
1930                 }
1931
1932                 if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
1933                         retcode = ERR_VERIFY_ALG_ND;
1934                         goto fail;
1935                 }
1936         }
1937
1938         /* silently ignore cpu mask on UP kernel */
1939         if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
1940                 err = __bitmap_parse(sc.cpu_mask, 32, 0,
1941                                 cpumask_bits(new_cpu_mask), nr_cpu_ids);
1942                 if (err) {
1943                         dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
1944                         retcode = ERR_CPU_MASK_PARSE;
1945                         goto fail;
1946                 }
1947         }
1948
1949         if (!expect(sc.rate >= 1))
1950                 sc.rate = 1;
1951
1952         /* clip to allowed range */
1953         if (!expect(sc.al_extents >= DRBD_AL_EXTENTS_MIN))
1954                 sc.al_extents = DRBD_AL_EXTENTS_MIN;
1955         if (!expect(sc.al_extents <= DRBD_AL_EXTENTS_MAX))
1956                 sc.al_extents = DRBD_AL_EXTENTS_MAX;
1957
1958         /* most sanity checks done, try to assign the new sync-after
1959          * dependency.  need to hold the global lock in there,
1960          * to avoid a race in the dependency loop check. */
1961         retcode = drbd_alter_sa(mdev, sc.after);
1962         if (retcode != NO_ERROR)
1963                 goto fail;
1964
1965         fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
1966         if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
1967                 rs_plan_s   = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
1968                 if (!rs_plan_s) {
1969                         dev_err(DEV, "kzalloc of fifo_buffer failed\n");
1970                         retcode = ERR_NOMEM;
1971                         goto fail;
1972                 }
1973         }
1974
1975         /* ok, assign the rest of it as well.
1976          * lock against receive_SyncParam() */
1977         spin_lock(&mdev->peer_seq_lock);
1978         mdev->sync_conf = sc;
1979
1980         if (!rsr) {
1981                 crypto_free_hash(mdev->csums_tfm);
1982                 mdev->csums_tfm = csums_tfm;
1983                 csums_tfm = NULL;
1984         }
1985
1986         if (!ovr) {
1987                 crypto_free_hash(mdev->verify_tfm);
1988                 mdev->verify_tfm = verify_tfm;
1989                 verify_tfm = NULL;
1990         }
1991
1992         if (fifo_size != mdev->rs_plan_s.size) {
1993                 kfree(mdev->rs_plan_s.values);
1994                 mdev->rs_plan_s.values = rs_plan_s;
1995                 mdev->rs_plan_s.size   = fifo_size;
1996                 mdev->rs_planed = 0;
1997                 rs_plan_s = NULL;
1998         }
1999
2000         spin_unlock(&mdev->peer_seq_lock);
2001
2002         if (get_ldev(mdev)) {
2003                 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
2004                 drbd_al_shrink(mdev);
2005                 err = drbd_check_al_size(mdev);
2006                 lc_unlock(mdev->act_log);
2007                 wake_up(&mdev->al_wait);
2008
2009                 put_ldev(mdev);
2010                 drbd_md_sync(mdev);
2011
2012                 if (err) {
2013                         retcode = ERR_NOMEM;
2014                         goto fail;
2015                 }
2016         }
2017
2018         if (mdev->state.conn >= C_CONNECTED)
2019                 drbd_send_sync_param(mdev, &sc);
2020
2021         if (!cpumask_equal(mdev->tconn->cpu_mask, new_cpu_mask)) {
2022                 cpumask_copy(mdev->tconn->cpu_mask, new_cpu_mask);
2023                 drbd_calc_cpu_mask(mdev->tconn);
2024                 mdev->tconn->receiver.reset_cpu_mask = 1;
2025                 mdev->tconn->asender.reset_cpu_mask = 1;
2026                 mdev->tconn->worker.reset_cpu_mask = 1;
2027         }
2028
2029         kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
2030 fail:
2031         kfree(rs_plan_s);
2032         free_cpumask_var(new_cpu_mask);
2033         crypto_free_hash(csums_tfm);
2034         crypto_free_hash(verify_tfm);
2035
2036         drbd_adm_finish(info, retcode);
2037         return 0;
2038 }
2039
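/* Start a full resync as sync target, i.e. invalidate the local data.
 * Without a connection this degrades to simply marking the local disk
 * D_INCONSISTENT. */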
2040 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
2041 {
2042         struct drbd_conf *mdev;
2043         int retcode; /* enum drbd_ret_code or enum drbd_state_rv */
2044
2045         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2046         if (!adm_ctx.reply_skb)
2047                 return retcode;
2048         if (retcode != NO_ERROR)
2049                 goto out;
2050
2051         mdev = adm_ctx.mdev;
2052
2053         /* If there is still bitmap IO pending, probably because of a previous
2054          * resync just being finished, wait for it before requesting a new resync. */
2055         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2056
2057         retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
2058
2059         if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
2060                 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2061
2062         while (retcode == SS_NEED_CONNECTION) {
2063                 spin_lock_irq(&mdev->tconn->req_lock);
2064                 if (mdev->state.conn < C_CONNECTED)
2065                         retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
2066                 spin_unlock_irq(&mdev->tconn->req_lock);
2067
2068                 if (retcode != SS_NEED_CONNECTION)
2069                         break;
2070
2071                 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2072         }
2073
2074 out:
2075         drbd_adm_finish(info, retcode);
2076         return 0;
2077 }
2078
2079 static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
2080 {
2081         int rv;
2082
2083         rv = drbd_bmio_set_n_write(mdev);
2084         drbd_suspend_al(mdev);
2085         return rv;
2086 }
2087
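/* Helper for the trivial admin requests that map 1:1 onto a single
 * drbd_request_state() call, e.g.
 *
 *      return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
 *
 * as used by drbd_adm_invalidate_peer(), drbd_adm_suspend_io() and
 * drbd_adm_outdate() below. */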
2088 static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2089                 union drbd_state mask, union drbd_state val)
2090 {
2091         enum drbd_ret_code retcode;
2092
2093         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2094         if (!adm_ctx.reply_skb)
2095                 return retcode;
2096         if (retcode != NO_ERROR)
2097                 goto out;
2098
2099         retcode = drbd_request_state(adm_ctx.mdev, mask, val);
2100 out:
2101         drbd_adm_finish(info, retcode);
2102         return 0;
2103 }
2104
2105 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
2106 {
2107         return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
2108 }
2109
2110 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
2111 {
2112         enum drbd_ret_code retcode;
2113
2114         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2115         if (!adm_ctx.reply_skb)
2116                 return retcode;
2117         if (retcode != NO_ERROR)
2118                 goto out;
2119
2120         if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
2121                 retcode = ERR_PAUSE_IS_SET;
2122 out:
2123         drbd_adm_finish(info, retcode);
2124         return 0;
2125 }
2126
2127 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
2128 {
2129         union drbd_state s;
2130         enum drbd_ret_code retcode;
2131
2132         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2133         if (!adm_ctx.reply_skb)
2134                 return retcode;
2135         if (retcode != NO_ERROR)
2136                 goto out;
2137
2138         if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
2139                 s = adm_ctx.mdev->state;
2140                 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
2141                         retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
2142                                   s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
2143                 } else {
2144                         retcode = ERR_PAUSE_IS_CLEAR;
2145                 }
2146         }
2147
2148 out:
2149         drbd_adm_finish(info, retcode);
2150         return 0;
2151 }
2152
2153 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
2154 {
2155         return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
2156 }
2157
2158 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
2159 {
2160         struct drbd_conf *mdev;
2161         int retcode; /* enum drbd_ret_code or enum drbd_state_rv */
2162
2163         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2164         if (!adm_ctx.reply_skb)
2165                 return retcode;
2166         if (retcode != NO_ERROR)
2167                 goto out;
2168
2169         mdev = adm_ctx.mdev;
2170         if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
2171                 drbd_uuid_new_current(mdev);
2172                 clear_bit(NEW_CUR_UUID, &mdev->flags);
2173         }
2174         drbd_suspend_io(mdev);
2175         retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
2176         if (retcode == SS_SUCCESS) {
2177                 if (mdev->state.conn < C_CONNECTED)
2178                         tl_clear(mdev->tconn);
2179                 if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
2180                         tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
2181         }
2182         drbd_resume_io(mdev);
2183
2184 out:
2185         drbd_adm_finish(info, retcode);
2186         return 0;
2187 }
2188
2189 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
2190 {
2191         return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
2192 }
2193
2194 int nla_put_drbd_cfg_context(struct sk_buff *skb, const char *conn_name, unsigned vnr)
2195 {
2196         struct nlattr *nla;
2197         nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
2198         if (!nla)
2199                 goto nla_put_failure;
2200         if (vnr != VOLUME_UNSPECIFIED)
2201                 NLA_PUT_U32(skb, T_ctx_volume, vnr);
2202         NLA_PUT_STRING(skb, T_ctx_conn_name, conn_name);
2203         nla_nest_end(skb, nla);
2204         return 0;
2205
2206 nla_put_failure:
2207         if (nla)
2208                 nla_nest_cancel(skb, nla);
2209         return -EMSGSIZE;
2210 }
2211
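/* Fill in all status details for one device:  configuration context,
 * disk/net/syncer configuration (where available), current state, uuids,
 * capacity and bitmap counters, and, for broadcasts, the state change or
 * helper information carried in the sib.  Used both for get-status replies
 * and for multicast events (drbd_bcast_event). */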
2212 int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
2213                 const struct sib_info *sib)
2214 {
2215         struct state_info *si = NULL; /* for sizeof(si->member); */
2216         struct nlattr *nla;
2217         int got_ldev;
2218         int got_net;
2219         int err = 0;
2220         int exclude_sensitive;
2221
2222         /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
2223          * to.  So we had better exclude sensitive information.
2224          *
2225          * If sib == NULL, this is drbd_adm_get_status, executed synchronously
2226          * in the context of the requesting user process.  Exclude sensitive
2227          * information, unless current has CAP_SYS_ADMIN.
2228          *
2229          * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
2230          * relies on the current implementation of netlink_dump(), which
2231          * executes the dump callback successively from netlink_recvmsg(),
2232          * always in the context of the receiving process. */
2233         exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
2234
2235         got_ldev = get_ldev(mdev);
2236         got_net = get_net_conf(mdev->tconn);
2237
2238         /* We still need to add connection name and volume number information.
2239          * Minor number is in drbd_genlmsghdr. */
2240         if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
2241                 goto nla_put_failure;
2242
2243         if (got_ldev)
2244                 if (disk_conf_to_skb(skb, &mdev->ldev->dc, exclude_sensitive))
2245                         goto nla_put_failure;
2246         if (got_net)
2247                 if (net_conf_to_skb(skb, mdev->tconn->net_conf, exclude_sensitive))
2248                         goto nla_put_failure;
2249
2250         if (syncer_conf_to_skb(skb, &mdev->sync_conf, exclude_sensitive))
2251                         goto nla_put_failure;
2252
2253         nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
2254         if (!nla)
2255                 goto nla_put_failure;
2256         NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
2257         NLA_PUT_U32(skb, T_current_state, mdev->state.i);
2258         NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
2259         NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));
2260
2261         if (got_ldev) {
2262                 NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
2263                 NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
2264                 NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
2265                 NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
2266                 if (C_SYNC_SOURCE <= mdev->state.conn &&
2267                     C_PAUSED_SYNC_T >= mdev->state.conn) {
2268                         NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
2269                         NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
2270                 }
2271         }
2272
2273         if (sib) {
2274                 switch (sib->sib_reason) {
2275                 case SIB_SYNC_PROGRESS:
2276                 case SIB_GET_STATUS_REPLY:
2277                         break;
2278                 case SIB_STATE_CHANGE:
2279                         NLA_PUT_U32(skb, T_prev_state, sib->os.i);
2280                         NLA_PUT_U32(skb, T_new_state, sib->ns.i);
2281                         break;
2282                 case SIB_HELPER_POST:
2283                         NLA_PUT_U32(skb,
2284                                 T_helper_exit_code, sib->helper_exit_code);
2285                         /* fall through */
2286                 case SIB_HELPER_PRE:
2287                         NLA_PUT_STRING(skb, T_helper, sib->helper_name);
2288                         break;
2289                 }
2290         }
2291         nla_nest_end(skb, nla);
2292
2293         if (0)
2294 nla_put_failure:
2295                 err = -EMSGSIZE;
2296         if (got_ldev)
2297                 put_ldev(mdev);
2298         if (got_net)
2299                 put_net_conf(mdev->tconn);
2300         return err;
2301 }
2302
2303 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
2304 {
2305         enum drbd_ret_code retcode;
2306         int err;
2307
2308         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2309         if (!adm_ctx.reply_skb)
2310                 return retcode;
2311         if (retcode != NO_ERROR)
2312                 goto out;
2313
2314         err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
2315         if (err) {
2316                 nlmsg_free(adm_ctx.reply_skb);
2317                 return err;
2318         }
2319 out:
2320         drbd_adm_finish(info, retcode);
2321         return 0;
2322 }
2323
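/* Netlink dump callback:  emit one message per volume of every connection
 * (and one for each connection without volumes), resuming at the position
 * stored in cb->args[] on every invocation; see the open coded iteration
 * below. */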
2324 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
2325 {
2326         struct drbd_conf *mdev;
2327         struct drbd_genlmsghdr *dh;
2328         struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
2329         struct drbd_tconn *tconn = NULL;
2330         struct drbd_tconn *tmp;
2331         unsigned volume = cb->args[1];
2332
2333         /* Open coded, deferred, iteration:
2334          * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
2335          *      idr_for_each_entry(&tconn->volumes, mdev, i) {
2336          *        ...
2337          *      }
2338          * }
2339          * where tconn is cb->args[0];
2340          * and i is cb->args[1];
2341          *
2342          * This may miss entries inserted after this dump started,
2343          * or entries deleted before they are reached.
2344          *
2345          * We need to make sure the mdev won't disappear while
2346          * we are looking at it, and revalidate our iterators
2347          * on each iteration.
2348          */
2349
2350         /* synchronize with drbd_new_tconn/drbd_free_tconn */
2351         mutex_lock(&drbd_cfg_mutex);
2352         /* synchronize with drbd_delete_device */
2353         rcu_read_lock();
2354 next_tconn:
2355         /* revalidate iterator position */
2356         list_for_each_entry(tmp, &drbd_tconns, all_tconn) {
2357                 if (pos == NULL) {
2358                         /* first iteration */
2359                         pos = tmp;
2360                         tconn = pos;
2361                         break;
2362                 }
2363                 if (tmp == pos) {
2364                         tconn = pos;
2365                         break;
2366                 }
2367         }
2368         if (tconn) {
2369                 mdev = idr_get_next(&tconn->volumes, &volume);
2370                 if (!mdev) {
2371                         /* No more volumes to dump on this tconn.
2372                          * Advance tconn iterator. */
2373                         pos = list_entry(tconn->all_tconn.next,
2374                                         struct drbd_tconn, all_tconn);
2375                         /* But, did we dump any volume on this tconn yet? */
2376                         if (volume != 0) {
2377                                 tconn = NULL;
2378                                 volume = 0;
2379                                 goto next_tconn;
2380                         }
2381                 }
2382
2383                 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
2384                                 cb->nlh->nlmsg_seq, &drbd_genl_family,
2385                                 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
2386                 if (!dh)
2387                         goto out;
2388
2389                 if (!mdev) {
2390                         /* this is a tconn without a single volume */
2391                         dh->minor = -1U;
2392                         dh->ret_code = NO_ERROR;
2393                         if (nla_put_drbd_cfg_context(skb, tconn->name, VOLUME_UNSPECIFIED))
2394                                 genlmsg_cancel(skb, dh);
2395                         else
2396                                 genlmsg_end(skb, dh);
2397                         goto out;
2398                 }
2399
2400                 D_ASSERT(mdev->vnr == volume);
2401                 D_ASSERT(mdev->tconn == tconn);
2402
2403                 dh->minor = mdev_to_minor(mdev);
2404                 dh->ret_code = NO_ERROR;
2405
2406                 if (nla_put_status_info(skb, mdev, NULL)) {
2407                         genlmsg_cancel(skb, dh);
2408                         goto out;
2409                 }
2410                 genlmsg_end(skb, dh);
2411         }
2412
2413 out:
2414         rcu_read_unlock();
2415         mutex_unlock(&drbd_cfg_mutex);
2416         /* where to start the next iteration */
2417         cb->args[0] = (long)pos;
2418         cb->args[1] = (pos == tconn) ? volume + 1 : 0;
2419
2420         /* No more tconns/volumes/minors found results in an empty skb,
2421          * which will terminate the dump. */
2422         return skb->len;
2423 }
2424
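/* Report which wait-for-connection timeout applies to this device:
 * UT_PEER_OUTDATED if the peer's disk is known to be outdated, UT_DEGRADED
 * if we came up as a degraded (crashed) primary, UT_DEFAULT otherwise. */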
2425 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
2426 {
2427         enum drbd_ret_code retcode;
2428         struct timeout_parms tp;
2429         int err;
2430
2431         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2432         if (!adm_ctx.reply_skb)
2433                 return retcode;
2434         if (retcode != NO_ERROR)
2435                 goto out;
2436
2437         tp.timeout_type =
2438                 adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
2439                 test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
2440                 UT_DEFAULT;
2441
2442         err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
2443         if (err) {
2444                 nlmsg_free(adm_ctx.reply_skb);
2445                 return err;
2446         }
2447 out:
2448         drbd_adm_finish(info, retcode);
2449         return 0;
2450 }
2451
2452 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
2453 {
2454         struct drbd_conf *mdev;
2455         enum drbd_ret_code retcode;
2456
2457         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2458         if (!adm_ctx.reply_skb)
2459                 return retcode;
2460         if (retcode != NO_ERROR)
2461                 goto out;
2462
2463         mdev = adm_ctx.mdev;
2464         if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
2465                 /* resume from last known position, if possible */
2466                 struct start_ov_parms parms =
2467                         { .ov_start_sector = mdev->ov_start_sector };
2468                 int err = start_ov_parms_from_attrs(&parms, info->attrs);
2469                 if (err) {
2470                         retcode = ERR_MANDATORY_TAG;
2471                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2472                         goto out;
2473                 }
2474                 /* w_make_ov_request expects position to be aligned */
2475                 mdev->ov_start_sector = parms.ov_start_sector & ~(sector_t)(BM_SECT_PER_BIT-1);
2476         }
2477         /* If there is still bitmap IO pending, e.g. previous resync or verify
2478          * just being finished, wait for it before requesting a new resync. */
2479         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2480         retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
2481 out:
2482         drbd_adm_finish(info, retcode);
2483         return 0;
2484 }
2485
2486
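/* Generate a new current UUID.  If clear_bm is requested on a freshly
 * created device (UUID_JUST_CREATED) that is connected, this implements
 * "skip initial sync":  the bitmap is cleared, the peer is informed, and
 * disk and pdsk are set to D_UP_TO_DATE. */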
2487 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
2488 {
2489         struct drbd_conf *mdev;
2490         enum drbd_ret_code retcode;
2491         int skip_initial_sync = 0;
2492         int err;
2493         struct new_c_uuid_parms args;
2494
2495         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2496         if (!adm_ctx.reply_skb)
2497                 return retcode;
2498         if (retcode != NO_ERROR)
2499                 goto out_nolock;
2500
2501         mdev = adm_ctx.mdev;
2502         memset(&args, 0, sizeof(args));
2503         if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
2504                 err = new_c_uuid_parms_from_attrs(&args, info->attrs);
2505                 if (err) {
2506                         retcode = ERR_MANDATORY_TAG;
2507                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2508                         goto out_nolock;
2509                 }
2510         }
2511
2512         mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
2513
2514         if (!get_ldev(mdev)) {
2515                 retcode = ERR_NO_DISK;
2516                 goto out;
2517         }
2518
2519         /* this is "skip initial sync", assume the data to be clean */
2520         if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
2521             mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
2522                 dev_info(DEV, "Preparing to skip initial sync\n");
2523                 skip_initial_sync = 1;
2524         } else if (mdev->state.conn != C_STANDALONE) {
2525                 retcode = ERR_CONNECTED;
2526                 goto out_dec;
2527         }
2528
2529         drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
2530         drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
2531
2532         if (args.clear_bm) {
2533                 err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
2534                         "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
2535                 if (err) {
2536                         dev_err(DEV, "Writing bitmap failed with %d\n", err);
2537                         retcode = ERR_IO_MD_DISK;
2538                 }
2539                 if (skip_initial_sync) {
2540                         drbd_send_uuids_skip_initial_sync(mdev);
2541                         _drbd_uuid_set(mdev, UI_BITMAP, 0);
2542                         drbd_print_uuids(mdev, "cleared bitmap UUID");
2543                         spin_lock_irq(&mdev->tconn->req_lock);
2544                         _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
2545                                         CS_VERBOSE, NULL);
2546                         spin_unlock_irq(&mdev->tconn->req_lock);
2547                 }
2548         }
2549
2550         drbd_md_sync(mdev);
2551 out_dec:
2552         put_ldev(mdev);
2553 out:
2554         mutex_unlock(mdev->state_mutex);
2555 out_nolock:
2556         drbd_adm_finish(info, retcode);
2557         return 0;
2558 }
2559
2560 static enum drbd_ret_code
2561 drbd_check_conn_name(const char *name)
2562 {
2563         if (!name || !name[0]) {
2564                 drbd_msg_put_info("connection name missing");
2565                 return ERR_MANDATORY_TAG;
2566         }
2567         /* if we want to use these in sysfs/configfs/debugfs some day,
2568          * we must not allow slashes */
2569         if (strchr(name, '/')) {
2570                 drbd_msg_put_info("invalid connection name");
2571                 return ERR_INVALID_REQUEST;
2572         }
2573         return NO_ERROR;
2574 }
2575
2576 int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info)
2577 {
2578         enum drbd_ret_code retcode;
2579
2580         retcode = drbd_adm_prepare(skb, info, 0);
2581         if (!adm_ctx.reply_skb)
2582                 return retcode;
2583         if (retcode != NO_ERROR)
2584                 goto out;
2585
2586         retcode = drbd_check_conn_name(adm_ctx.conn_name);
2587         if (retcode != NO_ERROR)
2588                 goto out;
2589
2590         if (adm_ctx.tconn) {
2591                 retcode = ERR_INVALID_REQUEST;
2592                 drbd_msg_put_info("connection exists");
2593                 goto out;
2594         }
2595
2596         if (!drbd_new_tconn(adm_ctx.conn_name))
2597                 retcode = ERR_NOMEM;
2598 out:
2599         drbd_adm_finish(info, retcode);
2600         return 0;
2601 }
2602
2603 int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
2604 {
2605         struct drbd_genlmsghdr *dh = info->userhdr;
2606         enum drbd_ret_code retcode;
2607
2608         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2609         if (!adm_ctx.reply_skb)
2610                 return retcode;
2611         if (retcode != NO_ERROR)
2612                 goto out;
2613
2614         /* FIXME drop minor_count parameter, limit to MINORMASK */
2615         if (dh->minor >= minor_count) {
2616                 drbd_msg_put_info("requested minor out of range");
2617                 retcode = ERR_INVALID_REQUEST;
2618                 goto out;
2619         }
2620         /* FIXME we need a define here */
2621         if (adm_ctx.volume >= 256) {
2622                 drbd_msg_put_info("requested volume id out of range");
2623                 retcode = ERR_INVALID_REQUEST;
2624                 goto out;
2625         }
2626
2627         retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
2628 out:
2629         drbd_adm_finish(info, retcode);
2630         return 0;
2631 }
2632
2633 int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
2634 {
2635         struct drbd_conf *mdev;
2636         enum drbd_ret_code retcode;
2637
2638         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2639         if (!adm_ctx.reply_skb)
2640                 return retcode;
2641         if (retcode != NO_ERROR)
2642                 goto out;
2643
2644         mdev = adm_ctx.mdev;
2645         if (mdev->state.disk == D_DISKLESS &&
2646             mdev->state.conn == C_STANDALONE &&
2647             mdev->state.role == R_SECONDARY) {
2648                 drbd_delete_device(mdev_to_minor(mdev));
2649                 retcode = NO_ERROR;
2650         } else
2651                 retcode = ERR_MINOR_CONFIGURED;
2652 out:
2653         drbd_adm_finish(info, retcode);
2654         return 0;
2655 }
2656
2657 int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info)
2658 {
2659         enum drbd_ret_code retcode;
2660
2661         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2662         if (!adm_ctx.reply_skb)
2663                 return retcode;
2664         if (retcode != NO_ERROR)
2665                 goto out;
2666
2667         if (conn_lowest_minor(adm_ctx.tconn) < 0) {
2668                 drbd_free_tconn(adm_ctx.tconn);
2669                 retcode = NO_ERROR;
2670         } else {
2671                 retcode = ERR_CONN_IN_USE;
2672         }
2673
2674 out:
2675         drbd_adm_finish(info, retcode);
2676         return 0;
2677 }
2678
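/* Broadcast a state info event to whoever listens on the drbd events
 * multicast group:  allocate a message, fill it via nla_put_status_info()
 * with the reason carried in the sib, and multicast it. */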
2679 void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
2680 {
2681         static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
2682         struct sk_buff *msg;
2683         struct drbd_genlmsghdr *d_out;
2684         unsigned seq;
2685         int err = -ENOMEM;
2686
2687         seq = atomic_inc_return(&drbd_genl_seq);
2688         msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
2689         if (!msg)
2690                 goto failed;
2691
2692         err = -EMSGSIZE;
2693         d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
2694         if (!d_out) /* cannot happen, but anyway. */
2695                 goto nla_put_failure;
2696         d_out->minor = mdev_to_minor(mdev);
2697         d_out->ret_code = 0;
2698
2699         if (nla_put_status_info(msg, mdev, sib))
2700                 goto nla_put_failure;
2701         genlmsg_end(msg, d_out);
2702         err = drbd_genl_multicast_events(msg, 0);
2703         /* msg has been consumed or freed in netlink_broadcast() */
2704         if (err && err != -ESRCH)
2705                 goto failed;
2706
2707         return;
2708
2709 nla_put_failure:
2710         nlmsg_free(msg);
2711 failed:
2712         dev_err(DEV, "Error %d while broadcasting event. "
2713                         "Event seq:%u sib_reason:%u\n",
2714                         err, seq, sib->sib_reason);
2715 }