drbd: switch configuration interface from connector to genetlink
drivers/block/drbd/drbd_nl.c  [firefly-linux-kernel-4.4.55.git]
1 /*
2    drbd_nl.c
3
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10    drbd is free software; you can redistribute it and/or modify
11    it under the terms of the GNU General Public License as published by
12    the Free Software Foundation; either version 2, or (at your option)
13    any later version.
14
15    drbd is distributed in the hope that it will be useful,
16    but WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18    GNU General Public License for more details.
19
20    You should have received a copy of the GNU General Public License
21    along with drbd; see the file COPYING.  If not, write to
22    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23
24  */
25
26 #include <linux/module.h>
27 #include <linux/drbd.h>
28 #include <linux/in.h>
29 #include <linux/fs.h>
30 #include <linux/file.h>
31 #include <linux/slab.h>
32 #include <linux/blkpg.h>
33 #include <linux/cpumask.h>
34 #include "drbd_int.h"
35 #include "drbd_req.h"
36 #include "drbd_wrappers.h"
37 #include <asm/unaligned.h>
38 #include <linux/drbd_limits.h>
39 #include <linux/kthread.h>
40
41 #include <net/genetlink.h>
42
43 /* .doit */
44 // int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
45 // int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
46
47 int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
48 int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);
49
50 int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info);
51 int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info);
52
53 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
54 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
55 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
56 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
57 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
58 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
59 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
60 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
61 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
62 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
63 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
64 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
65 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
66 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
67 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
68 int drbd_adm_syncer(struct sk_buff *skb, struct genl_info *info);
69 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
70 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
71 /* .dumpit */
72 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
73
74 #include <linux/drbd_genl_api.h>
75 #include <linux/genl_magic_func.h>
76
77 /* used as the holder argument of blkdev_get_by_path(), to claim our meta data device(s) */
78 static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
79
80 /* Configuration is strictly serialized, because generic netlink message
81  * processing is strictly serialized by the genl_lock(), which means we can
82  * use a single static, global drbd_config_context struct.
83  */
84 static struct drbd_config_context {
85         /* assigned from drbd_genlmsghdr */
86         unsigned int minor;
87         /* assigned from request attributes, if present */
88         unsigned int volume;
89 #define VOLUME_UNSPECIFIED              (-1U)
90         /* pointer into the request skb,
91          * limited lifetime! */
92         char *conn_name;
93
94         /* reply buffer */
95         struct sk_buff *reply_skb;
96         /* pointer into reply buffer */
97         struct drbd_genlmsghdr *reply_dh;
98         /* resolved from attributes, if possible */
99         struct drbd_conf *mdev;
100         struct drbd_tconn *tconn;
101 } adm_ctx;
102
103 static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
104 {
105         genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
106         if (genlmsg_reply(skb, info))
107                 printk(KERN_ERR "drbd: error sending genl reply\n");
108 }
109
110 /* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
111  * reason it could fail is lack of space in the skb, and there are 4k available. */
112 static int drbd_msg_put_info(const char *info)
113 {
114         struct sk_buff *skb = adm_ctx.reply_skb;
115         struct nlattr *nla;
116         int err = -EMSGSIZE;
117
118         if (!info || !info[0])
119                 return 0;
120
121         nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
122         if (!nla)
123                 return err;
124
125         err = nla_put_string(skb, T_info_text, info);
126         if (err) {
127                 nla_nest_cancel(skb, nla);
128                 return err;
129         } else
130                 nla_nest_end(skb, nla);
131         return 0;
132 }
133
134 /* This would be a good candidate for a "pre_doit" hook,
135  * and per-family private info->pointers.
136  * But we need to stay compatible with older kernels.
137  * If it returns successfully, adm_ctx members are valid.
138  */
139 #define DRBD_ADM_NEED_MINOR     1
140 #define DRBD_ADM_NEED_CONN      2
141 static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
142                 unsigned flags)
143 {
144         struct drbd_genlmsghdr *d_in = info->userhdr;
145         const u8 cmd = info->genlhdr->cmd;
146         int err;
147
148         memset(&adm_ctx, 0, sizeof(adm_ctx));
149
150         /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
151         if (cmd != DRBD_ADM_GET_STATUS
152         && security_netlink_recv(skb, CAP_SYS_ADMIN))
153                return -EPERM;
154
155         adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
156         if (!adm_ctx.reply_skb)
157                 goto fail;
158
159         adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
160                                         info, &drbd_genl_family, 0, cmd);
161         /* putting a few bytes into a fresh skb of >= 4k will always succeed,
162          * but check anyway */
163         if (!adm_ctx.reply_dh)
164                 goto fail;
165
166         adm_ctx.reply_dh->minor = d_in->minor;
167         adm_ctx.reply_dh->ret_code = NO_ERROR;
168
169         if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
170                 struct nlattr *nla;
171                 /* parse and validate only */
172                 err = drbd_cfg_context_from_attrs(NULL, info->attrs);
173                 if (err)
174                         goto fail;
175
176                 /* It was present, and valid,
177                  * copy it over to the reply skb. */
178                 err = nla_put_nohdr(adm_ctx.reply_skb,
179                                 info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
180                                 info->attrs[DRBD_NLA_CFG_CONTEXT]);
181                 if (err)
182                         goto fail;
183
184                 /* and assign stuff to the global adm_ctx */
185                 nla = nested_attr_tb[__nla_type(T_ctx_volume)];
186                 adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
187                 nla = nested_attr_tb[__nla_type(T_ctx_conn_name)];
188                 if (nla)
189                         adm_ctx.conn_name = nla_data(nla);
190         } else
191                 adm_ctx.volume = VOLUME_UNSPECIFIED;
192
193         adm_ctx.minor = d_in->minor;
194         adm_ctx.mdev = minor_to_mdev(d_in->minor);
195         adm_ctx.tconn = conn_by_name(adm_ctx.conn_name);
196
197         if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
198                 drbd_msg_put_info("unknown minor");
199                 return ERR_MINOR_INVALID;
200         }
201         if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
202                 drbd_msg_put_info("unknown connection");
203                 return ERR_INVALID_REQUEST;
204         }
205
206         /* some more paranoia, if the request was over-determined */
207         if (adm_ctx.mdev &&
208             adm_ctx.volume != VOLUME_UNSPECIFIED &&
209             adm_ctx.volume != adm_ctx.mdev->vnr) {
210                 pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
211                                 adm_ctx.minor, adm_ctx.volume,
212                                 adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
213                 drbd_msg_put_info("over-determined configuration context mismatch");
214                 return ERR_INVALID_REQUEST;
215         }
216         if (adm_ctx.mdev && adm_ctx.tconn &&
217             adm_ctx.mdev->tconn != adm_ctx.tconn) {
218                 pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
219                                 adm_ctx.minor, adm_ctx.conn_name, adm_ctx.mdev->tconn->name);
220                 drbd_msg_put_info("over-determined configuration context mismatch");
221                 return ERR_INVALID_REQUEST;
222         }
223         return NO_ERROR;
224
225 fail:
226         nlmsg_free(adm_ctx.reply_skb);
227         adm_ctx.reply_skb = NULL;
228         return -ENOMEM;
229 }
230
231 static int drbd_adm_finish(struct genl_info *info, int retcode)
232 {
233         struct nlattr *nla;
234         const char *conn_name = NULL;
235
236         if (!adm_ctx.reply_skb)
237                 return -ENOMEM;
238
239         adm_ctx.reply_dh->ret_code = retcode;
240
241         nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
242         if (nla) {
243                 nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
244                 if (nla)
245                         conn_name = nla_data(nla);
246         }
247
248         drbd_adm_send_reply(adm_ctx.reply_skb, info);
249         return 0;
250 }
251
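/* Illustrative sketch, not part of the driver: the calling convention the
 * .doit handlers below follow.  drbd_adm_prepare() fills the global adm_ctx
 * and allocates the reply skb, drbd_msg_put_info() optionally attaches an
 * explanatory text to the reply, and drbd_adm_finish() sets the return code
 * and sends the reply.  The handler name here is hypothetical. */
#if 0
static int drbd_adm_example(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;		/* no reply skb, nothing we can do */
	if (retcode != NO_ERROR)
		goto out;

	/* ... operate on adm_ctx.mdev here ... */
	if (false /* some failure condition */) {
		retcode = ERR_INVALID_REQUEST;
		drbd_msg_put_info("example: something went wrong");
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
#endif
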
252 int drbd_khelper(struct drbd_conf *mdev, char *cmd)
253 {
254         char *envp[] = { "HOME=/",
255                         "TERM=linux",
256                         "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
257                         NULL, /* Will be set to address family */
258                         NULL, /* Will be set to address */
259                         NULL };
260         char mb[12], af[20], ad[60], *afs;
261         char *argv[] = {usermode_helper, cmd, mb, NULL };
262         struct sib_info sib;
263         int ret;
264
265         snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
266
267         if (get_net_conf(mdev->tconn)) {
268                 switch (((struct sockaddr *)mdev->tconn->net_conf->peer_addr)->sa_family) {
269                 case AF_INET6:
270                         afs = "ipv6";
271                         snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
272                                  &((struct sockaddr_in6 *)mdev->tconn->net_conf->peer_addr)->sin6_addr);
273                         break;
274                 case AF_INET:
275                         afs = "ipv4";
276                         snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
277                                  &((struct sockaddr_in *)mdev->tconn->net_conf->peer_addr)->sin_addr);
278                         break;
279                 default:
280                         afs = "ssocks";
281                         snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
282                                  &((struct sockaddr_in *)mdev->tconn->net_conf->peer_addr)->sin_addr);
283                 }
284                 snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
285                 envp[3]=af;
286                 envp[4]=ad;
287                 put_net_conf(mdev->tconn);
288         }
289
290         /* The helper may take some time.
291          * write out any unsynced meta data changes now */
292         drbd_md_sync(mdev);
293
294         dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
295         sib.sib_reason = SIB_HELPER_PRE;
296         sib.helper_name = cmd;
297         drbd_bcast_event(mdev, &sib);
298         ret = call_usermodehelper(usermode_helper, argv, envp, 1);
299         if (ret)
300                 dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
301                                 usermode_helper, cmd, mb,
302                                 (ret >> 8) & 0xff, ret);
303         else
304                 dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
305                                 usermode_helper, cmd, mb,
306                                 (ret >> 8) & 0xff, ret);
307         sib.sib_reason = SIB_HELPER_POST;
308         sib.helper_exit_code = ret;
309         drbd_bcast_event(mdev, &sib);
310
311         if (ret < 0) /* Ignore any ERRNOs we got. */
312                 ret = 0;
313
314         return ret;
315 }
316
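/* Illustrative sketch, not part of the driver: call_usermodehelper() returns
 * a wait(2)-style status, so callers of drbd_khelper() extract the helper's
 * exit code as (ret >> 8) & 0xff, exactly as drbd_try_outdate_peer() does
 * below.  Hypothetical helper function, for illustration only. */
#if 0
static bool example_peer_was_fenced(struct drbd_conf *mdev)
{
	int ret = drbd_khelper(mdev, "fence-peer");

	/* exit code 4 means "peer got outdated, or was already outdated" */
	return ((ret >> 8) & 0xff) == 4;
}
#endif
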
317 enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
318 {
319         char *ex_to_string;
320         int r;
321         enum drbd_disk_state nps;
322         enum drbd_fencing_p fp;
323
324         D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
325
326         if (get_ldev_if_state(mdev, D_CONSISTENT)) {
327                 fp = mdev->ldev->dc.fencing;
328                 put_ldev(mdev);
329         } else {
330                 dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
331                 nps = mdev->state.pdsk;
332                 goto out;
333         }
334
335         r = drbd_khelper(mdev, "fence-peer");
336
337         switch ((r>>8) & 0xff) {
338         case 3: /* peer is inconsistent */
339                 ex_to_string = "peer is inconsistent or worse";
340                 nps = D_INCONSISTENT;
341                 break;
342         case 4: /* peer got outdated, or was already outdated */
343                 ex_to_string = "peer was fenced";
344                 nps = D_OUTDATED;
345                 break;
346         case 5: /* peer was down */
347                 if (mdev->state.disk == D_UP_TO_DATE) {
348                         /* we will (or already did) create a new UUID anyway... */
349                         ex_to_string = "peer is unreachable, assumed to be dead";
350                         nps = D_OUTDATED;
351                 } else {
352                         ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
353                         nps = mdev->state.pdsk;
354                 }
355                 break;
356         case 6: /* Peer is primary, voluntarily outdate myself.
357                  * This is useful when an unconnected R_SECONDARY is asked to
358                  * become R_PRIMARY, but finds the other peer being active. */
359                 ex_to_string = "peer is active";
360                 dev_warn(DEV, "Peer is primary, outdating myself.\n");
361                 nps = D_UNKNOWN;
362                 _drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE);
363                 break;
364         case 7:
365                 if (fp != FP_STONITH)
366                         dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n");
367                 ex_to_string = "peer was stonithed";
368                 nps = D_OUTDATED;
369                 break;
370         default:
371                 /* The script is broken ... */
372                 nps = D_UNKNOWN;
373                 dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
374                 return nps;
375         }
376
377         dev_info(DEV, "fence-peer helper returned %d (%s)\n",
378                         (r>>8) & 0xff, ex_to_string);
379
380 out:
381         if (mdev->state.susp_fen && nps >= D_UNKNOWN) {
382                 /* The handler was not successful... unfreeze here, the
383                    state engine can not unfreeze... */
384                 _drbd_request_state(mdev, NS(susp_fen, 0), CS_VERBOSE);
385         }
386
387         return nps;
388 }
389
390 static int _try_outdate_peer_async(void *data)
391 {
392         struct drbd_conf *mdev = (struct drbd_conf *)data;
393         enum drbd_disk_state nps;
394         union drbd_state ns;
395
396         nps = drbd_try_outdate_peer(mdev);
397
398         /* Not using
399            drbd_request_state(mdev, NS(pdsk, nps));
400            here, because we might have been able to re-establish the connection
401            in the meantime. This can only partially be solved by the state
402            engine's is_valid_state() and is_valid_state_transition()
403            functions.
404
405            nps can be D_INCONSISTENT, D_OUTDATED or D_UNKNOWN.
406            pdsk == D_INCONSISTENT while conn >= C_CONNECTED is valid,
407            therefore we have to have the pre state change check here.
408         */
409         spin_lock_irq(&mdev->tconn->req_lock);
410         ns = mdev->state;
411         if (ns.conn < C_WF_REPORT_PARAMS) {
412                 ns.pdsk = nps;
413                 _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
414         }
415         spin_unlock_irq(&mdev->tconn->req_lock);
416
417         return 0;
418 }
419
420 void drbd_try_outdate_peer_async(struct drbd_conf *mdev)
421 {
422         struct task_struct *opa;
423
424         opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev));
425         if (IS_ERR(opa))
426                 dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n");
427 }
428
429 enum drbd_state_rv
430 drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
431 {
432         const int max_tries = 4;
433         enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
434         int try = 0;
435         int forced = 0;
436         union drbd_state mask, val;
437         enum drbd_disk_state nps;
438
439         if (new_role == R_PRIMARY)
440                 request_ping(mdev->tconn); /* Detect a dead peer ASAP */
441
442         mutex_lock(mdev->state_mutex);
443
444         mask.i = 0; mask.role = R_MASK;
445         val.i  = 0; val.role  = new_role;
446
447         while (try++ < max_tries) {
448                 rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);
449
450                 /* in case we first succeeded in outdating the peer,
451                  * but now suddenly could establish a connection */
452                 if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
453                         val.pdsk = 0;
454                         mask.pdsk = 0;
455                         continue;
456                 }
457
458                 if (rv == SS_NO_UP_TO_DATE_DISK && force &&
459                     (mdev->state.disk < D_UP_TO_DATE &&
460                      mdev->state.disk >= D_INCONSISTENT)) {
461                         mask.disk = D_MASK;
462                         val.disk  = D_UP_TO_DATE;
463                         forced = 1;
464                         continue;
465                 }
466
467                 if (rv == SS_NO_UP_TO_DATE_DISK &&
468                     mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
469                         D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
470                         nps = drbd_try_outdate_peer(mdev);
471
472                         if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
473                                 val.disk = D_UP_TO_DATE;
474                                 mask.disk = D_MASK;
475                         }
476
477                         val.pdsk = nps;
478                         mask.pdsk = D_MASK;
479
480                         continue;
481                 }
482
483                 if (rv == SS_NOTHING_TO_DO)
484                         goto out;
485                 if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
486                         nps = drbd_try_outdate_peer(mdev);
487
488                         if (force && nps > D_OUTDATED) {
489                                 dev_warn(DEV, "Forced into split brain situation!\n");
490                                 nps = D_OUTDATED;
491                         }
492
493                         mask.pdsk = D_MASK;
494                         val.pdsk  = nps;
495
496                         continue;
497                 }
498                 if (rv == SS_TWO_PRIMARIES) {
499                         /* Maybe the peer is detected as dead very soon...
500                            retry at most once more in this case. */
501                         schedule_timeout_interruptible((mdev->tconn->net_conf->ping_timeo+1)*HZ/10);
502                         if (try < max_tries)
503                                 try = max_tries - 1;
504                         continue;
505                 }
506                 if (rv < SS_SUCCESS) {
507                         rv = _drbd_request_state(mdev, mask, val,
508                                                 CS_VERBOSE + CS_WAIT_COMPLETE);
509                         if (rv < SS_SUCCESS)
510                                 goto out;
511                 }
512                 break;
513         }
514
515         if (rv < SS_SUCCESS)
516                 goto out;
517
518         if (forced)
519                 dev_warn(DEV, "Forced to consider local data as UpToDate!\n");
520
521         /* Wait until nothing is in flight :) */
522         wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
523
524         if (new_role == R_SECONDARY) {
525                 set_disk_ro(mdev->vdisk, true);
526                 if (get_ldev(mdev)) {
527                         mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
528                         put_ldev(mdev);
529                 }
530         } else {
531                 if (get_net_conf(mdev->tconn)) {
532                         mdev->tconn->net_conf->want_lose = 0;
533                         put_net_conf(mdev->tconn);
534                 }
535                 set_disk_ro(mdev->vdisk, false);
536                 if (get_ldev(mdev)) {
537                         if (((mdev->state.conn < C_CONNECTED ||
538                                mdev->state.pdsk <= D_FAILED)
539                               && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
540                                 drbd_uuid_new_current(mdev);
541
542                         mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
543                         put_ldev(mdev);
544                 }
545         }
546
547         /* write-out of the bitmap areas covered by the activity log
548          * to stable storage was already done in the after-state-change work */
549
550         if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
551                 /* if this was forced, we should consider sync */
552                 if (forced)
553                         drbd_send_uuids(mdev);
554                 drbd_send_state(mdev);
555         }
556
557         drbd_md_sync(mdev);
558
559         kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
560 out:
561         mutex_unlock(mdev->state_mutex);
562         return rv;
563 }
564
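/* Illustrative sketch, not part of the driver: the mask/val idiom used by
 * drbd_set_role() above.  Only the state fields selected in 'mask' are
 * changed; 'val' supplies their new values, so unrelated state bits are
 * left untouched.  Hypothetical example function. */
#if 0
static enum drbd_state_rv example_make_secondary(struct drbd_conf *mdev)
{
	union drbd_state mask = { .i = 0 }, val = { .i = 0 };

	mask.role = R_MASK;		/* touch only the role bits */
	val.role  = R_SECONDARY;	/* ... and set them to Secondary */
	return _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);
}
#endif
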
565 static const char *from_attrs_err_to_txt(int err)
566 {
567         return  err == -ENOMSG ? "required attribute missing" :
568                 err == -EOPNOTSUPP ? "unknown mandatory attribute" :
569                 "invalid attribute value";
570 }
571
572 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
573 {
574         struct set_role_parms parms;
575         int err;
576         enum drbd_ret_code retcode;
577
578         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
579         if (!adm_ctx.reply_skb)
580                 return retcode;
581         if (retcode != NO_ERROR)
582                 goto out;
583
584         memset(&parms, 0, sizeof(parms));
585         if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
586                 err = set_role_parms_from_attrs(&parms, info->attrs);
587                 if (err) {
588                         retcode = ERR_MANDATORY_TAG;
589                         drbd_msg_put_info(from_attrs_err_to_txt(err));
590                         goto out;
591                 }
592         }
593
594         if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
595                 retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
596         else
597                 retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
598 out:
599         drbd_adm_finish(info, retcode);
600         return 0;
601 }
602
603 /* initializes the md.*_offset members, so we are able to find
604  * the on disk meta data */
605 static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
606                                        struct drbd_backing_dev *bdev)
607 {
608         sector_t md_size_sect = 0;
609         switch (bdev->dc.meta_dev_idx) {
610         default:
611                 /* v07 style fixed size indexed meta data */
612                 bdev->md.md_size_sect = MD_RESERVED_SECT;
613                 bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
614                 bdev->md.al_offset = MD_AL_OFFSET;
615                 bdev->md.bm_offset = MD_BM_OFFSET;
616                 break;
617         case DRBD_MD_INDEX_FLEX_EXT:
618                 /* just occupy the full device; unit: sectors */
619                 bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
620                 bdev->md.md_offset = 0;
621                 bdev->md.al_offset = MD_AL_OFFSET;
622                 bdev->md.bm_offset = MD_BM_OFFSET;
623                 break;
624         case DRBD_MD_INDEX_INTERNAL:
625         case DRBD_MD_INDEX_FLEX_INT:
626                 bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
627                 /* al size is still fixed */
628                 bdev->md.al_offset = -MD_AL_SECTORS;
629                 /* we need (slightly less than) ~ this many bitmap sectors: */
630                 md_size_sect = drbd_get_capacity(bdev->backing_bdev);
631                 md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
632                 md_size_sect = BM_SECT_TO_EXT(md_size_sect);
633                 md_size_sect = ALIGN(md_size_sect, 8);
634
635                 /* plus the "drbd meta data super block",
636                  * and the activity log; */
637                 md_size_sect += MD_BM_OFFSET;
638
639                 bdev->md.md_size_sect = md_size_sect;
640                 /* bitmap offset is adjusted by 'super' block size */
641                 bdev->md.bm_offset   = -md_size_sect + MD_AL_OFFSET;
642                 break;
643         }
644 }
645
646 /* input size is expected to be in KB */
647 char *ppsize(char *buf, unsigned long long size)
648 {
649         /* Needs 9 bytes at max including trailing NUL:
650          * -1ULL ==> "16384 EB" */
651         static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
652         int base = 0;
653         while (size >= 10000 && base < sizeof(units)-1) {
654                 /* shift + round */
655                 size = (size >> 10) + !!(size & (1<<9));
656                 base++;
657         }
658         sprintf(buf, "%u %cB", (unsigned)size, units[base]);
659
660         return buf;
661 }
662
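/* A few worked examples (illustrative) of ppsize(); input is in KB, and a
 * value stays in its unit until it reaches 10000, rounding up whenever the
 * remainder is at least half a unit:
 *   ppsize(buf, 4096)    -> "4096 KB"
 *   ppsize(buf, 1048576) -> "1024 MB"
 *   ppsize(buf, 10752)   -> "11 MB"   (10.5 MB, rounded up)
 */
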
663 /* there is still a theoretical deadlock when called from receiver
664  * on a D_INCONSISTENT R_PRIMARY:
665  *  remote READ does inc_ap_bio, receiver would need to receive answer
666  *  packet from remote to dec_ap_bio again.
667  *  receiver receive_sizes(), comes here,
668  *  waits for ap_bio_cnt == 0. -> deadlock.
669  * but this cannot happen, actually, because:
670  *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
671  *  (not connected, or bad/no disk on peer):
672  *  see drbd_fail_request_early, ap_bio_cnt is zero.
673  *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
674  *  peer may not initiate a resize.
675  */
676 /* Note these are not to be confused with
677  * drbd_adm_suspend_io/drbd_adm_resume_io,
678  * which are (sub) state changes triggered by admin (drbdsetup),
679  * and can be long lived.
680  * This changes an mdev->flag, is triggered by drbd internals,
681  * and should be short-lived. */
682 void drbd_suspend_io(struct drbd_conf *mdev)
683 {
684         set_bit(SUSPEND_IO, &mdev->flags);
685         if (is_susp(mdev->state))
686                 return;
687         wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
688 }
689
690 void drbd_resume_io(struct drbd_conf *mdev)
691 {
692         clear_bit(SUSPEND_IO, &mdev->flags);
693         wake_up(&mdev->misc_wait);
694 }
695
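/* Illustrative sketch, not part of the driver: drbd_suspend_io() and
 * drbd_resume_io() form a short-lived bracket around operations that must
 * not race with application IO, e.g. the size change in
 * drbd_determine_dev_size() below.  Hypothetical example function. */
#if 0
static void example_with_io_suspended(struct drbd_conf *mdev)
{
	drbd_suspend_io(mdev);	/* sets SUSPEND_IO, waits for ap_bio_cnt == 0 */
	/* ... resize, move meta data, etc. ... */
	drbd_resume_io(mdev);	/* clears SUSPEND_IO, wakes misc_wait waiters */
}
#endif
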
696 /**
697  * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
698  * @mdev:       DRBD device.
699  *
700  * Returns 0 on success, negative return values indicate errors.
701  * You should call drbd_md_sync() after calling this function.
702  */
703 enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
704 {
705         sector_t prev_first_sect, prev_size; /* previous meta location */
706         sector_t la_size;
707         sector_t size;
708         char ppb[10];
709
710         int md_moved, la_size_changed;
711         enum determine_dev_size rv = unchanged;
712
713         /* race:
714          * application request passes inc_ap_bio,
715          * but then cannot get an AL-reference.
716          * this function later may wait on ap_bio_cnt == 0. -> deadlock.
717          *
718          * to avoid that:
719          * Suspend IO right here.
720          * still lock the act_log to not trigger ASSERTs there.
721          */
722         drbd_suspend_io(mdev);
723
724         /* no wait necessary anymore, actually we could assert that */
725         wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
726
727         prev_first_sect = drbd_md_first_sector(mdev->ldev);
728         prev_size = mdev->ldev->md.md_size_sect;
729         la_size = mdev->ldev->md.la_size_sect;
730
731         /* TODO: should only be some assert here, not (re)init... */
732         drbd_md_set_sector_offsets(mdev, mdev->ldev);
733
734         size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);
735
736         if (drbd_get_capacity(mdev->this_bdev) != size ||
737             drbd_bm_capacity(mdev) != size) {
738                 int err;
739                 err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
740                 if (unlikely(err)) {
741                         /* currently there is only one error: ENOMEM! */
742                         size = drbd_bm_capacity(mdev)>>1;
743                         if (size == 0) {
744                                 dev_err(DEV, "OUT OF MEMORY! "
745                                     "Could not allocate bitmap!\n");
746                         } else {
747                                 dev_err(DEV, "BM resizing failed. "
748                                     "Leaving size unchanged at size = %lu KB\n",
749                                     (unsigned long)size);
750                         }
751                         rv = dev_size_error;
752                 }
753                 /* racy, see comments above. */
754                 drbd_set_my_capacity(mdev, size);
755                 mdev->ldev->md.la_size_sect = size;
756                 dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
757                      (unsigned long long)size>>1);
758         }
759         if (rv == dev_size_error)
760                 goto out;
761
762         la_size_changed = (la_size != mdev->ldev->md.la_size_sect);
763
764         md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
765                 || prev_size       != mdev->ldev->md.md_size_sect;
766
767         if (la_size_changed || md_moved) {
768                 int err;
769
770                 drbd_al_shrink(mdev); /* All extents inactive. */
771                 dev_info(DEV, "Writing the whole bitmap, %s\n",
772                          la_size_changed && md_moved ? "size changed and md moved" :
773                          la_size_changed ? "size changed" : "md moved");
774                 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
775                 err = drbd_bitmap_io(mdev, &drbd_bm_write,
776                                 "size changed", BM_LOCKED_MASK);
777                 if (err) {
778                         rv = dev_size_error;
779                         goto out;
780                 }
781                 drbd_md_mark_dirty(mdev);
782         }
783
784         if (size > la_size)
785                 rv = grew;
786         if (size < la_size)
787                 rv = shrunk;
788 out:
789         lc_unlock(mdev->act_log);
790         wake_up(&mdev->al_wait);
791         drbd_resume_io(mdev);
792
793         return rv;
794 }
795
796 sector_t
797 drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
798 {
799         sector_t p_size = mdev->p_size;   /* partner's disk size. */
800         sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
801         sector_t m_size; /* my size */
802         sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
803         sector_t size = 0;
804
805         m_size = drbd_get_max_capacity(bdev);
806
807         if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
808                 dev_warn(DEV, "Resize while not connected was forced by the user!\n");
809                 p_size = m_size;
810         }
811
812         if (p_size && m_size) {
813                 size = min_t(sector_t, p_size, m_size);
814         } else {
815                 if (la_size) {
816                         size = la_size;
817                         if (m_size && m_size < size)
818                                 size = m_size;
819                         if (p_size && p_size < size)
820                                 size = p_size;
821                 } else {
822                         if (m_size)
823                                 size = m_size;
824                         if (p_size)
825                                 size = p_size;
826                 }
827         }
828
829         if (size == 0)
830                 dev_err(DEV, "Both nodes diskless!\n");
831
832         if (u_size) {
833                 if (u_size > size)
834                         dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
835                             (unsigned long)u_size>>1, (unsigned long)size>>1);
836                 else
837                         size = u_size;
838         }
839
840         return size;
841 }
842
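/* Worked example (illustrative) for drbd_new_dev_size(), all values in the
 * same unit (sectors):
 *   peer and local size known: p_size = 100, m_size = 80
 *       -> size = min(p_size, m_size) = 80
 *   peer size unknown (p_size == 0), last agreed la_size = 80, m_size = 60
 *       -> size = 60 (la_size, clamped to what is locally present)
 *   a user-requested u_size no larger than the result overrides it;
 *   a u_size larger than the result is rejected with an error message and
 *   the computed size is kept.
 */
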
843 /**
844  * drbd_check_al_size() - Ensures that the AL is of the right size
845  * @mdev:       DRBD device.
846  *
847  * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
848  * failed, and 0 on success. You should call drbd_md_sync() after calling
849  * this function.
850  */
851 static int drbd_check_al_size(struct drbd_conf *mdev)
852 {
853         struct lru_cache *n, *t;
854         struct lc_element *e;
855         unsigned int in_use;
856         int i;
857
858         if (!expect(mdev->sync_conf.al_extents >= DRBD_AL_EXTENTS_MIN))
859                 mdev->sync_conf.al_extents = DRBD_AL_EXTENTS_MIN;
860
861         if (mdev->act_log &&
862             mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
863                 return 0;
864
865         in_use = 0;
866         t = mdev->act_log;
867         n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
868                 mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);
869
870         if (n == NULL) {
871                 dev_err(DEV, "Cannot allocate act_log lru!\n");
872                 return -ENOMEM;
873         }
874         spin_lock_irq(&mdev->al_lock);
875         if (t) {
876                 for (i = 0; i < t->nr_elements; i++) {
877                         e = lc_element_by_index(t, i);
878                         if (e->refcnt)
879                                 dev_err(DEV, "refcnt(%d)==%d\n",
880                                     e->lc_number, e->refcnt);
881                         in_use += e->refcnt;
882                 }
883         }
884         if (!in_use)
885                 mdev->act_log = n;
886         spin_unlock_irq(&mdev->al_lock);
887         if (in_use) {
888                 dev_err(DEV, "Activity log still in use!\n");
889                 lc_destroy(n);
890                 return -EBUSY;
891         } else {
892                 if (t)
893                         lc_destroy(t);
894         }
895         drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
896         return 0;
897 }
898
899 static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
900 {
901         struct request_queue * const q = mdev->rq_queue;
902         int max_hw_sectors = max_bio_size >> 9;
903         int max_segments = 0;
904
905         if (get_ldev_if_state(mdev, D_ATTACHING)) {
906                 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
907
908                 max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
909                 max_segments = mdev->ldev->dc.max_bio_bvecs;
910                 put_ldev(mdev);
911         }
912
913         blk_queue_logical_block_size(q, 512);
914         blk_queue_max_hw_sectors(q, max_hw_sectors);
915         /* This is the workaround for "bio would need to, but cannot, be split" */
916         blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
917         blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
918
919         if (get_ldev_if_state(mdev, D_ATTACHING)) {
920                 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
921
922                 blk_queue_stack_limits(q, b);
923
924                 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
925                         dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
926                                  q->backing_dev_info.ra_pages,
927                                  b->backing_dev_info.ra_pages);
928                         q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
929                 }
930                 put_ldev(mdev);
931         }
932 }
933
934 void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
935 {
936         int now, new, local, peer;
937
938         now = queue_max_hw_sectors(mdev->rq_queue) << 9;
939         local = mdev->local_max_bio_size; /* possibly the last known value, from volatile memory */
940         peer = mdev->peer_max_bio_size; /* possibly the last known value, from meta data */
941
942         if (get_ldev_if_state(mdev, D_ATTACHING)) {
943                 local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
944                 mdev->local_max_bio_size = local;
945                 put_ldev(mdev);
946         }
947
948         /* We may ignore peer limits if the peer is modern enough.
949            From 8.3.8 onwards the peer can use multiple
950            BIOs for a single peer_request */
951         if (mdev->state.conn >= C_CONNECTED) {
952                 if (mdev->tconn->agreed_pro_version < 94)
953                         peer = mdev->peer_max_bio_size;
954                 else if (mdev->tconn->agreed_pro_version == 94)
955                         peer = DRBD_MAX_SIZE_H80_PACKET;
956                 else /* drbd 8.3.8 onwards */
957                         peer = DRBD_MAX_BIO_SIZE;
958         }
959
960         new = min_t(int, local, peer);
961
962         if (mdev->state.role == R_PRIMARY && new < now)
963                 dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);
964
965         if (new != now)
966                 dev_info(DEV, "max BIO size = %u\n", new);
967
968         drbd_setup_queue_param(mdev, new);
969 }
970
971 /* serialize deconfig (worker exiting, doing cleanup)
972  * and reconfig (drbdsetup disk, drbdsetup net)
973  *
974  * Wait for a potentially exiting worker, then restart it,
975  * or start a new one.  Flush any pending work, there may still be an
976  * after_state_change queued.
977  */
978 static void conn_reconfig_start(struct drbd_tconn *tconn)
979 {
980         wait_event(tconn->ping_wait, !test_and_set_bit(CONFIG_PENDING, &tconn->flags));
981         wait_event(tconn->ping_wait, !test_bit(OBJECT_DYING, &tconn->flags));
982         drbd_thread_start(&tconn->worker);
983         conn_flush_workqueue(tconn);
984 }
985
986 /* if still unconfigured, stops worker again.
987  * if configured now, clears CONFIG_PENDING.
988  * wakes potential waiters */
989 static void conn_reconfig_done(struct drbd_tconn *tconn)
990 {
991         spin_lock_irq(&tconn->req_lock);
992         if (conn_all_vols_unconf(tconn)) {
993                 set_bit(OBJECT_DYING, &tconn->flags);
994                 drbd_thread_stop_nowait(&tconn->worker);
995         } else
996                 clear_bit(CONFIG_PENDING, &tconn->flags);
997         spin_unlock_irq(&tconn->req_lock);
998         wake_up(&tconn->ping_wait);
999 }
1000
1001 /* Make sure IO is suspended before calling this function. */
1002 static void drbd_suspend_al(struct drbd_conf *mdev)
1003 {
1004         int s = 0;
1005
1006         if (!lc_try_lock(mdev->act_log)) {
1007                 dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
1008                 return;
1009         }
1010
1011         drbd_al_shrink(mdev);
1012         spin_lock_irq(&mdev->tconn->req_lock);
1013         if (mdev->state.conn < C_CONNECTED)
1014                 s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
1015         spin_unlock_irq(&mdev->tconn->req_lock);
1016         lc_unlock(mdev->act_log);
1017
1018         if (s)
1019                 dev_info(DEV, "Suspended AL updates\n");
1020 }
1021
1022 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1023 {
1024         struct drbd_conf *mdev;
1025         int err;
1026         enum drbd_ret_code retcode;
1027         enum determine_dev_size dd;
1028         sector_t max_possible_sectors;
1029         sector_t min_md_device_sectors;
1030         struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
1031         struct block_device *bdev;
1032         struct lru_cache *resync_lru = NULL;
1033         union drbd_state ns, os;
1034         enum drbd_state_rv rv;
1035         int cp_discovered = 0;
1036
1037         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1038         if (!adm_ctx.reply_skb)
1039                 return retcode;
1040         if (retcode != NO_ERROR)
1041                 goto fail;
1042
1043         mdev = adm_ctx.mdev;
1044         conn_reconfig_start(mdev->tconn);
1045
1046         /* if you want to reconfigure, please tear down first */
1047         if (mdev->state.disk > D_DISKLESS) {
1048                 retcode = ERR_DISK_CONFIGURED;
1049                 goto fail;
1050         }
1051         /* It may just now have detached because of IO error.  Make sure
1052          * drbd_ldev_destroy is done already, we may end up here very fast,
1053          * e.g. if someone calls attach from the on-io-error handler,
1054          * to realize a "hot spare" feature (not that I'd recommend that) */
1055         wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
1056
1057         /* allocation not in the IO path, drbdsetup context */
1058         nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1059         if (!nbc) {
1060                 retcode = ERR_NOMEM;
1061                 goto fail;
1062         }
1063
1064         nbc->dc.disk_size     = DRBD_DISK_SIZE_SECT_DEF;
1065         nbc->dc.on_io_error   = DRBD_ON_IO_ERROR_DEF;
1066         nbc->dc.fencing       = DRBD_FENCING_DEF;
1067         nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;
1068
1069         err = disk_conf_from_attrs(&nbc->dc, info->attrs);
1070         if (err) {
1071                 retcode = ERR_MANDATORY_TAG;
1072                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1073                 goto fail;
1074         }
1075
1076         if ((int)nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
1077                 retcode = ERR_MD_IDX_INVALID;
1078                 goto fail;
1079         }
1080
1081         if (get_net_conf(mdev->tconn)) {
1082                 int prot = mdev->tconn->net_conf->wire_protocol;
1083                 put_net_conf(mdev->tconn);
1084                 if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
1085                         retcode = ERR_STONITH_AND_PROT_A;
1086                         goto fail;
1087                 }
1088         }
1089
1090         bdev = blkdev_get_by_path(nbc->dc.backing_dev,
1091                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
1092         if (IS_ERR(bdev)) {
1093                 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
1094                         PTR_ERR(bdev));
1095                 retcode = ERR_OPEN_DISK;
1096                 goto fail;
1097         }
1098         nbc->backing_bdev = bdev;
1099
1100         /*
1101          * meta_dev_idx >= 0: external fixed size, possibly multiple
1102          * drbd sharing one meta device.  TODO in that case, paranoia
1103          * check that [md_bdev, meta_dev_idx] is not yet used by some
1104          * other drbd minor!  (if you use drbd.conf + drbdadm, that
1105          * should check it for you already; but if you don't, or
1106          * someone fooled it, we need to double check here)
1107          */
1108         bdev = blkdev_get_by_path(nbc->dc.meta_dev,
1109                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL,
1110                                   ((int)nbc->dc.meta_dev_idx < 0) ?
1111                                   (void *)mdev : (void *)drbd_m_holder);
1112         if (IS_ERR(bdev)) {
1113                 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
1114                         PTR_ERR(bdev));
1115                 retcode = ERR_OPEN_MD_DISK;
1116                 goto fail;
1117         }
1118         nbc->md_bdev = bdev;
1119
1120         if ((nbc->backing_bdev == nbc->md_bdev) !=
1121             (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1122              nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
1123                 retcode = ERR_MD_IDX_INVALID;
1124                 goto fail;
1125         }
1126
1127         resync_lru = lc_create("resync", drbd_bm_ext_cache,
1128                         1, 61, sizeof(struct bm_extent),
1129                         offsetof(struct bm_extent, lce));
1130         if (!resync_lru) {
1131                 retcode = ERR_NOMEM;
1132                 goto fail;
1133         }
1134
1135         /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
1136         drbd_md_set_sector_offsets(mdev, nbc);
1137
1138         if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
1139                 dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
1140                         (unsigned long long) drbd_get_max_capacity(nbc),
1141                         (unsigned long long) nbc->dc.disk_size);
1142                 retcode = ERR_DISK_TO_SMALL;
1143                 goto fail;
1144         }
1145
1146         if ((int)nbc->dc.meta_dev_idx < 0) {
1147                 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1148                 /* at least one MB, otherwise it does not make sense */
1149                 min_md_device_sectors = (2<<10);
1150         } else {
1151                 max_possible_sectors = DRBD_MAX_SECTORS;
1152                 min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
1153         }
1154
1155         if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
1156                 retcode = ERR_MD_DISK_TO_SMALL;
1157                 dev_warn(DEV, "refusing attach: md-device too small, "
1158                      "at least %llu sectors needed for this meta-disk type\n",
1159                      (unsigned long long) min_md_device_sectors);
1160                 goto fail;
1161         }
1162
1163         /* Make sure the new disk is big enough
1164          * (we may currently be R_PRIMARY with no local disk...) */
1165         if (drbd_get_max_capacity(nbc) <
1166             drbd_get_capacity(mdev->this_bdev)) {
1167                 retcode = ERR_DISK_TO_SMALL;
1168                 goto fail;
1169         }
1170
1171         nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1172
1173         if (nbc->known_size > max_possible_sectors) {
1174                 dev_warn(DEV, "==> truncating very big lower level device "
1175                         "to currently maximum possible %llu sectors <==\n",
1176                         (unsigned long long) max_possible_sectors);
1177                 if ((int)nbc->dc.meta_dev_idx >= 0)
1178                         dev_warn(DEV, "==>> using internal or flexible "
1179                                       "meta data may help <<==\n");
1180         }
1181
1182         drbd_suspend_io(mdev);
1183         /* also wait for the last barrier ack. */
1184         wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || is_susp(mdev->state));
1185         /* and for any other previously queued work */
1186         drbd_flush_workqueue(mdev);
1187
1188         rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
1189         retcode = rv;  /* FIXME: Type mismatch. */
1190         drbd_resume_io(mdev);
1191         if (rv < SS_SUCCESS)
1192                 goto fail;
1193
1194         if (!get_ldev_if_state(mdev, D_ATTACHING))
1195                 goto force_diskless;
1196
1197         drbd_md_set_sector_offsets(mdev, nbc);
1198
1199         if (!mdev->bitmap) {
1200                 if (drbd_bm_init(mdev)) {
1201                         retcode = ERR_NOMEM;
1202                         goto force_diskless_dec;
1203                 }
1204         }
1205
1206         retcode = drbd_md_read(mdev, nbc);
1207         if (retcode != NO_ERROR)
1208                 goto force_diskless_dec;
1209
1210         if (mdev->state.conn < C_CONNECTED &&
1211             mdev->state.role == R_PRIMARY &&
1212             (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1213                 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
1214                     (unsigned long long)mdev->ed_uuid);
1215                 retcode = ERR_DATA_NOT_CURRENT;
1216                 goto force_diskless_dec;
1217         }
1218
1219         /* Since we are diskless, fix the activity log first... */
1220         if (drbd_check_al_size(mdev)) {
1221                 retcode = ERR_NOMEM;
1222                 goto force_diskless_dec;
1223         }
1224
1225         /* Prevent shrinking of consistent devices ! */
1226         if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
1227             drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
1228                 dev_warn(DEV, "refusing to truncate a consistent device\n");
1229                 retcode = ERR_DISK_TO_SMALL;
1230                 goto force_diskless_dec;
1231         }
1232
1233         if (!drbd_al_read_log(mdev, nbc)) {
1234                 retcode = ERR_IO_MD_DISK;
1235                 goto force_diskless_dec;
1236         }
1237
1238         /* Reset the "barriers don't work" bits here, then force meta data to
1239          * be written, to ensure we determine if barriers are supported. */
1240         if (nbc->dc.no_md_flush)
1241                 set_bit(MD_NO_FUA, &mdev->flags);
1242         else
1243                 clear_bit(MD_NO_FUA, &mdev->flags);
1244
1245         /* Point of no return reached.
1246          * Devices and memory are no longer released by error cleanup below.
1247          * now mdev takes over responsibility, and the state engine should
1248          * clean it up somewhere.  */
1249         D_ASSERT(mdev->ldev == NULL);
1250         mdev->ldev = nbc;
1251         mdev->resync = resync_lru;
1252         nbc = NULL;
1253         resync_lru = NULL;
1254
1255         mdev->write_ordering = WO_bdev_flush;
1256         drbd_bump_write_ordering(mdev, WO_bdev_flush);
1257
1258         if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
1259                 set_bit(CRASHED_PRIMARY, &mdev->flags);
1260         else
1261                 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1262
1263         if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1264             !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
1265                 set_bit(CRASHED_PRIMARY, &mdev->flags);
1266                 cp_discovered = 1;
1267         }
1268
1269         mdev->send_cnt = 0;
1270         mdev->recv_cnt = 0;
1271         mdev->read_cnt = 0;
1272         mdev->writ_cnt = 0;
1273
1274         drbd_reconsider_max_bio_size(mdev);
1275
1276         /* If I am currently not R_PRIMARY,
1277          * but meta data primary indicator is set,
1278          * I just now recover from a hard crash,
1279          * and have been R_PRIMARY before that crash.
1280          *
1281          * Now, if I had no connection before that crash
1282          * (have been degraded R_PRIMARY), chances are that
1283          * I won't find my peer now either.
1284          *
1285          * In that case, and _only_ in that case,
1286          * we use the degr-wfc-timeout instead of the default,
1287          * so we can automatically recover from a crash of a
1288          * degraded but active "cluster" after a certain timeout.
1289          */
1290         clear_bit(USE_DEGR_WFC_T, &mdev->flags);
1291         if (mdev->state.role != R_PRIMARY &&
1292              drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1293             !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1294                 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1295
1296         dd = drbd_determine_dev_size(mdev, 0);
1297         if (dd == dev_size_error) {
1298                 retcode = ERR_NOMEM_BITMAP;
1299                 goto force_diskless_dec;
1300         } else if (dd == grew)
1301                 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
1302
1303         if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
1304                 dev_info(DEV, "Assuming that all blocks are out of sync "
1305                      "(aka FullSync)\n");
1306                 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
1307                         "set_n_write from attaching", BM_LOCKED_MASK)) {
1308                         retcode = ERR_IO_MD_DISK;
1309                         goto force_diskless_dec;
1310                 }
1311         } else {
1312                 if (drbd_bitmap_io(mdev, &drbd_bm_read,
1313                         "read from attaching", BM_LOCKED_MASK) < 0) {
1314                         retcode = ERR_IO_MD_DISK;
1315                         goto force_diskless_dec;
1316                 }
1317         }
1318
1319         if (cp_discovered) {
1320                 drbd_al_apply_to_bm(mdev);
1321                 if (drbd_bitmap_io(mdev, &drbd_bm_write,
1322                         "crashed primary apply AL", BM_LOCKED_MASK)) {
1323                         retcode = ERR_IO_MD_DISK;
1324                         goto force_diskless_dec;
1325                 }
1326         }
1327
1328         if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
1329                 drbd_suspend_al(mdev); /* IO is still suspended here... */
1330
1331         spin_lock_irq(&mdev->tconn->req_lock);
1332         os = mdev->state;
1333         ns.i = os.i;
1334         /* If MDF_CONSISTENT is not set go into inconsistent state,
1335            otherwise investigate MDF_WasUpToDate...
1336            If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
1337            otherwise into D_CONSISTENT state.
1338         */
1339         if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
1340                 if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
1341                         ns.disk = D_CONSISTENT;
1342                 else
1343                         ns.disk = D_OUTDATED;
1344         } else {
1345                 ns.disk = D_INCONSISTENT;
1346         }
1347
1348         if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
1349                 ns.pdsk = D_OUTDATED;
1350
1351         if (ns.disk == D_CONSISTENT &&
1352             (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
1353                 ns.disk = D_UP_TO_DATE;
1354
1355         /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
1356            MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
1357            this point, because drbd_request_state() modifies these
1358            flags. */
1359
1360         /* In case we are C_CONNECTED postpone any decision on the new disk
1361            state after the negotiation phase. */
1362         if (mdev->state.conn == C_CONNECTED) {
1363                 mdev->new_state_tmp.i = ns.i;
1364                 ns.i = os.i;
1365                 ns.disk = D_NEGOTIATING;
1366
1367                 /* We expect to receive up-to-date UUIDs soon.
1368                    To avoid a race in receive_state, free p_uuid while
1369                    holding req_lock. I.e. atomic with the state change */
1370                 kfree(mdev->p_uuid);
1371                 mdev->p_uuid = NULL;
1372         }
1373
1374         rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
1375         ns = mdev->state;
1376         spin_unlock_irq(&mdev->tconn->req_lock);
1377
1378         if (rv < SS_SUCCESS)
1379                 goto force_diskless_dec;
1380
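        /* The lowest bit of the current UUID encodes whether we are primary. */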
1381         if (mdev->state.role == R_PRIMARY)
1382                 mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
1383         else
1384                 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
1385
1386         drbd_md_mark_dirty(mdev);
1387         drbd_md_sync(mdev);
1388
1389         kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1390         put_ldev(mdev);
1391         conn_reconfig_done(mdev->tconn);
1392         drbd_adm_finish(info, retcode);
1393         return 0;
1394
1395  force_diskless_dec:
1396         put_ldev(mdev);
1397  force_diskless:
1398         drbd_force_state(mdev, NS(disk, D_FAILED));
1399         drbd_md_sync(mdev);
1400         conn_reconfig_done(mdev->tconn);
1401  fail:
1402         if (nbc) {
1403                 if (nbc->backing_bdev)
1404                         blkdev_put(nbc->backing_bdev,
1405                                    FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1406                 if (nbc->md_bdev)
1407                         blkdev_put(nbc->md_bdev,
1408                                    FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1409                 kfree(nbc);
1410         }
1411         lc_destroy(resync_lru);
1412
1413         drbd_adm_finish(info, retcode);
1414         return 0;
1415 }
1416
1417 /* Detaching the disk is a multi-stage process.  First we need to lock
1418  * out application IO, in-flight IO, and IO stuck in drbd_al_begin_io.
1419  * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1420  * internal references as well.
1421  * Only then have we finally detached. */
1422 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
1423 {
1424         struct drbd_conf *mdev;
1425         enum drbd_ret_code retcode;
1426
1427         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1428         if (!adm_ctx.reply_skb)
1429                 return retcode;
1430         if (retcode != NO_ERROR)
1431                 goto out;
1432
1433         mdev = adm_ctx.mdev;
1434         drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
1435         retcode = drbd_request_state(mdev, NS(disk, D_DISKLESS));
1436         wait_event(mdev->misc_wait,
1437                         mdev->state.disk != D_DISKLESS ||
1438                         !atomic_read(&mdev->local_cnt));
1439         drbd_resume_io(mdev);
1440 out:
1441         drbd_adm_finish(info, retcode);
1442         return 0;
1443 }
1444
1445 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
1446 {
1447         char hmac_name[CRYPTO_MAX_ALG_NAME];
1448         struct drbd_conf *mdev;
1449         struct net_conf *new_conf = NULL;
1450         struct crypto_hash *tfm = NULL;
1451         struct crypto_hash *integrity_w_tfm = NULL;
1452         struct crypto_hash *integrity_r_tfm = NULL;
1453         void *int_dig_out = NULL;
1454         void *int_dig_in = NULL;
1455         void *int_dig_vv = NULL;
1456         struct drbd_tconn *oconn;
1457         struct drbd_tconn *tconn;
1458         struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
1459         enum drbd_ret_code retcode;
1460         int i;
1461         int err;
1462
1463         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
1464         if (!adm_ctx.reply_skb)
1465                 return retcode;
1466         if (retcode != NO_ERROR)
1467                 goto out;
1468
1469         tconn = adm_ctx.tconn;
1470         conn_reconfig_start(tconn);
1471
1472         if (tconn->cstate > C_STANDALONE) {
1473                 retcode = ERR_NET_CONFIGURED;
1474                 goto fail;
1475         }
1476
1477         /* allocation not in the IO path, drbdsetup / netlink process context */
1478         new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
1479         if (!new_conf) {
1480                 retcode = ERR_NOMEM;
1481                 goto fail;
1482         }
1483
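        /* Start from the compiled-in defaults; net_conf_from_attrs() below
         * fills in whatever the netlink message supplies. */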
1484         new_conf->timeout          = DRBD_TIMEOUT_DEF;
1485         new_conf->try_connect_int  = DRBD_CONNECT_INT_DEF;
1486         new_conf->ping_int         = DRBD_PING_INT_DEF;
1487         new_conf->max_epoch_size   = DRBD_MAX_EPOCH_SIZE_DEF;
1488         new_conf->max_buffers      = DRBD_MAX_BUFFERS_DEF;
1489         new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
1490         new_conf->sndbuf_size      = DRBD_SNDBUF_SIZE_DEF;
1491         new_conf->rcvbuf_size      = DRBD_RCVBUF_SIZE_DEF;
1492         new_conf->ko_count         = DRBD_KO_COUNT_DEF;
1493         new_conf->after_sb_0p      = DRBD_AFTER_SB_0P_DEF;
1494         new_conf->after_sb_1p      = DRBD_AFTER_SB_1P_DEF;
1495         new_conf->after_sb_2p      = DRBD_AFTER_SB_2P_DEF;
1496         new_conf->want_lose        = 0;
1497         new_conf->two_primaries    = 0;
1498         new_conf->wire_protocol    = DRBD_PROT_C;
1499         new_conf->ping_timeo       = DRBD_PING_TIMEO_DEF;
1500         new_conf->rr_conflict      = DRBD_RR_CONFLICT_DEF;
1501         new_conf->on_congestion    = DRBD_ON_CONGESTION_DEF;
1502         new_conf->cong_extents     = DRBD_CONG_EXTENTS_DEF;
1503
1504         err = net_conf_from_attrs(new_conf, info->attrs);
1505         if (err) {
1506                 retcode = ERR_MANDATORY_TAG;
1507                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1508                 goto fail;
1509         }
1510
1511         if (new_conf->two_primaries
1512             && (new_conf->wire_protocol != DRBD_PROT_C)) {
1513                 retcode = ERR_NOT_PROTO_C;
1514                 goto fail;
1515         }
1516
1517         idr_for_each_entry(&tconn->volumes, mdev, i) {
1518                 if (get_ldev(mdev)) {
1519                         enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
1520                         put_ldev(mdev);
1521                         if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
1522                                 retcode = ERR_STONITH_AND_PROT_A;
1523                                 goto fail;
1524                         }
1525                 }
1526                 if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
1527                         retcode = ERR_DISCARD;
1528                         goto fail;
1529                 }
1530                 if (!mdev->bitmap) {
1531                         if (drbd_bm_init(mdev)) {
1532                                 retcode = ERR_NOMEM;
1533                                 goto fail;
1534                         }
1535                 }
1536         }
1537
1538         if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) {
1539                 retcode = ERR_CONG_NOT_PROTO_A;
1540                 goto fail;
1541         }
1542
1543         retcode = NO_ERROR;
1544
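        /* Refuse addresses that are already in use: compare both the local
         * and the peer address against every other configured connection. */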
1545         new_my_addr = (struct sockaddr *)&new_conf->my_addr;
1546         new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
1547         list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
1548                 if (oconn == tconn)
1549                         continue;
1550                 if (get_net_conf(oconn)) {
1551                         taken_addr = (struct sockaddr *)&oconn->net_conf->my_addr;
1552                         if (new_conf->my_addr_len == oconn->net_conf->my_addr_len &&
1553                             !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
1554                                 retcode = ERR_LOCAL_ADDR;
1555
1556                         taken_addr = (struct sockaddr *)&oconn->net_conf->peer_addr;
1557                         if (new_conf->peer_addr_len == oconn->net_conf->peer_addr_len &&
1558                             !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
1559                                 retcode = ERR_PEER_ADDR;
1560
1561                         put_net_conf(oconn);
1562                         if (retcode != NO_ERROR)
1563                                 goto fail;
1564                 }
1565         }
1566
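        /* If a shared secret is configured, allocate the HMAC transform used
         * for CRAM-HMAC peer authentication; e.g. a cram_hmac_alg of "sha1"
         * becomes "hmac(sha1)" for the crypto API. */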
1567         if (new_conf->cram_hmac_alg[0] != 0) {
1568                 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
1569                         new_conf->cram_hmac_alg);
1570                 tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
1571                 if (IS_ERR(tfm)) {
1572                         tfm = NULL;
1573                         retcode = ERR_AUTH_ALG;
1574                         goto fail;
1575                 }
1576
1577                 if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
1578                         retcode = ERR_AUTH_ALG_ND;
1579                         goto fail;
1580                 }
1581         }
1582
1583         if (new_conf->integrity_alg[0]) {
1584                 integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
1585                 if (IS_ERR(integrity_w_tfm)) {
1586                         integrity_w_tfm = NULL;
1587                         retcode = ERR_INTEGRITY_ALG;
1588                         goto fail;
1589                 }
1590
1591                 if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
1592                         retcode = ERR_INTEGRITY_ALG_ND;
1593                         goto fail;
1594                 }
1595
1596                 integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
1597                 if (IS_ERR(integrity_r_tfm)) {
1598                         integrity_r_tfm = NULL;
1599                         retcode = ERR_INTEGRITY_ALG;
1600                         goto fail;
1601                 }
1602         }
1603
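        /* Make sure the shared secret is NUL terminated, whatever userspace sent. */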
1604         ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
1605
1606         /* allocation not in the IO path, drbdsetup / netlink process context */
1607         if (integrity_w_tfm) {
1608                 i = crypto_hash_digestsize(integrity_w_tfm);
1609                 int_dig_out = kmalloc(i, GFP_KERNEL);
1610                 if (!int_dig_out) {
1611                         retcode = ERR_NOMEM;
1612                         goto fail;
1613                 }
1614                 int_dig_in = kmalloc(i, GFP_KERNEL);
1615                 if (!int_dig_in) {
1616                         retcode = ERR_NOMEM;
1617                         goto fail;
1618                 }
1619                 int_dig_vv = kmalloc(i, GFP_KERNEL);
1620                 if (!int_dig_vv) {
1621                         retcode = ERR_NOMEM;
1622                         goto fail;
1623                 }
1624         }
1625
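        /* All allocations done.  Flush the work queue, then commit the new
         * net_conf and crypto transforms under req_lock and move the
         * connection to C_UNCONNECTED. */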
1626         conn_flush_workqueue(tconn);
1627         spin_lock_irq(&tconn->req_lock);
1628         if (tconn->net_conf != NULL) {
1629                 retcode = ERR_NET_CONFIGURED;
1630                 spin_unlock_irq(&tconn->req_lock);
1631                 goto fail;
1632         }
1633         tconn->net_conf = new_conf;
1634
1635         crypto_free_hash(tconn->cram_hmac_tfm);
1636         tconn->cram_hmac_tfm = tfm;
1637
1638         crypto_free_hash(tconn->integrity_w_tfm);
1639         tconn->integrity_w_tfm = integrity_w_tfm;
1640
1641         crypto_free_hash(tconn->integrity_r_tfm);
1642         tconn->integrity_r_tfm = integrity_r_tfm;
1643
1644         kfree(tconn->int_dig_out);
1645         kfree(tconn->int_dig_in);
1646         kfree(tconn->int_dig_vv);
1647         tconn->int_dig_out = int_dig_out;
1648         tconn->int_dig_in  = int_dig_in;
1649         tconn->int_dig_vv  = int_dig_vv;
1650         retcode = _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
1651         spin_unlock_irq(&tconn->req_lock);
1652
1653         idr_for_each_entry(&tconn->volumes, mdev, i) {
1654                 mdev->send_cnt = 0;
1655                 mdev->recv_cnt = 0;
1656                 kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1657         }
1658         conn_reconfig_done(tconn);
1659         drbd_adm_finish(info, retcode);
1660         return 0;
1661
1662 fail:
1663         kfree(int_dig_out);
1664         kfree(int_dig_in);
1665         kfree(int_dig_vv);
1666         crypto_free_hash(tfm);
1667         crypto_free_hash(integrity_w_tfm);
1668         crypto_free_hash(integrity_r_tfm);
1669         kfree(new_conf);
1670
1671         conn_reconfig_done(tconn);
1672 out:
1673         drbd_adm_finish(info, retcode);
1674         return 0;
1675 }
1676
1677 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
1678 {
1679         struct disconnect_parms parms;
1680         struct drbd_tconn *tconn;
1681         enum drbd_ret_code retcode;
1682         int err;
1683
1684         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
1685         if (!adm_ctx.reply_skb)
1686                 return retcode;
1687         if (retcode != NO_ERROR)
1688                 goto fail;
1689
1690         tconn = adm_ctx.tconn;
1691         memset(&parms, 0, sizeof(parms));
1692         if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
1693                 err = disconnect_parms_from_attrs(&parms, info->attrs);
1694                 if (err) {
1695                         retcode = ERR_MANDATORY_TAG;
1696                         drbd_msg_put_info(from_attrs_err_to_txt(err));
1697                         goto fail;
1698                 }
1699         }
1700
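        /* A forced disconnect skips the graceful state negotiation below and
         * goes straight to C_DISCONNECTING with CS_HARD. */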
1701         if (parms.force_disconnect) {
1702                 spin_lock_irq(&tconn->req_lock);
1703                 if (tconn->cstate >= C_WF_CONNECTION)
1704                         _conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
1705                 spin_unlock_irq(&tconn->req_lock);
1706                 goto done;
1707         }
1708
1709         retcode = conn_request_state(tconn, NS(conn, C_DISCONNECTING), 0);
1710
1711         if (retcode == SS_NOTHING_TO_DO)
1712                 goto done;
1713         else if (retcode == SS_ALREADY_STANDALONE)
1714                 goto done;
1715         else if (retcode == SS_PRIMARY_NOP) {
1716                 /* Our state checking code wants to see the peer outdated. */
1717                 retcode = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
1718                                                         pdsk, D_OUTDATED), CS_VERBOSE);
1719         } else if (retcode == SS_CW_FAILED_BY_PEER) {
1720                 /* The peer probably wants to see us outdated. */
1721                 retcode = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
1722                                                         disk, D_OUTDATED), 0);
1723                 if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
1724                         conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
1725                         retcode = SS_SUCCESS;
1726                 }
1727         }
1728
1729         if (retcode < SS_SUCCESS)
1730                 goto fail;
1731
1732         if (wait_event_interruptible(tconn->ping_wait,
1733                                      tconn->cstate != C_DISCONNECTING)) {
1734                 /* Do not test for mdev->state.conn == C_STANDALONE, since
1735                    someone else might connect us in the meantime! */
1736                 retcode = ERR_INTR;
1737                 goto fail;
1738         }
1739
1740  done:
1741         retcode = NO_ERROR;
1742  fail:
1743         drbd_adm_finish(info, retcode);
1744         return 0;
1745 }
1746
1747 void resync_after_online_grow(struct drbd_conf *mdev)
1748 {
1749         int iass; /* I am sync source */
1750
1751         dev_info(DEV, "Resync of new storage after online grow\n");
1752         if (mdev->state.role != mdev->state.peer)
1753                 iass = (mdev->state.role == R_PRIMARY);
1754         else
1755                 iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
1756
1757         if (iass)
1758                 drbd_start_resync(mdev, C_SYNC_SOURCE);
1759         else
1760                 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
1761 }
1762
1763 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
1764 {
1765         struct resize_parms rs;
1766         struct drbd_conf *mdev;
1767         enum drbd_ret_code retcode;
1768         enum determine_dev_size dd;
1769         enum dds_flags ddsf;
1770         int err;
1771
1772         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1773         if (!adm_ctx.reply_skb)
1774                 return retcode;
1775         if (retcode != NO_ERROR)
1776                 goto fail;
1777
1778         memset(&rs, 0, sizeof(struct resize_parms));
1779         if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
1780                 err = resize_parms_from_attrs(&rs, info->attrs);
1781                 if (err) {
1782                         retcode = ERR_MANDATORY_TAG;
1783                         drbd_msg_put_info(from_attrs_err_to_txt(err));
1784                         goto fail;
1785                 }
1786         }
1787
1788         mdev = adm_ctx.mdev;
1789         if (mdev->state.conn > C_CONNECTED) {
1790                 retcode = ERR_RESIZE_RESYNC;
1791                 goto fail;
1792         }
1793
1794         if (mdev->state.role == R_SECONDARY &&
1795             mdev->state.peer == R_SECONDARY) {
1796                 retcode = ERR_NO_PRIMARY;
1797                 goto fail;
1798         }
1799
1800         if (!get_ldev(mdev)) {
1801                 retcode = ERR_NO_DISK;
1802                 goto fail;
1803         }
1804
1805         if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
1806                 retcode = ERR_NEED_APV_93;
1807                 goto fail;
1808         }
1809
1810         if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
1811                 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
1812
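        /* Apply the requested explicit size, if any, then recompute the device
         * size with the chosen flags (forced resize, skip resync of new blocks). */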
1813         mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
1814         ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
1815         dd = drbd_determine_dev_size(mdev, ddsf);
1816         drbd_md_sync(mdev);
1817         put_ldev(mdev);
1818         if (dd == dev_size_error) {
1819                 retcode = ERR_NOMEM_BITMAP;
1820                 goto fail;
1821         }
1822
1823         if (mdev->state.conn == C_CONNECTED) {
1824                 if (dd == grew)
1825                         set_bit(RESIZE_PENDING, &mdev->flags);
1826
1827                 drbd_send_uuids(mdev);
1828                 drbd_send_sizes(mdev, 1, ddsf);
1829         }
1830
1831  fail:
1832         drbd_adm_finish(info, retcode);
1833         return 0;
1834 }
1835
1836 int drbd_adm_syncer(struct sk_buff *skb, struct genl_info *info)
1837 {
1838         struct drbd_conf *mdev;
1839         enum drbd_ret_code retcode;
1840         int err;
1841         int ovr; /* online verify running */
1842         int rsr; /* re-sync running */
1843         struct crypto_hash *verify_tfm = NULL;
1844         struct crypto_hash *csums_tfm = NULL;
1845         struct syncer_conf sc;
1846         cpumask_var_t new_cpu_mask;
1847         int *rs_plan_s = NULL;
1848         int fifo_size;
1849
1850         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1851         if (!adm_ctx.reply_skb)
1852                 return retcode;
1853         if (retcode != NO_ERROR)
1854                 goto fail;
1855         mdev = adm_ctx.mdev;
1856
1857         if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
1858                 retcode = ERR_NOMEM;
1859                 drbd_msg_put_info("unable to allocate cpumask");
1860                 goto fail;
1861         }
1862
1863         if (((struct drbd_genlmsghdr *)info->userhdr)->flags
1864                         & DRBD_GENL_F_SET_DEFAULTS) {
1865                 memset(&sc, 0, sizeof(struct syncer_conf));
1866                 sc.rate       = DRBD_RATE_DEF;
1867                 sc.after      = DRBD_AFTER_DEF;
1868                 sc.al_extents = DRBD_AL_EXTENTS_DEF;
1869                 sc.on_no_data  = DRBD_ON_NO_DATA_DEF;
1870                 sc.c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF;
1871                 sc.c_delay_target = DRBD_C_DELAY_TARGET_DEF;
1872                 sc.c_fill_target = DRBD_C_FILL_TARGET_DEF;
1873                 sc.c_max_rate = DRBD_C_MAX_RATE_DEF;
1874                 sc.c_min_rate = DRBD_C_MIN_RATE_DEF;
1875         } else
1876                 memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));
1877
1878         err = syncer_conf_from_attrs(&sc, info->attrs);
1879         if (err) {
1880                 retcode = ERR_MANDATORY_TAG;
1881                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1882                 goto fail;
1883         }
1884
1885         /* re-sync running */
1886         rsr = (mdev->state.conn == C_SYNC_SOURCE ||
1887                mdev->state.conn == C_SYNC_TARGET ||
1888                mdev->state.conn == C_PAUSED_SYNC_S ||
1889                mdev->state.conn == C_PAUSED_SYNC_T);
1890
1891         if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
1892                 retcode = ERR_CSUMS_RESYNC_RUNNING;
1893                 goto fail;
1894         }
1895
1896         if (!rsr && sc.csums_alg[0]) {
1897                 csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
1898                 if (IS_ERR(csums_tfm)) {
1899                         csums_tfm = NULL;
1900                         retcode = ERR_CSUMS_ALG;
1901                         goto fail;
1902                 }
1903
1904                 if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
1905                         retcode = ERR_CSUMS_ALG_ND;
1906                         goto fail;
1907                 }
1908         }
1909
1910         /* online verify running */
1911         ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);
1912
1913         if (ovr) {
1914                 if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
1915                         retcode = ERR_VERIFY_RUNNING;
1916                         goto fail;
1917                 }
1918         }
1919
1920         if (!ovr && sc.verify_alg[0]) {
1921                 verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
1922                 if (IS_ERR(verify_tfm)) {
1923                         verify_tfm = NULL;
1924                         retcode = ERR_VERIFY_ALG;
1925                         goto fail;
1926                 }
1927
1928                 if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
1929                         retcode = ERR_VERIFY_ALG_ND;
1930                         goto fail;
1931                 }
1932         }
1933
1934         /* silently ignore cpu mask on UP kernel */
1935         if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
1936                 err = __bitmap_parse(sc.cpu_mask, 32, 0,
1937                                 cpumask_bits(new_cpu_mask), nr_cpu_ids);
1938                 if (err) {
1939                         dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
1940                         retcode = ERR_CPU_MASK_PARSE;
1941                         goto fail;
1942                 }
1943         }
1944
1945         if (!expect(sc.rate >= 1))
1946                 sc.rate = 1;
1947
1948         /* clip to allowed range */
1949         if (!expect(sc.al_extents >= DRBD_AL_EXTENTS_MIN))
1950                 sc.al_extents = DRBD_AL_EXTENTS_MIN;
1951         if (!expect(sc.al_extents <= DRBD_AL_EXTENTS_MAX))
1952                 sc.al_extents = DRBD_AL_EXTENTS_MAX;
1953
1954         /* most sanity checks done, try to assign the new sync-after
1955          * dependency.  need to hold the global lock in there,
1956          * to avoid a race in the dependency loop check. */
1957         retcode = drbd_alter_sa(mdev, sc.after);
1958         if (retcode != NO_ERROR)
1959                 goto fail;
1960
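        /* The dynamic resync controller keeps a plan fifo covering the
         * c_plan_ahead window, one entry per SLEEP_TIME step; (re)allocate it
         * here if the configured size changed. */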
1961         fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
1962         if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
1963                 rs_plan_s   = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
1964                 if (!rs_plan_s) {
1965                         dev_err(DEV, "kzalloc of fifo_buffer failed\n");
1966                         retcode = ERR_NOMEM;
1967                         goto fail;
1968                 }
1969         }
1970
1971         /* ok, assign the rest of it as well.
1972          * lock against receive_SyncParam() */
1973         spin_lock(&mdev->peer_seq_lock);
1974         mdev->sync_conf = sc;
1975
1976         if (!rsr) {
1977                 crypto_free_hash(mdev->csums_tfm);
1978                 mdev->csums_tfm = csums_tfm;
1979                 csums_tfm = NULL;
1980         }
1981
1982         if (!ovr) {
1983                 crypto_free_hash(mdev->verify_tfm);
1984                 mdev->verify_tfm = verify_tfm;
1985                 verify_tfm = NULL;
1986         }
1987
1988         if (fifo_size != mdev->rs_plan_s.size) {
1989                 kfree(mdev->rs_plan_s.values);
1990                 mdev->rs_plan_s.values = rs_plan_s;
1991                 mdev->rs_plan_s.size   = fifo_size;
1992                 mdev->rs_planed = 0;
1993                 rs_plan_s = NULL;
1994         }
1995
1996         spin_unlock(&mdev->peer_seq_lock);
1997
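        /* With the new al_extents setting in effect, lock the activity log,
         * shrink and re-check its size, and write out the meta data. */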
1998         if (get_ldev(mdev)) {
1999                 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
2000                 drbd_al_shrink(mdev);
2001                 err = drbd_check_al_size(mdev);
2002                 lc_unlock(mdev->act_log);
2003                 wake_up(&mdev->al_wait);
2004
2005                 put_ldev(mdev);
2006                 drbd_md_sync(mdev);
2007
2008                 if (err) {
2009                         retcode = ERR_NOMEM;
2010                         goto fail;
2011                 }
2012         }
2013
2014         if (mdev->state.conn >= C_CONNECTED)
2015                 drbd_send_sync_param(mdev, &sc);
2016
2017         if (!cpumask_equal(mdev->tconn->cpu_mask, new_cpu_mask)) {
2018                 cpumask_copy(mdev->tconn->cpu_mask, new_cpu_mask);
2019                 drbd_calc_cpu_mask(mdev->tconn);
2020                 mdev->tconn->receiver.reset_cpu_mask = 1;
2021                 mdev->tconn->asender.reset_cpu_mask = 1;
2022                 mdev->tconn->worker.reset_cpu_mask = 1;
2023         }
2024
2025         kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
2026 fail:
2027         kfree(rs_plan_s);
2028         free_cpumask_var(new_cpu_mask);
2029         crypto_free_hash(csums_tfm);
2030         crypto_free_hash(verify_tfm);
2031
2032         drbd_adm_finish(info, retcode);
2033         return 0;
2034 }
2035
2036 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
2037 {
2038         struct drbd_conf *mdev;
2039         int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2040
2041         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2042         if (!adm_ctx.reply_skb)
2043                 return retcode;
2044         if (retcode != NO_ERROR)
2045                 goto out;
2046
2047         mdev = adm_ctx.mdev;
2048
2049         /* If there is still bitmap IO pending, probably because of a previous
2050          * resync just being finished, wait for it before requesting a new resync. */
2051         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2052
2053         retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
2054
2055         if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
2056                 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2057
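        /* Without a connection, a full resync makes no sense; mark the local
         * disk D_INCONSISTENT instead, unless a peer connected in the meantime. */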
2058         while (retcode == SS_NEED_CONNECTION) {
2059                 spin_lock_irq(&mdev->tconn->req_lock);
2060                 if (mdev->state.conn < C_CONNECTED)
2061                         retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
2062                 spin_unlock_irq(&mdev->tconn->req_lock);
2063
2064                 if (retcode != SS_NEED_CONNECTION)
2065                         break;
2066
2067                 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2068         }
2069
2070 out:
2071         drbd_adm_finish(info, retcode);
2072         return 0;
2073 }
2074
2075 static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
2076 {
2077         int rv;
2078
2079         rv = drbd_bmio_set_n_write(mdev);
2080         drbd_suspend_al(mdev);
2081         return rv;
2082 }
2083
2084 static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2085                 union drbd_state mask, union drbd_state val)
2086 {
2087         enum drbd_ret_code retcode;
2088
2089         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2090         if (!adm_ctx.reply_skb)
2091                 return retcode;
2092         if (retcode != NO_ERROR)
2093                 goto out;
2094
2095         retcode = drbd_request_state(adm_ctx.mdev, mask, val);
2096 out:
2097         drbd_adm_finish(info, retcode);
2098         return 0;
2099 }
2100
2101 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
2102 {
2103         return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
2104 }
2105
2106 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
2107 {
2108         enum drbd_ret_code retcode;
2109
2110         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2111         if (!adm_ctx.reply_skb)
2112                 return retcode;
2113         if (retcode != NO_ERROR)
2114                 goto out;
2115
2116         if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
2117                 retcode = ERR_PAUSE_IS_SET;
2118 out:
2119         drbd_adm_finish(info, retcode);
2120         return 0;
2121 }
2122
2123 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
2124 {
2125         union drbd_state s;
2126         enum drbd_ret_code retcode;
2127
2128         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2129         if (!adm_ctx.reply_skb)
2130                 return retcode;
2131         if (retcode != NO_ERROR)
2132                 goto out;
2133
2134         if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
2135                 s = adm_ctx.mdev->state;
2136                 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
2137                         retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
2138                                   s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
2139                 } else {
2140                         retcode = ERR_PAUSE_IS_CLEAR;
2141                 }
2142         }
2143
2144 out:
2145         drbd_adm_finish(info, retcode);
2146         return 0;
2147 }
2148
2149 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
2150 {
2151         return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
2152 }
2153
2154 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
2155 {
2156         struct drbd_conf *mdev;
2157         int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2158
2159         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2160         if (!adm_ctx.reply_skb)
2161                 return retcode;
2162         if (retcode != NO_ERROR)
2163                 goto out;
2164
2165         mdev = adm_ctx.mdev;
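        /* Generate a still-pending new current UUID (NEW_CUR_UUID) first; then
         * clear the suspend flags and, depending on connection and disk state,
         * clean up or restart the transfer log. */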
2166         if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
2167                 drbd_uuid_new_current(mdev);
2168                 clear_bit(NEW_CUR_UUID, &mdev->flags);
2169         }
2170         drbd_suspend_io(mdev);
2171         retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
2172         if (retcode == SS_SUCCESS) {
2173                 if (mdev->state.conn < C_CONNECTED)
2174                         tl_clear(mdev->tconn);
2175                 if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
2176                         tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
2177         }
2178         drbd_resume_io(mdev);
2179
2180 out:
2181         drbd_adm_finish(info, retcode);
2182         return 0;
2183 }
2184
2185 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
2186 {
2187         return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
2188 }
2189
2190 int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
2191                 const struct sib_info *sib)
2192 {
2193         struct state_info *si = NULL; /* for sizeof(si->member); */
2194         struct nlattr *nla;
2195         int got_ldev;
2196         int got_net;
2197         int err = 0;
2198         int exclude_sensitive;
2199
2200         /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
2201          * to.  So we had better exclude sensitive information.
2202          *
2203          * If sib == NULL, this is drbd_adm_get_status, executed synchronously
2204          * in the context of the requesting user process.  Exclude sensitive
2205          * information, unless current has CAP_SYS_ADMIN.
2206          *
2207          * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
2208          * relies on the current implementation of netlink_dump(), which
2209          * executes the dump callback successively from netlink_recvmsg(),
2210          * always in the context of the receiving process */
2211         exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
2212
2213         got_ldev = get_ldev(mdev);
2214         got_net = get_net_conf(mdev->tconn);
2215
2216         /* We still need to add the connection name and volume number here;
2217          * the minor number is already in drbd_genlmsghdr. */
2218         nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
2219         if (!nla)
2220                 goto nla_put_failure;
2221         NLA_PUT_U32(skb, T_ctx_volume, mdev->vnr);
2222         NLA_PUT_STRING(skb, T_ctx_conn_name, mdev->tconn->name);
2223         nla_nest_end(skb, nla);
2224
2225         if (got_ldev)
2226                 if (disk_conf_to_skb(skb, &mdev->ldev->dc, exclude_sensitive))
2227                         goto nla_put_failure;
2228         if (got_net)
2229                 if (net_conf_to_skb(skb, mdev->tconn->net_conf, exclude_sensitive))
2230                         goto nla_put_failure;
2231
2232         if (syncer_conf_to_skb(skb, &mdev->sync_conf, exclude_sensitive))
2233                 goto nla_put_failure;
2234
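        /* Finally nest the volatile state info: state mask, ed_uuid, capacity,
         * and, while we hold a local disk reference, bitmap statistics. */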
2235         nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
2236         if (!nla)
2237                 goto nla_put_failure;
2238         NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
2239         NLA_PUT_U32(skb, T_current_state, mdev->state.i);
2240         NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
2241         NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));
2242
2243         if (got_ldev) {
2244                 NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
2245                 NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
2246                 NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
2247                 NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
2248                 if (C_SYNC_SOURCE <= mdev->state.conn &&
2249                     C_PAUSED_SYNC_T >= mdev->state.conn) {
2250                         NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
2251                         NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
2252                 }
2253         }
2254
2255         if (sib) {
2256                 switch (sib->sib_reason) {
2257                 case SIB_SYNC_PROGRESS:
2258                 case SIB_GET_STATUS_REPLY:
2259                         break;
2260                 case SIB_STATE_CHANGE:
2261                         NLA_PUT_U32(skb, T_prev_state, sib->os.i);
2262                         NLA_PUT_U32(skb, T_new_state, sib->ns.i);
2263                         break;
2264                 case SIB_HELPER_POST:
2265                         NLA_PUT_U32(skb,
2266                                 T_helper_exit_code, sib->helper_exit_code);
2267                         /* fall through */
2268                 case SIB_HELPER_PRE:
2269                         NLA_PUT_STRING(skb, T_helper, sib->helper_name);
2270                         break;
2271                 }
2272         }
2273         nla_nest_end(skb, nla);
2274
2275         if (0)
2276 nla_put_failure:
2277                 err = -EMSGSIZE;
2278         if (got_ldev)
2279                 put_ldev(mdev);
2280         if (got_net)
2281                 put_net_conf(mdev->tconn);
2282         return err;
2283 }
2284
2285 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
2286 {
2287         enum drbd_ret_code retcode;
2288         int err;
2289
2290         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2291         if (!adm_ctx.reply_skb)
2292                 return retcode;
2293         if (retcode != NO_ERROR)
2294                 goto out;
2295
2296         err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
2297         if (err) {
2298                 nlmsg_free(adm_ctx.reply_skb);
2299                 return err;
2300         }
2301 out:
2302         drbd_adm_finish(info, retcode);
2303         return 0;
2304 }
2305
2306 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
2307 {
2308         struct drbd_conf *mdev;
2309         struct drbd_genlmsghdr *dh;
2310         int minor = cb->args[0];
2311
2312         /* Open-coded, deferred idr_for_each_entry() iteration, one entry per
2313          * dump callback, resumed via cb->args[0].  This may miss entries
2314          * inserted after this dump started, or entries deleted before they
2315          * are reached.  But we need to make sure the mdev won't disappear
2316          * while we are looking at it. */
2317
2318         rcu_read_lock();
2319         mdev = idr_get_next(&minors, &minor);
2320         if (mdev) {
2321                 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
2322                                 cb->nlh->nlmsg_seq, &drbd_genl_family,
2323                                 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
2324                 if (!dh)
2325                         goto errout;
2326
2327                 D_ASSERT(mdev->minor == minor);
2328
2329                 dh->minor = minor;
2330                 dh->ret_code = NO_ERROR;
2331
2332                 if (nla_put_status_info(skb, mdev, NULL)) {
2333                         genlmsg_cancel(skb, dh);
2334                         goto errout;
2335                 }
2336                 genlmsg_end(skb, dh);
2337         }
2338
2339 errout:
2340         rcu_read_unlock();
2341         /* where to start idr_get_next with the next iteration */
2342         cb->args[0] = minor+1;
2343
2344         /* No more minors found: the skb stays empty, which terminates the dump. */
2345         return skb->len;
2346 }
2347
2348 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
2349 {
2350         enum drbd_ret_code retcode;
2351         struct timeout_parms tp;
2352         int err;
2353
2354         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2355         if (!adm_ctx.reply_skb)
2356                 return retcode;
2357         if (retcode != NO_ERROR)
2358                 goto out;
2359
2360         tp.timeout_type =
2361                 adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
2362                 test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
2363                 UT_DEFAULT;
2364
2365         err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
2366         if (err) {
2367                 nlmsg_free(adm_ctx.reply_skb);
2368                 return err;
2369         }
2370 out:
2371         drbd_adm_finish(info, retcode);
2372         return 0;
2373 }
2374
2375 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
2376 {
2377         struct drbd_conf *mdev;
2378         enum drbd_ret_code retcode;
2379
2380         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2381         if (!adm_ctx.reply_skb)
2382                 return retcode;
2383         if (retcode != NO_ERROR)
2384                 goto out;
2385
2386         mdev = adm_ctx.mdev;
2387         if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
2388                 /* resume from last known position, if possible */
2389                 struct start_ov_parms parms =
2390                         { .ov_start_sector = mdev->ov_start_sector };
2391                 int err = start_ov_parms_from_attrs(&parms, info->attrs);
2392                 if (err) {
2393                         retcode = ERR_MANDATORY_TAG;
2394                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2395                         goto out;
2396                 }
2397                 /* w_make_ov_request expects position to be aligned */
2398                 mdev->ov_start_sector = parms.ov_start_sector & ~BM_SECT_PER_BIT;
2399         }
2400         /* If there is still bitmap IO pending, e.g. previous resync or verify
2401          * just being finished, wait for it before requesting a new resync. */
2402         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2403         retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
2404 out:
2405         drbd_adm_finish(info, retcode);
2406         return 0;
2407 }
2408
2409
2410 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
2411 {
2412         struct drbd_conf *mdev;
2413         enum drbd_ret_code retcode;
2414         int skip_initial_sync = 0;
2415         int err;
2416         struct new_c_uuid_parms args;
2417
2418         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2419         if (!adm_ctx.reply_skb)
2420                 return retcode;
2421         if (retcode != NO_ERROR)
2422                 goto out_nolock;
2423
2424         mdev = adm_ctx.mdev;
2425         memset(&args, 0, sizeof(args));
2426         if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
2427                 err = new_c_uuid_parms_from_attrs(&args, info->attrs);
2428                 if (err) {
2429                         retcode = ERR_MANDATORY_TAG;
2430                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2431                         goto out_nolock;
2432                 }
2433         }
2434
2435         mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
2436
2437         if (!get_ldev(mdev)) {
2438                 retcode = ERR_NO_DISK;
2439                 goto out;
2440         }
2441
2442         /* this is "skip initial sync": assume the data to be clean */
2443         if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
2444             mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
2445                 dev_info(DEV, "Preparing to skip initial sync\n");
2446                 skip_initial_sync = 1;
2447         } else if (mdev->state.conn != C_STANDALONE) {
2448                 retcode = ERR_CONNECTED;
2449                 goto out_dec;
2450         }
2451
2452         drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
2453         drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
2454
2455         if (args.clear_bm) {
2456                 err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
2457                         "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
2458                 if (err) {
2459                         dev_err(DEV, "Writing bitmap failed with %d\n", err);
2460                         retcode = ERR_IO_MD_DISK;
2461                 }
2462                 if (skip_initial_sync) {
2463                         drbd_send_uuids_skip_initial_sync(mdev);
2464                         _drbd_uuid_set(mdev, UI_BITMAP, 0);
2465                         drbd_print_uuids(mdev, "cleared bitmap UUID");
2466                         spin_lock_irq(&mdev->tconn->req_lock);
2467                         _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
2468                                         CS_VERBOSE, NULL);
2469                         spin_unlock_irq(&mdev->tconn->req_lock);
2470                 }
2471         }
2472
2473         drbd_md_sync(mdev);
2474 out_dec:
2475         put_ldev(mdev);
2476 out:
2477         mutex_unlock(mdev->state_mutex);
2478 out_nolock:
2479         drbd_adm_finish(info, retcode);
2480         return 0;
2481 }
2482
2483 static enum drbd_ret_code
2484 drbd_check_conn_name(const char *name)
2485 {
2486         if (!name || !name[0]) {
2487                 drbd_msg_put_info("connection name missing");
2488                 return ERR_MANDATORY_TAG;
2489         }
2490         /* if we want to use these in sysfs/configfs/debugfs some day,
2491          * we must not allow slashes */
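        /* e.g. "r0" is accepted, while something like "foo/bar" is rejected below */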
2492         if (strchr(name, '/')) {
2493                 drbd_msg_put_info("invalid connection name");
2494                 return ERR_INVALID_REQUEST;
2495         }
2496         return NO_ERROR;
2497 }
2498
2499 int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info)
2500 {
2501         enum drbd_ret_code retcode;
2502
2503         retcode = drbd_adm_prepare(skb, info, 0);
2504         if (!adm_ctx.reply_skb)
2505                 return retcode;
2506         if (retcode != NO_ERROR)
2507                 goto out;
2508
2509         retcode = drbd_check_conn_name(adm_ctx.conn_name);
2510         if (retcode != NO_ERROR)
2511                 goto out;
2512
2513         if (adm_ctx.tconn) {
2514                 retcode = ERR_INVALID_REQUEST;
2515                 drbd_msg_put_info("connection exists");
2516                 goto out;
2517         }
2518
2519         if (!drbd_new_tconn(adm_ctx.conn_name))
2520                 retcode = ERR_NOMEM;
2521 out:
2522         drbd_adm_finish(info, retcode);
2523         return 0;
2524 }
2525
2526 int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
2527 {
2528         struct drbd_genlmsghdr *dh = info->userhdr;
2529         enum drbd_ret_code retcode;
2530
2531         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2532         if (!adm_ctx.reply_skb)
2533                 return retcode;
2534         if (retcode != NO_ERROR)
2535                 goto out;
2536
2537         /* FIXME drop minor_count parameter, limit to MINORMASK */
2538         if (dh->minor >= minor_count) {
2539                 drbd_msg_put_info("requested minor out of range");
2540                 retcode = ERR_INVALID_REQUEST;
2541                 goto out;
2542         }
2543         /* FIXME we need a define here */
2544         if (adm_ctx.volume >= 256) {
2545                 drbd_msg_put_info("requested volume id out of range");
2546                 retcode = ERR_INVALID_REQUEST;
2547                 goto out;
2548         }
2549
2550         retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
2551 out:
2552         drbd_adm_finish(info, retcode);
2553         return 0;
2554 }
2555
2556 int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
2557 {
2558         struct drbd_conf *mdev;
2559         enum drbd_ret_code retcode;
2560
2561         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2562         if (!adm_ctx.reply_skb)
2563                 return retcode;
2564         if (retcode != NO_ERROR)
2565                 goto out;
2566
2567         mdev = adm_ctx.mdev;
2568         if (mdev->state.disk == D_DISKLESS &&
2569             mdev->state.conn == C_STANDALONE &&
2570             mdev->state.role == R_SECONDARY) {
2571                 drbd_delete_device(mdev_to_minor(mdev));
2572                 retcode = NO_ERROR;
2573         } else
2574                 retcode = ERR_MINOR_CONFIGURED;
2575 out:
2576         drbd_adm_finish(info, retcode);
2577         return 0;
2578 }
2579
2580 int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info)
2581 {
2582         enum drbd_ret_code retcode;
2583
2584         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2585         if (!adm_ctx.reply_skb)
2586                 return retcode;
2587         if (retcode != NO_ERROR)
2588                 goto out;
2589
2590         if (conn_lowest_minor(adm_ctx.tconn) < 0) {
2591                 drbd_free_tconn(adm_ctx.tconn);
2592                 retcode = NO_ERROR;
2593         } else {
2594                 retcode = ERR_CONN_IN_USE;
2595         }
2596
2597 out:
2598         drbd_adm_finish(info, retcode);
2599         return 0;
2600 }
2601
2602 void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
2603 {
2604         static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
2605         struct sk_buff *msg;
2606         struct drbd_genlmsghdr *d_out;
2607         unsigned seq;
2608         int err = -ENOMEM;
2609
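        /* Build a DRBD_EVENT message carrying the full status info and send it
         * via drbd_genl_multicast_events(); -ESRCH (no listeners) is not
         * treated as an error. */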
2610         seq = atomic_inc_return(&drbd_genl_seq);
2611         msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
2612         if (!msg)
2613                 goto failed;
2614
2615         err = -EMSGSIZE;
2616         d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
2617         if (!d_out) /* cannot happen, but just in case. */
2618                 goto nla_put_failure;
2619         d_out->minor = mdev_to_minor(mdev);
2620         d_out->ret_code = 0;
2621
2622         if (nla_put_status_info(msg, mdev, sib))
2623                 goto nla_put_failure;
2624         genlmsg_end(msg, d_out);
2625         err = drbd_genl_multicast_events(msg, 0);
2626         /* msg has been consumed or freed in netlink_broadcast() */
2627         if (err && err != -ESRCH)
2628                 goto failed;
2629
2630         return;
2631
2632 nla_put_failure:
2633         nlmsg_free(msg);
2634 failed:
2635         dev_err(DEV, "Error %d while broadcasting event. "
2636                         "Event seq:%u sib_reason:%u\n",
2637                         err, seq, sib->sib_reason);
2638 }