drbd: drbd_new_dev_size() gets the user-requested disk_size as argument
firefly-linux-kernel-4.4.55.git: drivers/block/drbd/drbd_nl.c
1 /*
2    drbd_nl.c
3
4    This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
5
6    Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
7    Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
8    Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
9
10    drbd is free software; you can redistribute it and/or modify
11    it under the terms of the GNU General Public License as published by
12    the Free Software Foundation; either version 2, or (at your option)
13    any later version.
14
15    drbd is distributed in the hope that it will be useful,
16    but WITHOUT ANY WARRANTY; without even the implied warranty of
17    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18    GNU General Public License for more details.
19
20    You should have received a copy of the GNU General Public License
21    along with drbd; see the file COPYING.  If not, write to
22    the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
23
24  */
25
26 #include <linux/module.h>
27 #include <linux/drbd.h>
28 #include <linux/in.h>
29 #include <linux/fs.h>
30 #include <linux/file.h>
31 #include <linux/slab.h>
32 #include <linux/blkpg.h>
33 #include <linux/cpumask.h>
34 #include "drbd_int.h"
35 #include "drbd_req.h"
36 #include "drbd_wrappers.h"
37 #include <asm/unaligned.h>
38 #include <linux/drbd_limits.h>
39 #include <linux/kthread.h>
40
41 #include <net/genetlink.h>
42
43 /* .doit */
44 // int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
45 // int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
46
47 int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
48 int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);
49
50 int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info);
51 int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info);
52 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);
53
54 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
55 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
56 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
57 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
58 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
59 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
60 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
61 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
62 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
63 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
64 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
65 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
66 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
67 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
68 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
69 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
70 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
71 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
72 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
73 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
74 /* .dumpit */
75 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
76
77 #include <linux/drbd_genl_api.h>
78 #include <linux/genl_magic_func.h>
79
80 /* used as holder in blkdev_get_by_path, to claim our meta data device(s) */
81 static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
82
83 /* Configuration is strictly serialized, because generic netlink message
84  * processing is strictly serialized by the genl_lock().
85  * Which means we can use one static global drbd_config_context struct.
86  */
87 static struct drbd_config_context {
88         /* assigned from drbd_genlmsghdr */
89         unsigned int minor;
90         /* assigned from request attributes, if present */
91         unsigned int volume;
92 #define VOLUME_UNSPECIFIED              (-1U)
93         /* pointer into the request skb,
94          * limited lifetime! */
95         char *conn_name;
96
97         /* reply buffer */
98         struct sk_buff *reply_skb;
99         /* pointer into reply buffer */
100         struct drbd_genlmsghdr *reply_dh;
101         /* resolved from attributes, if possible */
102         struct drbd_conf *mdev;
103         struct drbd_tconn *tconn;
104 } adm_ctx;
105
106 static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
107 {
108         genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
109         if (genlmsg_reply(skb, info))
110                 printk(KERN_ERR "drbd: error sending genl reply\n");
111 }
112
113 /* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
114  * reason it could fail is lack of space in the skb, and there are 4k available. */
115 int drbd_msg_put_info(const char *info)
116 {
117         struct sk_buff *skb = adm_ctx.reply_skb;
118         struct nlattr *nla;
119         int err = -EMSGSIZE;
120
121         if (!info || !info[0])
122                 return 0;
123
124         nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
125         if (!nla)
126                 return err;
127
128         err = nla_put_string(skb, T_info_text, info);
129         if (err) {
130                 nla_nest_cancel(skb, nla);
131                 return err;
132         } else
133                 nla_nest_end(skb, nla);
134         return 0;
135 }
136
137 /* This would be a good candidate for a "pre_doit" hook,
138  * and per-family private info->pointers.
139  * But we need to stay compatible with older kernels.
140  * If it returns successfully, adm_ctx members are valid.
141  */
142 #define DRBD_ADM_NEED_MINOR     1
143 #define DRBD_ADM_NEED_CONN      2
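/* DRBD_ADM_NEED_MINOR: reply with ERR_MINOR_INVALID unless the minor in the
 * request resolves to an mdev; DRBD_ADM_NEED_CONN: reply with
 * ERR_INVALID_REQUEST unless the connection name resolves to a tconn. */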
144 static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
145                 unsigned flags)
146 {
147         struct drbd_genlmsghdr *d_in = info->userhdr;
148         const u8 cmd = info->genlhdr->cmd;
149         int err;
150
151         memset(&adm_ctx, 0, sizeof(adm_ctx));
152
153         /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
154         if (cmd != DRBD_ADM_GET_STATUS
155         && security_netlink_recv(skb, CAP_SYS_ADMIN))
156                return -EPERM;
157
158         adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
159         if (!adm_ctx.reply_skb)
160                 goto fail;
161
162         adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
163                                         info, &drbd_genl_family, 0, cmd);
164         /* a put of a few bytes into a fresh skb of >= 4k will always succeed,
165          * but check anyway */
166         if (!adm_ctx.reply_dh)
167                 goto fail;
168
169         adm_ctx.reply_dh->minor = d_in->minor;
170         adm_ctx.reply_dh->ret_code = NO_ERROR;
171
172         if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
173                 struct nlattr *nla;
174                 /* parse and validate only */
175                 err = drbd_cfg_context_from_attrs(NULL, info);
176                 if (err)
177                         goto fail;
178
179                 /* It was present, and valid,
180                  * copy it over to the reply skb. */
181                 err = nla_put_nohdr(adm_ctx.reply_skb,
182                                 info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
183                                 info->attrs[DRBD_NLA_CFG_CONTEXT]);
184                 if (err)
185                         goto fail;
186
187                 /* and assign stuff to the global adm_ctx */
188                 nla = nested_attr_tb[__nla_type(T_ctx_volume)];
189                 adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
190                 nla = nested_attr_tb[__nla_type(T_ctx_conn_name)];
191                 if (nla)
192                         adm_ctx.conn_name = nla_data(nla);
193         } else
194                 adm_ctx.volume = VOLUME_UNSPECIFIED;
195
196         adm_ctx.minor = d_in->minor;
197         adm_ctx.mdev = minor_to_mdev(d_in->minor);
198         adm_ctx.tconn = conn_get_by_name(adm_ctx.conn_name);
199
200         if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
201                 drbd_msg_put_info("unknown minor");
202                 return ERR_MINOR_INVALID;
203         }
204         if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
205                 drbd_msg_put_info("unknown connection");
206                 return ERR_INVALID_REQUEST;
207         }
208
209         /* some more paranoia, if the request was over-determined */
210         if (adm_ctx.mdev && adm_ctx.tconn &&
211             adm_ctx.mdev->tconn != adm_ctx.tconn) {
212                 pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
213                                 adm_ctx.minor, adm_ctx.conn_name, adm_ctx.mdev->tconn->name);
214                 drbd_msg_put_info("minor exists in different connection");
215                 return ERR_INVALID_REQUEST;
216         }
217         if (adm_ctx.mdev &&
218             adm_ctx.volume != VOLUME_UNSPECIFIED &&
219             adm_ctx.volume != adm_ctx.mdev->vnr) {
220                 pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
221                                 adm_ctx.minor, adm_ctx.volume,
222                                 adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
223                 drbd_msg_put_info("minor exists as different volume");
224                 return ERR_INVALID_REQUEST;
225         }
226
227         return NO_ERROR;
228
229 fail:
230         nlmsg_free(adm_ctx.reply_skb);
231         adm_ctx.reply_skb = NULL;
232         return -ENOMEM;
233 }
234
235 static int drbd_adm_finish(struct genl_info *info, int retcode)
236 {
237         struct nlattr *nla;
238         const char *conn_name = NULL;
239
240         if (adm_ctx.tconn) {
241                 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
242                 adm_ctx.tconn = NULL;
243         }
244
245         if (!adm_ctx.reply_skb)
246                 return -ENOMEM;
247
248         adm_ctx.reply_dh->ret_code = retcode;
249
250         nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
251         if (nla) {
252                 nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
253                 if (nla)
254                         conn_name = nla_data(nla);
255         }
256
257         drbd_adm_send_reply(adm_ctx.reply_skb, info);
258         return 0;
259 }
260
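/* Fills in the DRBD_PEER_AF and DRBD_PEER_ADDRESS environment strings for the
 * userspace helpers.  envp[3] and envp[4] are the fixed-size char[20]/char[60]
 * buffers provided by drbd_khelper() and conn_khelper() below. */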
261 static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
262 {
263         char *afs;
264         struct net_conf *nc;
265
266         rcu_read_lock();
267         nc = rcu_dereference(tconn->net_conf);
268         if (nc) {
269                 switch (((struct sockaddr *)nc->peer_addr)->sa_family) {
270                 case AF_INET6:
271                         afs = "ipv6";
272                         snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
273                                  &((struct sockaddr_in6 *)nc->peer_addr)->sin6_addr);
274                         break;
275                 case AF_INET:
276                         afs = "ipv4";
277                         snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
278                                  &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
279                         break;
280                 default:
281                         afs = "ssocks";
282                         snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
283                                  &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
284                 }
285                 snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
286         }
287         rcu_read_unlock();
288 }
289
290 int drbd_khelper(struct drbd_conf *mdev, char *cmd)
291 {
292         char *envp[] = { "HOME=/",
293                         "TERM=linux",
294                         "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
295                          (char[20]) { }, /* address family */
296                          (char[60]) { }, /* address */
297                         NULL };
298         char mb[12];
299         char *argv[] = {usermode_helper, cmd, mb, NULL };
300         struct sib_info sib;
301         int ret;
302
303         snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
304         setup_khelper_env(mdev->tconn, envp);
305
306         /* The helper may take some time.
307          * write out any unsynced meta data changes now */
308         drbd_md_sync(mdev);
309
310         dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
311         sib.sib_reason = SIB_HELPER_PRE;
312         sib.helper_name = cmd;
313         drbd_bcast_event(mdev, &sib);
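        /* The final argument (1) makes call_usermodehelper() wait for the helper
         * to finish (UMH_WAIT_PROC); its exit status comes back wait()-encoded,
         * hence the (ret >> 8) & 0xff below. */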
314         ret = call_usermodehelper(usermode_helper, argv, envp, 1);
315         if (ret)
316                 dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
317                                 usermode_helper, cmd, mb,
318                                 (ret >> 8) & 0xff, ret);
319         else
320                 dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
321                                 usermode_helper, cmd, mb,
322                                 (ret >> 8) & 0xff, ret);
323         sib.sib_reason = SIB_HELPER_POST;
324         sib.helper_exit_code = ret;
325         drbd_bcast_event(mdev, &sib);
326
327         if (ret < 0) /* Ignore any ERRNOs we got. */
328                 ret = 0;
329
330         return ret;
331 }
332
333 static void conn_md_sync(struct drbd_tconn *tconn)
334 {
335         struct drbd_conf *mdev;
336         int vnr;
337
338         down_read(&drbd_cfg_rwsem);
339         idr_for_each_entry(&tconn->volumes, mdev, vnr)
340                 drbd_md_sync(mdev);
341         up_read(&drbd_cfg_rwsem);
342 }
343
344 int conn_khelper(struct drbd_tconn *tconn, char *cmd)
345 {
346         char *envp[] = { "HOME=/",
347                         "TERM=linux",
348                         "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
349                          (char[20]) { }, /* address family */
350                          (char[60]) { }, /* address */
351                         NULL };
352         char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
353         int ret;
354
355         setup_khelper_env(tconn, envp);
356         conn_md_sync(tconn);
357
358         conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
359         /* TODO: conn_bcast_event() ?? */
360
361         ret = call_usermodehelper(usermode_helper, argv, envp, 1);
362         if (ret)
363                 conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
364                           usermode_helper, cmd, tconn->name,
365                           (ret >> 8) & 0xff, ret);
366         else
367                 conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
368                           usermode_helper, cmd, tconn->name,
369                           (ret >> 8) & 0xff, ret);
370         /* TODO: conn_bcast_event() ?? */
371
372         if (ret < 0) /* Ignore any ERRNOs we got. */
373                 ret = 0;
374
375         return ret;
376 }
377
378 static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
379 {
380         enum drbd_fencing_p fp = FP_NOT_AVAIL;
381         struct drbd_conf *mdev;
382         int vnr;
383
384         rcu_read_lock();
385         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
386                 if (get_ldev_if_state(mdev, D_CONSISTENT)) {
387                         fp = max_t(enum drbd_fencing_p, fp, mdev->ldev->dc.fencing);
388                         put_ldev(mdev);
389                 }
390         }
391         rcu_read_unlock();
392
393         return fp;
394 }
395
396 bool conn_try_outdate_peer(struct drbd_tconn *tconn)
397 {
398         union drbd_state mask = { };
399         union drbd_state val = { };
400         enum drbd_fencing_p fp;
401         char *ex_to_string;
402         int r;
403
404         if (tconn->cstate >= C_WF_REPORT_PARAMS) {
405                 conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
406                 return false;
407         }
408
409         fp = highest_fencing_policy(tconn);
410         switch (fp) {
411         case FP_NOT_AVAIL:
412                 conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
413                 goto out;
414         case FP_DONT_CARE:
415                 return true;
416         default: ;
417         }
418
419         r = conn_khelper(tconn, "fence-peer");
420
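        /* The fence-peer helper reports its verdict via its exit status,
         * extracted below as (r >> 8) & 0xff.  Each known code selects the peer
         * disk state we may now assume; anything else means the helper is
         * broken and IO stays frozen. */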
421         switch ((r>>8) & 0xff) {
422         case 3: /* peer is inconsistent */
423                 ex_to_string = "peer is inconsistent or worse";
424                 mask.pdsk = D_MASK;
425                 val.pdsk = D_INCONSISTENT;
426                 break;
427         case 4: /* peer got outdated, or was already outdated */
428                 ex_to_string = "peer was fenced";
429                 mask.pdsk = D_MASK;
430                 val.pdsk = D_OUTDATED;
431                 break;
432         case 5: /* peer was down */
433                 if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
434                         /* we will(have) create(d) a new UUID anyways... */
435                         ex_to_string = "peer is unreachable, assumed to be dead";
436                         mask.pdsk = D_MASK;
437                         val.pdsk = D_OUTDATED;
438                 } else {
439                         ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
440                 }
441                 break;
442         case 6: /* Peer is primary, voluntarily outdate myself.
443                  * This is useful when an unconnected R_SECONDARY is asked to
444                  * become R_PRIMARY, but finds the other peer being active. */
445                 ex_to_string = "peer is active";
446                 conn_warn(tconn, "Peer is primary, outdating myself.\n");
447                 mask.disk = D_MASK;
448                 val.disk = D_OUTDATED;
449                 break;
450         case 7:
451                 if (fp != FP_STONITH)
452                         conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
453                 ex_to_string = "peer was stonithed";
454                 mask.pdsk = D_MASK;
455                 val.pdsk = D_OUTDATED;
456                 break;
457         default:
458                 /* The script is broken ... */
459                 conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
460                 return false; /* Eventually leave IO frozen */
461         }
462
463         conn_info(tconn, "fence-peer helper returned %d (%s)\n",
464                   (r>>8) & 0xff, ex_to_string);
465
466  out:
467
468         /* Not using
469            conn_request_state(tconn, mask, val, CS_VERBOSE);
470            here, because we might have been able to re-establish the connection in the
471            meantime. */
472         spin_lock_irq(&tconn->req_lock);
473         if (tconn->cstate < C_WF_REPORT_PARAMS)
474                 _conn_request_state(tconn, mask, val, CS_VERBOSE);
475         spin_unlock_irq(&tconn->req_lock);
476
477         return conn_highest_pdsk(tconn) <= D_OUTDATED;
478 }
479
480 static int _try_outdate_peer_async(void *data)
481 {
482         struct drbd_tconn *tconn = (struct drbd_tconn *)data;
483
484         conn_try_outdate_peer(tconn);
485
486         kref_put(&tconn->kref, &conn_destroy);
487         return 0;
488 }
489
490 void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
491 {
492         struct task_struct *opa;
493
494         kref_get(&tconn->kref);
495         opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
496         if (IS_ERR(opa)) {
497                 conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
498                 kref_put(&tconn->kref, &conn_destroy);
499         }
500 }
501
502 enum drbd_state_rv
503 drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
504 {
505         const int max_tries = 4;
506         enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
507         struct net_conf *nc;
508         int try = 0;
509         int forced = 0;
510         union drbd_state mask, val;
511
512         if (new_role == R_PRIMARY)
513                 request_ping(mdev->tconn); /* Detect a dead peer ASAP */
514
515         mutex_lock(mdev->state_mutex);
516
517         mask.i = 0; mask.role = R_MASK;
518         val.i  = 0; val.role  = new_role;
519
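        /* Retry the state change a few times: depending on why it failed we may
         * try to outdate the peer, force the local disk UpToDate when "force"
         * was given, or wait briefly for the peer to be declared dead before
         * giving up. */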
520         while (try++ < max_tries) {
521                 rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);
522
523                 /* in case we first succeeded to outdate,
524                  * but now suddenly could establish a connection */
525                 if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
526                         val.pdsk = 0;
527                         mask.pdsk = 0;
528                         continue;
529                 }
530
531                 if (rv == SS_NO_UP_TO_DATE_DISK && force &&
532                     (mdev->state.disk < D_UP_TO_DATE &&
533                      mdev->state.disk >= D_INCONSISTENT)) {
534                         mask.disk = D_MASK;
535                         val.disk  = D_UP_TO_DATE;
536                         forced = 1;
537                         continue;
538                 }
539
540                 if (rv == SS_NO_UP_TO_DATE_DISK &&
541                     mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
542                         D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
543
544                         if (conn_try_outdate_peer(mdev->tconn)) {
545                                 val.disk = D_UP_TO_DATE;
546                                 mask.disk = D_MASK;
547                         }
548                         continue;
549                 }
550
551                 if (rv == SS_NOTHING_TO_DO)
552                         goto out;
553                 if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
554                         if (!conn_try_outdate_peer(mdev->tconn) && force) {
555                                 dev_warn(DEV, "Forced into split brain situation!\n");
556                                 mask.pdsk = D_MASK;
557                                 val.pdsk  = D_OUTDATED;
558
559                         }
560                         continue;
561                 }
562                 if (rv == SS_TWO_PRIMARIES) {
563                         /* Maybe the peer is detected as dead very soon...
564                            retry at most once more in this case. */
565                         int timeo;
566                         rcu_read_lock();
567                         nc = rcu_dereference(mdev->tconn->net_conf);
568                         timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
569                         rcu_read_unlock();
570                         schedule_timeout_interruptible(timeo);
571                         if (try < max_tries)
572                                 try = max_tries - 1;
573                         continue;
574                 }
575                 if (rv < SS_SUCCESS) {
576                         rv = _drbd_request_state(mdev, mask, val,
577                                                 CS_VERBOSE + CS_WAIT_COMPLETE);
578                         if (rv < SS_SUCCESS)
579                                 goto out;
580                 }
581                 break;
582         }
583
584         if (rv < SS_SUCCESS)
585                 goto out;
586
587         if (forced)
588                 dev_warn(DEV, "Forced to consider local data as UpToDate!\n");
589
590         /* Wait until nothing is in flight :) */
591         wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
592
593         if (new_role == R_SECONDARY) {
594                 set_disk_ro(mdev->vdisk, true);
595                 if (get_ldev(mdev)) {
596                         mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
597                         put_ldev(mdev);
598                 }
599         } else {
600                 mutex_lock(&mdev->tconn->conf_update);
601                 nc = mdev->tconn->net_conf;
602                 if (nc)
603                         nc->want_lose = 0; /* without copy; single bit op is atomic */
604                 mutex_unlock(&mdev->tconn->conf_update);
605
606                 set_disk_ro(mdev->vdisk, false);
607                 if (get_ldev(mdev)) {
608                         if (((mdev->state.conn < C_CONNECTED ||
609                                mdev->state.pdsk <= D_FAILED)
610                               && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
611                                 drbd_uuid_new_current(mdev);
612
613                         mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
614                         put_ldev(mdev);
615                 }
616         }
617
618         /* writeout of activity log covered areas of the bitmap
619          * to stable storage done in after state change already */
620
621         if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
622                 /* if this was forced, we should consider sync */
623                 if (forced)
624                         drbd_send_uuids(mdev);
625                 drbd_send_state(mdev);
626         }
627
628         drbd_md_sync(mdev);
629
630         kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
631 out:
632         mutex_unlock(mdev->state_mutex);
633         return rv;
634 }
635
636 static const char *from_attrs_err_to_txt(int err)
637 {
638         return  err == -ENOMSG ? "required attribute missing" :
639                 err == -EOPNOTSUPP ? "unknown mandatory attribute" :
640                 err == -EEXIST ? "can not change invariant setting" :
641                 "invalid attribute value";
642 }
643
644 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
645 {
646         struct set_role_parms parms;
647         int err;
648         enum drbd_ret_code retcode;
649
650         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
651         if (!adm_ctx.reply_skb)
652                 return retcode;
653         if (retcode != NO_ERROR)
654                 goto out;
655
656         memset(&parms, 0, sizeof(parms));
657         if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
658                 err = set_role_parms_from_attrs(&parms, info);
659                 if (err) {
660                         retcode = ERR_MANDATORY_TAG;
661                         drbd_msg_put_info(from_attrs_err_to_txt(err));
662                         goto out;
663                 }
664         }
665
666         if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
667                 retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
668         else
669                 retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
670 out:
671         drbd_adm_finish(info, retcode);
672         return 0;
673 }
674
675 /* initializes the md.*_offset members, so we are able to find
676  * the on disk meta data */
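/* al_offset and bm_offset are in sectors, relative to md_offset: small positive
 * constants for external indexed meta data, negative for internal/flexible
 * meta data, where the meta data area sits at the end of the backing device. */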
677 static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
678                                        struct drbd_backing_dev *bdev)
679 {
680         sector_t md_size_sect = 0;
681         switch (bdev->dc.meta_dev_idx) {
682         default:
683                 /* v07 style fixed size indexed meta data */
684                 bdev->md.md_size_sect = MD_RESERVED_SECT;
685                 bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
686                 bdev->md.al_offset = MD_AL_OFFSET;
687                 bdev->md.bm_offset = MD_BM_OFFSET;
688                 break;
689         case DRBD_MD_INDEX_FLEX_EXT:
690                 /* just occupy the full device; unit: sectors */
691                 bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
692                 bdev->md.md_offset = 0;
693                 bdev->md.al_offset = MD_AL_OFFSET;
694                 bdev->md.bm_offset = MD_BM_OFFSET;
695                 break;
696         case DRBD_MD_INDEX_INTERNAL:
697         case DRBD_MD_INDEX_FLEX_INT:
698                 bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
699                 /* al size is still fixed */
700                 bdev->md.al_offset = -MD_AL_SECTORS;
701                 /* we need (slightly less than) ~ this many bitmap sectors: */
702                 md_size_sect = drbd_get_capacity(bdev->backing_bdev);
703                 md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
704                 md_size_sect = BM_SECT_TO_EXT(md_size_sect);
705                 md_size_sect = ALIGN(md_size_sect, 8);
706
707                 /* plus the "drbd meta data super block",
708                  * and the activity log; */
709                 md_size_sect += MD_BM_OFFSET;
710
711                 bdev->md.md_size_sect = md_size_sect;
712                 /* bitmap offset is adjusted by 'super' block size */
713                 bdev->md.bm_offset   = -md_size_sect + MD_AL_OFFSET;
714                 break;
715         }
716 }
717
718 /* input size is expected to be in KB */
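/* For example, ppsize(buf, 1048576) yields "1024 MB": the value is shifted down
 * by 10 bits per unit step (rounding up on a remainder of 512 or more in the
 * current unit) until it drops below 10000, so at most four digits are printed. */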
719 char *ppsize(char *buf, unsigned long long size)
720 {
721         /* Needs 9 bytes at max including trailing NUL:
722          * -1ULL ==> "16384 EB" */
723         static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
724         int base = 0;
725         while (size >= 10000 && base < sizeof(units)-1) {
726                 /* shift + round */
727                 size = (size >> 10) + !!(size & (1<<9));
728                 base++;
729         }
730         sprintf(buf, "%u %cB", (unsigned)size, units[base]);
731
732         return buf;
733 }
734
735 /* there is still a theoretical deadlock when called from receiver
736  * on a D_INCONSISTENT R_PRIMARY:
737  *  remote READ does inc_ap_bio, receiver would need to receive answer
738  *  packet from remote to dec_ap_bio again.
739  *  receiver receive_sizes(), comes here,
740  *  waits for ap_bio_cnt == 0. -> deadlock.
741  * but this cannot happen, actually, because:
742  *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
743  *  (not connected, or bad/no disk on peer):
744  *  see drbd_fail_request_early, ap_bio_cnt is zero.
745  *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
746  *  peer may not initiate a resize.
747  */
748 /* Note these are not to be confused with
749  * drbd_adm_suspend_io/drbd_adm_resume_io,
750  * which are (sub) state changes triggered by admin (drbdsetup),
751  * and can be long lived.
752  * This changes an mdev->flag, is triggered by drbd internals,
753  * and should be short-lived. */
754 void drbd_suspend_io(struct drbd_conf *mdev)
755 {
756         set_bit(SUSPEND_IO, &mdev->flags);
757         if (drbd_suspended(mdev))
758                 return;
759         wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
760 }
761
762 void drbd_resume_io(struct drbd_conf *mdev)
763 {
764         clear_bit(SUSPEND_IO, &mdev->flags);
765         wake_up(&mdev->misc_wait);
766 }
767
768 /**
769  * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
770  * @mdev:       DRBD device.
771  *
772  * Returns the resize outcome (unchanged, shrunk or grew), or dev_size_error on failure.
773  * You should call drbd_md_sync() after calling this function.
774  */
775 enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
776 {
777         sector_t prev_first_sect, prev_size; /* previous meta location */
778         sector_t la_size, u_size;
779         sector_t size;
780         char ppb[10];
781
782         int md_moved, la_size_changed;
783         enum determine_dev_size rv = unchanged;
784
785         /* race:
786          * application request passes inc_ap_bio,
787          * but then cannot get an AL-reference.
788          * this function later may wait on ap_bio_cnt == 0. -> deadlock.
789          *
790          * to avoid that:
791          * Suspend IO right here.
792          * still lock the act_log to not trigger ASSERTs there.
793          */
794         drbd_suspend_io(mdev);
795
796         /* no wait necessary anymore, actually we could assert that */
797         wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
798
799         prev_first_sect = drbd_md_first_sector(mdev->ldev);
800         prev_size = mdev->ldev->md.md_size_sect;
801         la_size = mdev->ldev->md.la_size_sect;
802
803         /* TODO: should only be some assert here, not (re)init... */
804         drbd_md_set_sector_offsets(mdev, mdev->ldev);
805
806         u_size = mdev->ldev->dc.disk_size;
807         size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);
808
809         if (drbd_get_capacity(mdev->this_bdev) != size ||
810             drbd_bm_capacity(mdev) != size) {
811                 int err;
812                 err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
813                 if (unlikely(err)) {
814                         /* currently there is only one error: ENOMEM! */
815                         size = drbd_bm_capacity(mdev)>>1;
816                         if (size == 0) {
817                                 dev_err(DEV, "OUT OF MEMORY! "
818                                     "Could not allocate bitmap!\n");
819                         } else {
820                                 dev_err(DEV, "BM resizing failed. "
821                                     "Leaving size unchanged at size = %lu KB\n",
822                                     (unsigned long)size);
823                         }
824                         rv = dev_size_error;
825                 }
826                 /* racy, see comments above. */
827                 drbd_set_my_capacity(mdev, size);
828                 mdev->ldev->md.la_size_sect = size;
829                 dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
830                      (unsigned long long)size>>1);
831         }
832         if (rv == dev_size_error)
833                 goto out;
834
835         la_size_changed = (la_size != mdev->ldev->md.la_size_sect);
836
837         md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
838                 || prev_size       != mdev->ldev->md.md_size_sect;
839
840         if (la_size_changed || md_moved) {
841                 int err;
842
843                 drbd_al_shrink(mdev); /* All extents inactive. */
844                 dev_info(DEV, "Writing the whole bitmap, %s\n",
845                          la_size_changed && md_moved ? "size changed and md moved" :
846                          la_size_changed ? "size changed" : "md moved");
847                 /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
848                 err = drbd_bitmap_io(mdev, &drbd_bm_write,
849                                 "size changed", BM_LOCKED_MASK);
850                 if (err) {
851                         rv = dev_size_error;
852                         goto out;
853                 }
854                 drbd_md_mark_dirty(mdev);
855         }
856
857         if (size > la_size)
858                 rv = grew;
859         if (size < la_size)
860                 rv = shrunk;
861 out:
862         lc_unlock(mdev->act_log);
863         wake_up(&mdev->al_wait);
864         drbd_resume_io(mdev);
865
866         return rv;
867 }
868
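/* Size negotiation: prefer min(local, peer) when both sizes are known; fall
 * back to the last agreed size (never above what either side offers) when the
 * peer size is unknown; a non-zero user supplied u_size overrides the result
 * unless it exceeds what is available. */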
869 sector_t
870 drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
871                   sector_t u_size, int assume_peer_has_space)
872 {
873         sector_t p_size = mdev->p_size;   /* partner's disk size. */
874         sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
875         sector_t m_size; /* my size */
876         sector_t size = 0;
877
878         m_size = drbd_get_max_capacity(bdev);
879
880         if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
881                 dev_warn(DEV, "Resize while not connected was forced by the user!\n");
882                 p_size = m_size;
883         }
884
885         if (p_size && m_size) {
886                 size = min_t(sector_t, p_size, m_size);
887         } else {
888                 if (la_size) {
889                         size = la_size;
890                         if (m_size && m_size < size)
891                                 size = m_size;
892                         if (p_size && p_size < size)
893                                 size = p_size;
894                 } else {
895                         if (m_size)
896                                 size = m_size;
897                         if (p_size)
898                                 size = p_size;
899                 }
900         }
901
902         if (size == 0)
903                 dev_err(DEV, "Both nodes diskless!\n");
904
905         if (u_size) {
906                 if (u_size > size)
907                         dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
908                             (unsigned long)u_size>>1, (unsigned long)size>>1);
909                 else
910                         size = u_size;
911         }
912
913         return size;
914 }
915
916 /**
917  * drbd_check_al_size() - Ensures that the AL is of the right size
918  * @mdev:       DRBD device.
919  *
920  * Returns -EBUSY if the current AL lru is still in use, -ENOMEM when allocation
921  * failed, and 0 on success. You should call drbd_md_sync() after calling
922  * this function.
923  */
924 static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
925 {
926         struct lru_cache *n, *t;
927         struct lc_element *e;
928         unsigned int in_use;
929         int i;
930
931         if (!expect(dc->al_extents >= DRBD_AL_EXTENTS_MIN))
932                 dc->al_extents = DRBD_AL_EXTENTS_MIN;
933
934         if (mdev->act_log &&
935             mdev->act_log->nr_elements == dc->al_extents)
936                 return 0;
937
938         in_use = 0;
939         t = mdev->act_log;
940         n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
941                 dc->al_extents, sizeof(struct lc_element), 0);
942
943         if (n == NULL) {
944                 dev_err(DEV, "Cannot allocate act_log lru!\n");
945                 return -ENOMEM;
946         }
947         spin_lock_irq(&mdev->al_lock);
948         if (t) {
949                 for (i = 0; i < t->nr_elements; i++) {
950                         e = lc_element_by_index(t, i);
951                         if (e->refcnt)
952                                 dev_err(DEV, "refcnt(%d)==%d\n",
953                                     e->lc_number, e->refcnt);
954                         in_use += e->refcnt;
955                 }
956         }
957         if (!in_use)
958                 mdev->act_log = n;
959         spin_unlock_irq(&mdev->al_lock);
960         if (in_use) {
961                 dev_err(DEV, "Activity log still in use!\n");
962                 lc_destroy(n);
963                 return -EBUSY;
964         } else {
965                 if (t)
966                         lc_destroy(t);
967         }
968         drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
969         return 0;
970 }
971
972 static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
973 {
974         struct request_queue * const q = mdev->rq_queue;
975         int max_hw_sectors = max_bio_size >> 9;
976         int max_segments = 0;
977
978         if (get_ldev_if_state(mdev, D_ATTACHING)) {
979                 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
980
981                 max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
982                 max_segments = mdev->ldev->dc.max_bio_bvecs;
983                 put_ldev(mdev);
984         }
985
986         blk_queue_logical_block_size(q, 512);
987         blk_queue_max_hw_sectors(q, max_hw_sectors);
988         /* This is the workaround for "bio would need to, but cannot, be split" */
989         blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
990         blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);
991
992         if (get_ldev_if_state(mdev, D_ATTACHING)) {
993                 struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
994
995                 blk_queue_stack_limits(q, b);
996
997                 if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
998                         dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
999                                  q->backing_dev_info.ra_pages,
1000                                  b->backing_dev_info.ra_pages);
1001                         q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
1002                 }
1003                 put_ldev(mdev);
1004         }
1005 }
1006
1007 void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
1008 {
1009         int now, new, local, peer;
1010
1011         now = queue_max_hw_sectors(mdev->rq_queue) << 9;
1012         local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
1013         peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */
1014
1015         if (get_ldev_if_state(mdev, D_ATTACHING)) {
1016                 local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
1017                 mdev->local_max_bio_size = local;
1018                 put_ldev(mdev);
1019         }
1020
1021         /* We may ignore peer limits if the peer is modern enough.
1022            From 8.3.8 onwards the peer can use multiple
1023            BIOs for a single peer_request. */
1024         if (mdev->state.conn >= C_CONNECTED) {
1025                 if (mdev->tconn->agreed_pro_version < 94)
1026                         peer = mdev->peer_max_bio_size;
1027                 else if (mdev->tconn->agreed_pro_version == 94)
1028                         peer = DRBD_MAX_SIZE_H80_PACKET;
1029                 else /* drbd 8.3.8 onwards */
1030                         peer = DRBD_MAX_BIO_SIZE;
1031         }
1032
1033         new = min_t(int, local, peer);
1034
1035         if (mdev->state.role == R_PRIMARY && new < now)
1036                 dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);
1037
1038         if (new != now)
1039                 dev_info(DEV, "max BIO size = %u\n", new);
1040
1041         drbd_setup_queue_param(mdev, new);
1042 }
1043
1044 /* Starts the worker thread */
1045 static void conn_reconfig_start(struct drbd_tconn *tconn)
1046 {
1047         drbd_thread_start(&tconn->worker);
1048         conn_flush_workqueue(tconn);
1049 }
1050
1051 /* if still unconfigured, stops worker again. */
1052 static void conn_reconfig_done(struct drbd_tconn *tconn)
1053 {
1054         bool stop_threads;
1055         spin_lock_irq(&tconn->req_lock);
1056         stop_threads = conn_all_vols_unconf(tconn);
1057         spin_unlock_irq(&tconn->req_lock);
1058         if (stop_threads) {
1059                 /* asender is implicitly stopped by receiver
1060                  * in drbd_disconnect() */
1061                 drbd_thread_stop(&tconn->receiver);
1062                 drbd_thread_stop(&tconn->worker);
1063         }
1064 }
1065
1066 /* Make sure IO is suspended before calling this function. */
1067 static void drbd_suspend_al(struct drbd_conf *mdev)
1068 {
1069         int s = 0;
1070
1071         if (!lc_try_lock(mdev->act_log)) {
1072                 dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
1073                 return;
1074         }
1075
1076         drbd_al_shrink(mdev);
1077         spin_lock_irq(&mdev->tconn->req_lock);
1078         if (mdev->state.conn < C_CONNECTED)
1079                 s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
1080         spin_unlock_irq(&mdev->tconn->req_lock);
1081         lc_unlock(mdev->act_log);
1082
1083         if (s)
1084                 dev_info(DEV, "Suspended AL updates\n");
1085 }
1086
1087
1088 static bool should_set_defaults(struct genl_info *info)
1089 {
1090         unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
1091         return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
1092 }
1093
1094 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
1095 {
1096         enum drbd_ret_code retcode;
1097         struct drbd_conf *mdev;
1098         struct disk_conf *new_disk_conf;
1099         int err, fifo_size;
1100         int *rs_plan_s = NULL;
1101
1102         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1103         if (!adm_ctx.reply_skb)
1104                 return retcode;
1105         if (retcode != NO_ERROR)
1106                 goto out;
1107
1108         mdev = adm_ctx.mdev;
1109
1110         /* we also need a disk
1111          * to change the options on */
1112         if (!get_ldev(mdev)) {
1113                 retcode = ERR_NO_DISK;
1114                 goto out;
1115         }
1116
1117 /* FIXME freeze IO, cluster wide.
1118  *
1119  * We should make sure no-one uses
1120  * some half-updated struct when we
1121  * assign it later. */
1122
1123         new_disk_conf = kmalloc(sizeof(*new_disk_conf), GFP_KERNEL);
1124         if (!new_disk_conf) {
1125                 retcode = ERR_NOMEM;
1126                 goto fail;
1127         }
1128
1129         memcpy(new_disk_conf, &mdev->ldev->dc, sizeof(*new_disk_conf));
1130         if (should_set_defaults(info))
1131                 set_disk_conf_defaults(new_disk_conf);
1132
1133         err = disk_conf_from_attrs_for_change(new_disk_conf, info);
1134         if (err) {
1135                 retcode = ERR_MANDATORY_TAG;
1136                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1137         }
1138
1139         if (!expect(new_disk_conf->resync_rate >= 1))
1140                 new_disk_conf->resync_rate = 1;
1141
1142         /* clip to allowed range */
1143         if (!expect(new_disk_conf->al_extents >= DRBD_AL_EXTENTS_MIN))
1144                 new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
1145         if (!expect(new_disk_conf->al_extents <= DRBD_AL_EXTENTS_MAX))
1146                 new_disk_conf->al_extents = DRBD_AL_EXTENTS_MAX;
1147
1148         /* most sanity checks done, try to assign the new sync-after
1149          * dependency.  need to hold the global lock in there,
1150          * to avoid a race in the dependency loop check. */
1151         retcode = drbd_alter_sa(mdev, new_disk_conf->resync_after);
1152         if (retcode != NO_ERROR)
1153                 goto fail;
1154
1155         fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
1156         if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
1157                 rs_plan_s   = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
1158                 if (!rs_plan_s) {
1159                         dev_err(DEV, "kzalloc of fifo_buffer failed\n");
1160                         retcode = ERR_NOMEM;
1161                         goto fail;
1162                 }
1163         }
1164
1165         if (fifo_size != mdev->rs_plan_s.size) {
1166                 kfree(mdev->rs_plan_s.values);
1167                 mdev->rs_plan_s.values = rs_plan_s;
1168                 mdev->rs_plan_s.size   = fifo_size;
1169                 mdev->rs_planed = 0;
1170                 rs_plan_s = NULL;
1171         }
1172
1173         wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
1174         drbd_al_shrink(mdev);
1175         err = drbd_check_al_size(mdev, new_disk_conf);
1176         lc_unlock(mdev->act_log);
1177         wake_up(&mdev->al_wait);
1178
1179         if (err) {
1180                 retcode = ERR_NOMEM;
1181                 goto fail;
1182         }
1183
1184         /* FIXME
1185          * To avoid someone looking at a half-updated struct, we probably
1186          * should have a rw-semaphore on net_conf and disk_conf.
1187          */
1188         mdev->ldev->dc = *new_disk_conf;
1189
1190         drbd_md_sync(mdev);
1191
1192
1193         if (mdev->state.conn >= C_CONNECTED)
1194                 drbd_send_sync_param(mdev);
1195
1196  fail:
1197         put_ldev(mdev);
1198         kfree(new_disk_conf);
1199         kfree(rs_plan_s);
1200  out:
1201         drbd_adm_finish(info, retcode);
1202         return 0;
1203 }
1204
1205 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
1206 {
1207         struct drbd_conf *mdev;
1208         int err;
1209         enum drbd_ret_code retcode;
1210         enum determine_dev_size dd;
1211         sector_t max_possible_sectors;
1212         sector_t min_md_device_sectors;
1213         struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
1214         struct block_device *bdev;
1215         struct lru_cache *resync_lru = NULL;
1216         union drbd_state ns, os;
1217         enum drbd_state_rv rv;
1218         struct net_conf *nc;
1219         int cp_discovered = 0;
1220
1221         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1222         if (!adm_ctx.reply_skb)
1223                 return retcode;
1224         if (retcode != NO_ERROR)
1225                 goto finish;
1226
1227         mdev = adm_ctx.mdev;
1228         conn_reconfig_start(mdev->tconn);
1229
1230         /* if you want to reconfigure, please tear down first */
1231         if (mdev->state.disk > D_DISKLESS) {
1232                 retcode = ERR_DISK_CONFIGURED;
1233                 goto fail;
1234         }
1235         /* It may just now have detached because of IO error.  Make sure
1236          * drbd_ldev_destroy is done already, we may end up here very fast,
1237          * e.g. if someone calls attach from the on-io-error handler,
1238          * to realize a "hot spare" feature (not that I'd recommend that) */
1239         wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
1240
1241         /* allocation not in the IO path, drbdsetup context */
1242         nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
1243         if (!nbc) {
1244                 retcode = ERR_NOMEM;
1245                 goto fail;
1246         }
1247
1248         set_disk_conf_defaults(&nbc->dc);
1249
1250         err = disk_conf_from_attrs(&nbc->dc, info);
1251         if (err) {
1252                 retcode = ERR_MANDATORY_TAG;
1253                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1254                 goto fail;
1255         }
1256
1257         if ((int)nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
1258                 retcode = ERR_MD_IDX_INVALID;
1259                 goto fail;
1260         }
1261
1262         rcu_read_lock();
1263         nc = rcu_dereference(mdev->tconn->net_conf);
1264         if (nc) {
1265                 if (nbc->dc.fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
1266                         rcu_read_unlock();
1267                         retcode = ERR_STONITH_AND_PROT_A;
1268                         goto fail;
1269                 }
1270         }
1271         rcu_read_unlock();
1272
1273         bdev = blkdev_get_by_path(nbc->dc.backing_dev,
1274                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
1275         if (IS_ERR(bdev)) {
1276                 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
1277                         PTR_ERR(bdev));
1278                 retcode = ERR_OPEN_DISK;
1279                 goto fail;
1280         }
1281         nbc->backing_bdev = bdev;
1282
1283         /*
1284          * meta_dev_idx >= 0: external fixed size, possibly multiple
1285          * drbd sharing one meta device.  TODO in that case, paranoia
1286          * check that [md_bdev, meta_dev_idx] is not yet used by some
1287          * other drbd minor!  (if you use drbd.conf + drbdadm, that
1288          * should check it for you already; but if you don't, or
1289          * someone fooled it, we need to double check here)
1290          */
1291         bdev = blkdev_get_by_path(nbc->dc.meta_dev,
1292                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL,
1293                                   ((int)nbc->dc.meta_dev_idx < 0) ?
1294                                   (void *)mdev : (void *)drbd_m_holder);
1295         if (IS_ERR(bdev)) {
1296                 dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
1297                         PTR_ERR(bdev));
1298                 retcode = ERR_OPEN_MD_DISK;
1299                 goto fail;
1300         }
1301         nbc->md_bdev = bdev;
1302
1303         if ((nbc->backing_bdev == nbc->md_bdev) !=
1304             (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1305              nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
1306                 retcode = ERR_MD_IDX_INVALID;
1307                 goto fail;
1308         }
1309
1310         resync_lru = lc_create("resync", drbd_bm_ext_cache,
1311                         1, 61, sizeof(struct bm_extent),
1312                         offsetof(struct bm_extent, lce));
1313         if (!resync_lru) {
1314                 retcode = ERR_NOMEM;
1315                 goto fail;
1316         }
1317
1318         /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
1319         drbd_md_set_sector_offsets(mdev, nbc);
1320
1321         if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
1322                 dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
1323                         (unsigned long long) drbd_get_max_capacity(nbc),
1324                         (unsigned long long) nbc->dc.disk_size);
1325                 retcode = ERR_DISK_TO_SMALL;
1326                 goto fail;
1327         }
1328
1329         if ((int)nbc->dc.meta_dev_idx < 0) {
1330                 max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
1331                 /* at least one MB, otherwise it does not make sense */
1332                 min_md_device_sectors = (2<<10);
1333         } else {
1334                 max_possible_sectors = DRBD_MAX_SECTORS;
1335                 min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
1336         }
1337
1338         if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
1339                 retcode = ERR_MD_DISK_TO_SMALL;
1340                 dev_warn(DEV, "refusing attach: md-device too small, "
1341                      "at least %llu sectors needed for this meta-disk type\n",
1342                      (unsigned long long) min_md_device_sectors);
1343                 goto fail;
1344         }
1345
1346         /* Make sure the new disk is big enough
1347          * (we may currently be R_PRIMARY with no local disk...) */
1348         if (drbd_get_max_capacity(nbc) <
1349             drbd_get_capacity(mdev->this_bdev)) {
1350                 retcode = ERR_DISK_TO_SMALL;
1351                 goto fail;
1352         }
1353
1354         nbc->known_size = drbd_get_capacity(nbc->backing_bdev);
1355
1356         if (nbc->known_size > max_possible_sectors) {
1357                 dev_warn(DEV, "==> truncating very big lower level device "
1358                         "to currently maximum possible %llu sectors <==\n",
1359                         (unsigned long long) max_possible_sectors);
1360                 if ((int)nbc->dc.meta_dev_idx >= 0)
1361                         dev_warn(DEV, "==>> using internal or flexible "
1362                                       "meta data may help <<==\n");
1363         }
1364
1365         drbd_suspend_io(mdev);
1366         /* also wait for the last barrier ack. */
1367         wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
1368         /* and for any other previously queued work */
1369         drbd_flush_workqueue(mdev);
1370
1371         rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
1372         retcode = rv;  /* FIXME: Type mismatch. */
1373         drbd_resume_io(mdev);
1374         if (rv < SS_SUCCESS)
1375                 goto fail;
1376
1377         if (!get_ldev_if_state(mdev, D_ATTACHING))
1378                 goto force_diskless;
1379
1380         drbd_md_set_sector_offsets(mdev, nbc);
1381
1382         if (!mdev->bitmap) {
1383                 if (drbd_bm_init(mdev)) {
1384                         retcode = ERR_NOMEM;
1385                         goto force_diskless_dec;
1386                 }
1387         }
1388
1389         retcode = drbd_md_read(mdev, nbc);
1390         if (retcode != NO_ERROR)
1391                 goto force_diskless_dec;
1392
1393         if (mdev->state.conn < C_CONNECTED &&
1394             mdev->state.role == R_PRIMARY &&
1395             (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
1396                 dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
1397                     (unsigned long long)mdev->ed_uuid);
1398                 retcode = ERR_DATA_NOT_CURRENT;
1399                 goto force_diskless_dec;
1400         }
1401
1402         /* Since we are diskless, fix the activity log first... */
1403         if (drbd_check_al_size(mdev, &nbc->dc)) {
1404                 retcode = ERR_NOMEM;
1405                 goto force_diskless_dec;
1406         }
1407
1408         /* Prevent shrinking of consistent devices ! */
1409         if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
1410             drbd_new_dev_size(mdev, nbc, nbc->dc.disk_size, 0) < nbc->md.la_size_sect) {
1411                 dev_warn(DEV, "refusing to truncate a consistent device\n");
1412                 retcode = ERR_DISK_TO_SMALL;
1413                 goto force_diskless_dec;
1414         }
1415
1416         if (!drbd_al_read_log(mdev, nbc)) {
1417                 retcode = ERR_IO_MD_DISK;
1418                 goto force_diskless_dec;
1419         }
1420
1421         /* Reset the "barriers don't work" bits here, then force meta data to
1422          * be written, to ensure we determine if barriers are supported. */
1423         if (nbc->dc.no_md_flush)
1424                 set_bit(MD_NO_FUA, &mdev->flags);
1425         else
1426                 clear_bit(MD_NO_FUA, &mdev->flags);
1427
1428         /* Point of no return reached.
1429          * Devices and memory are no longer released by error cleanup below.
1430          * Now mdev takes over responsibility, and the state engine should
1431          * clean it up somewhere.  */
1432         D_ASSERT(mdev->ldev == NULL);
1433         mdev->ldev = nbc;
1434         mdev->resync = resync_lru;
1435         nbc = NULL;
1436         resync_lru = NULL;
1437
1438         mdev->write_ordering = WO_bdev_flush;
1439         drbd_bump_write_ordering(mdev, WO_bdev_flush);
1440
1441         if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
1442                 set_bit(CRASHED_PRIMARY, &mdev->flags);
1443         else
1444                 clear_bit(CRASHED_PRIMARY, &mdev->flags);
1445
1446         if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1447             !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod)) {
1448                 set_bit(CRASHED_PRIMARY, &mdev->flags);
1449                 cp_discovered = 1;
1450         }
1451
1452         mdev->send_cnt = 0;
1453         mdev->recv_cnt = 0;
1454         mdev->read_cnt = 0;
1455         mdev->writ_cnt = 0;
1456
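             /* Re-evaluate the maximum BIO size we accept, now that a backing device is present. */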
1457         drbd_reconsider_max_bio_size(mdev);
1458
1459         /* If I am currently not R_PRIMARY,
1460          * but meta data primary indicator is set,
1461          * I just now recover from a hard crash,
1462          * and have been R_PRIMARY before that crash.
1463          *
1464          * Now, if I had no connection before that crash
1465          * (have been degraded R_PRIMARY), chances are that
1466          * I won't find my peer now either.
1467          *
1468          * In that case, and _only_ in that case,
1469          * we use the degr-wfc-timeout instead of the default,
1470          * so we can automatically recover from a crash of a
1471          * degraded but active "cluster" after a certain timeout.
1472          */
1473         clear_bit(USE_DEGR_WFC_T, &mdev->flags);
1474         if (mdev->state.role != R_PRIMARY &&
1475              drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
1476             !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
1477                 set_bit(USE_DEGR_WFC_T, &mdev->flags);
1478
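             /* Compute and apply the exposed device size from the new backing device and meta data. */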
1479         dd = drbd_determine_dev_size(mdev, 0);
1480         if (dd == dev_size_error) {
1481                 retcode = ERR_NOMEM_BITMAP;
1482                 goto force_diskless_dec;
1483         } else if (dd == grew)
1484                 set_bit(RESYNC_AFTER_NEG, &mdev->flags);
1485
1486         if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
1487                 dev_info(DEV, "Assuming that all blocks are out of sync "
1488                      "(aka FullSync)\n");
1489                 if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
1490                         "set_n_write from attaching", BM_LOCKED_MASK)) {
1491                         retcode = ERR_IO_MD_DISK;
1492                         goto force_diskless_dec;
1493                 }
1494         } else {
1495                 if (drbd_bitmap_io(mdev, &drbd_bm_read,
1496                         "read from attaching", BM_LOCKED_MASK)) {
1497                         retcode = ERR_IO_MD_DISK;
1498                         goto force_diskless_dec;
1499                 }
1500         }
1501
1502         if (cp_discovered) {
1503                 drbd_al_apply_to_bm(mdev);
1504                 if (drbd_bitmap_io(mdev, &drbd_bm_write,
1505                         "crashed primary apply AL", BM_LOCKED_MASK)) {
1506                         retcode = ERR_IO_MD_DISK;
1507                         goto force_diskless_dec;
1508                 }
1509         }
1510
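             /* With every bit set, all blocks are out of sync anyway; suspend use of the activity log. */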
1511         if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
1512                 drbd_suspend_al(mdev); /* IO is still suspended here... */
1513
1514         spin_lock_irq(&mdev->tconn->req_lock);
1515         os = drbd_read_state(mdev);
1516         ns = os;
1517         /* If MDF_CONSISTENT is not set, go into D_INCONSISTENT state;
1518            otherwise investigate MDF_WAS_UP_TO_DATE:
1519            if MDF_WAS_UP_TO_DATE is not set, go into D_OUTDATED disk state,
1520            otherwise into D_CONSISTENT state.
1521         */
1522         if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
1523                 if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
1524                         ns.disk = D_CONSISTENT;
1525                 else
1526                         ns.disk = D_OUTDATED;
1527         } else {
1528                 ns.disk = D_INCONSISTENT;
1529         }
1530
1531         if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
1532                 ns.pdsk = D_OUTDATED;
1533
1534         if ( ns.disk == D_CONSISTENT &&
1535             (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
1536                 ns.disk = D_UP_TO_DATE;
1537
1538         /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
1539            MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
1540            this point, because drbd_request_state() modifies these
1541            flags. */
1542
1543         /* In case we are C_CONNECTED, postpone any decision on the new disk
1544            state until after the negotiation phase. */
1545         if (mdev->state.conn == C_CONNECTED) {
1546                 mdev->new_state_tmp.i = ns.i;
1547                 ns.i = os.i;
1548                 ns.disk = D_NEGOTIATING;
1549
1550                 /* We expect to receive up-to-date UUIDs soon.
1551                    To avoid a race in receive_state, free p_uuid while
1552                    holding req_lock. I.e. atomic with the state change */
1553                 kfree(mdev->p_uuid);
1554                 mdev->p_uuid = NULL;
1555         }
1556
1557         rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
1558         spin_unlock_irq(&mdev->tconn->req_lock);
1559
1560         if (rv < SS_SUCCESS)
1561                 goto force_diskless_dec;
1562
1563         if (mdev->state.role == R_PRIMARY)
1564                 mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
1565         else
1566                 mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
1567
1568         drbd_md_mark_dirty(mdev);
1569         drbd_md_sync(mdev);
1570
1571         kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
1572         put_ldev(mdev);
1573         conn_reconfig_done(mdev->tconn);
1574         drbd_adm_finish(info, retcode);
1575         return 0;
1576
1577  force_diskless_dec:
1578         put_ldev(mdev);
1579  force_diskless:
1580         drbd_force_state(mdev, NS(disk, D_FAILED));
1581         drbd_md_sync(mdev);
1582  fail:
1583         conn_reconfig_done(mdev->tconn);
1584         if (nbc) {
1585                 if (nbc->backing_bdev)
1586                         blkdev_put(nbc->backing_bdev,
1587                                    FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1588                 if (nbc->md_bdev)
1589                         blkdev_put(nbc->md_bdev,
1590                                    FMODE_READ | FMODE_WRITE | FMODE_EXCL);
1591                 kfree(nbc);
1592         }
1593         lc_destroy(resync_lru);
1594
1595  finish:
1596         drbd_adm_finish(info, retcode);
1597         return 0;
1598 }
1599
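     /* Request the D_FAILED disk state and wait until the disk has actually left
      * D_FAILED (it continues on to D_DISKLESS). */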
1600 static int adm_detach(struct drbd_conf *mdev)
1601 {
1602         enum drbd_state_rv retcode;
1603         int ret;
1604         drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
1605         retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
1606         /* D_FAILED will transition to DISKLESS. */
1607         ret = wait_event_interruptible(mdev->misc_wait,
1608                         mdev->state.disk != D_FAILED);
1609         drbd_resume_io(mdev);
1610         if ((int)retcode == (int)SS_IS_DISKLESS)
1611                 retcode = SS_NOTHING_TO_DO;
1612         if (ret)
1613                 retcode = ERR_INTR;
1614         return retcode;
1615 }
1616
1617 /* Detaching the disk is a process in multiple stages.  First we need to lock
1618  * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
1619  * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
1620  * internal references as well.
1621  * Only then have we finally detached. */
1622 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
1623 {
1624         enum drbd_ret_code retcode;
1625
1626         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
1627         if (!adm_ctx.reply_skb)
1628                 return retcode;
1629         if (retcode != NO_ERROR)
1630                 goto out;
1631
1632         retcode = adm_detach(adm_ctx.mdev);
1633 out:
1634         drbd_adm_finish(info, retcode);
1635         return 0;
1636 }
1637
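     /* Returns true if any volume of this connection is in a (possibly paused) resync. */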
1638 static bool conn_resync_running(struct drbd_tconn *tconn)
1639 {
1640         struct drbd_conf *mdev;
1641         bool rv = false;
1642         int vnr;
1643
1644         rcu_read_lock();
1645         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1646                 if (mdev->state.conn == C_SYNC_SOURCE ||
1647                     mdev->state.conn == C_SYNC_TARGET ||
1648                     mdev->state.conn == C_PAUSED_SYNC_S ||
1649                     mdev->state.conn == C_PAUSED_SYNC_T) {
1650                         rv = true;
1651                         break;
1652                 }
1653         }
1654         rcu_read_unlock();
1655
1656         return rv;
1657 }
1658
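     /* Returns true if any volume of this connection is running online verify. */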
1659 static bool conn_ov_running(struct drbd_tconn *tconn)
1660 {
1661         struct drbd_conf *mdev;
1662         bool rv = false;
1663         int vnr;
1664
1665         rcu_read_lock();
1666         idr_for_each_entry(&tconn->volumes, mdev, vnr) {
1667                 if (mdev->state.conn == C_VERIFY_S ||
1668                     mdev->state.conn == C_VERIFY_T) {
1669                         rv = true;
1670                         break;
1671                 }
1672         }
1673         rcu_read_unlock();
1674
1675         return rv;
1676 }
1677
1678 static enum drbd_ret_code
1679 _check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
1680 {
1681         struct drbd_conf *mdev;
1682         int i;
1683
1684         if (old_conf && tconn->agreed_pro_version < 100 &&
1685             tconn->cstate == C_WF_REPORT_PARAMS &&
1686             new_conf->wire_protocol != old_conf->wire_protocol)
1687                 return ERR_NEED_APV_100;
1688
1689         if (new_conf->two_primaries &&
1690             (new_conf->wire_protocol != DRBD_PROT_C))
1691                 return ERR_NOT_PROTO_C;
1692
1693         idr_for_each_entry(&tconn->volumes, mdev, i) {
1694                 if (get_ldev(mdev)) {
1695                         enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
1696                         put_ldev(mdev);
1697                         if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
1698                                 return ERR_STONITH_AND_PROT_A;
1699                 }
1700                 if (mdev->state.role == R_PRIMARY && new_conf->want_lose)
1701                         return ERR_DISCARD;
1702         }
1703
1704         if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
1705                 return ERR_CONG_NOT_PROTO_A;
1706
1707         return NO_ERROR;
1708 }
1709
1710 static enum drbd_ret_code
1711 check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
1712 {
1713         enum drbd_ret_code rv;
1714         struct drbd_conf *mdev;
1715         int i;
1716
1717         rcu_read_lock();
1718         rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
1719         rcu_read_unlock();
1720
1721         /* tconn->volumes protected by genl_lock() here */
1722         idr_for_each_entry(&tconn->volumes, mdev, i) {
1723                 if (!mdev->bitmap) {
1724                         if (drbd_bm_init(mdev))
1725                                 return ERR_NOMEM;
1726                 }
1727         }
1728
1729         return rv;
1730 }
1731
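     /* Crypto transforms and digest buffers for one network configuration,
      * allocated up front so they can be installed or freed as a unit. */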
1732 struct crypto {
1733         struct crypto_hash *verify_tfm;
1734         struct crypto_hash *csums_tfm;
1735         struct crypto_hash *cram_hmac_tfm;
1736         struct crypto_hash *integrity_tfm;
1737         void *int_dig_in;
1738         void *int_dig_vv;
1739 };
1740
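     /* Allocate a hash transform by name.  An empty name means "not configured"
      * and is no error; on allocation failure, return the caller-supplied error code. */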
1741 static int
1742 alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
1743 {
1744         if (!tfm_name[0])
1745                 return NO_ERROR;
1746
1747         *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
1748         if (IS_ERR(*tfm)) {
1749                 *tfm = NULL;
1750                 return err_alg;
1751         }
1752
1753         return NO_ERROR;
1754 }
1755
1756 static enum drbd_ret_code
1757 alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
1758 {
1759         char hmac_name[CRYPTO_MAX_ALG_NAME];
1760         enum drbd_ret_code rv;
1761         int hash_size;
1762
1763         rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
1764                        ERR_CSUMS_ALG);
1765         if (rv != NO_ERROR)
1766                 return rv;
1767         rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
1768                        ERR_VERIFY_ALG);
1769         if (rv != NO_ERROR)
1770                 return rv;
1771         rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
1772                        ERR_INTEGRITY_ALG);
1773         if (rv != NO_ERROR)
1774                 return rv;
1775         if (new_conf->cram_hmac_alg[0] != 0) {
1776                 snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
1777                          new_conf->cram_hmac_alg);
1778
1779                 rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
1780                                ERR_AUTH_ALG);
1781         }
1782         if (crypto->integrity_tfm) {
1783                 hash_size = crypto_hash_digestsize(crypto->integrity_tfm);
1784                 crypto->int_dig_in = kmalloc(hash_size, GFP_KERNEL);
1785                 if (!crypto->int_dig_in)
1786                         return ERR_NOMEM;
1787                 crypto->int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
1788                 if (!crypto->int_dig_vv)
1789                         return ERR_NOMEM;
1790         }
1791
1792         return rv;
1793 }
1794
1795 static void free_crypto(struct crypto *crypto)
1796 {
1797         kfree(crypto->int_dig_in);
1798         kfree(crypto->int_dig_vv);
1799         crypto_free_hash(crypto->cram_hmac_tfm);
1800         crypto_free_hash(crypto->integrity_tfm);
1801         crypto_free_hash(crypto->csums_tfm);
1802         crypto_free_hash(crypto->verify_tfm);
1803 }
1804
1805 int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
1806 {
1807         enum drbd_ret_code retcode;
1808         struct drbd_tconn *tconn;
1809         struct net_conf *old_conf, *new_conf = NULL;
1810         int err;
1811         int ovr; /* online verify running */
1812         int rsr; /* re-sync running */
1813         struct crypto crypto = { };
1814         bool change_integrity_alg;
1815
1816         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
1817         if (!adm_ctx.reply_skb)
1818                 return retcode;
1819         if (retcode != NO_ERROR)
1820                 goto out;
1821
1822         tconn = adm_ctx.tconn;
1823
1824         new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
1825         if (!new_conf) {
1826                 retcode = ERR_NOMEM;
1827                 goto out;
1828         }
1829
1830         conn_reconfig_start(tconn);
1831
1832         mutex_lock(&tconn->data.mutex);
1833         mutex_lock(&tconn->conf_update);
1834         old_conf = tconn->net_conf;
1835
1836         if (!old_conf) {
1837                 drbd_msg_put_info("net conf missing, try connect");
1838                 retcode = ERR_INVALID_REQUEST;
1839                 goto fail;
1840         }
1841
1842         *new_conf = *old_conf;
1843         if (should_set_defaults(info))
1844                 set_net_conf_defaults(new_conf);
1845
1846         err = net_conf_from_attrs_for_change(new_conf, info);
1847         if (err) {
1848                 retcode = ERR_MANDATORY_TAG;
1849                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1850                 goto fail;
1851         }
1852
1853         retcode = check_net_options(tconn, new_conf);
1854         if (retcode != NO_ERROR)
1855                 goto fail;
1856
1857         /* re-sync running */
1858         rsr = conn_resync_running(tconn);
1859         if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
1860                 retcode = ERR_CSUMS_RESYNC_RUNNING;
1861                 goto fail;
1862         }
1863
1864         /* online verify running */
1865         ovr = conn_ov_running(tconn);
1866         if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
1867                 retcode = ERR_VERIFY_RUNNING;
1868                 goto fail;
1869         }
1870
1871         change_integrity_alg = strcmp(old_conf->integrity_alg,
1872                                       new_conf->integrity_alg);
1873
1874         retcode = alloc_crypto(&crypto, new_conf);
1875         if (retcode != NO_ERROR)
1876                 goto fail;
1877
1878         rcu_assign_pointer(tconn->net_conf, new_conf);
1879
1880         if (!rsr) {
1881                 crypto_free_hash(tconn->csums_tfm);
1882                 tconn->csums_tfm = crypto.csums_tfm;
1883                 crypto.csums_tfm = NULL;
1884         }
1885         if (!ovr) {
1886                 crypto_free_hash(tconn->verify_tfm);
1887                 tconn->verify_tfm = crypto.verify_tfm;
1888                 crypto.verify_tfm = NULL;
1889         }
1890
1891         kfree(tconn->int_dig_in);
1892         tconn->int_dig_in = crypto.int_dig_in;
1893         kfree(tconn->int_dig_vv);
1894         tconn->int_dig_vv = crypto.int_dig_vv;
1895         crypto_free_hash(tconn->integrity_tfm);
1896         tconn->integrity_tfm = crypto.integrity_tfm;
1897         if (change_integrity_alg) {
1898                 /* Do this without trying to take tconn->data.mutex again.  */
1899                 if (__drbd_send_protocol(tconn))
1900                         goto fail;
1901         }
1902
1903         /* FIXME Changing cram_hmac while the connection is established is useless */
1904         crypto_free_hash(tconn->cram_hmac_tfm);
1905         tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
1906
1907         mutex_unlock(&tconn->conf_update);
1908         mutex_unlock(&tconn->data.mutex);
1909         synchronize_rcu();
1910         kfree(old_conf);
1911
1912         if (tconn->cstate >= C_WF_REPORT_PARAMS)
1913                 drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
1914
1915         goto done;
1916
1917  fail:
1918         mutex_unlock(&tconn->conf_update);
1919         mutex_unlock(&tconn->data.mutex);
1920         free_crypto(&crypto);
1921         kfree(new_conf);
1922  done:
1923         conn_reconfig_done(tconn);
1924  out:
1925         drbd_adm_finish(info, retcode);
1926         return 0;
1927 }
1928
1929 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
1930 {
1931         struct drbd_conf *mdev;
1932         struct net_conf *old_conf, *new_conf = NULL;
1933         struct crypto crypto = { };
1934         struct drbd_tconn *oconn;
1935         struct drbd_tconn *tconn;
1936         struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
1937         enum drbd_ret_code retcode;
1938         int i;
1939         int err;
1940
1941         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
1942         if (!adm_ctx.reply_skb)
1943                 return retcode;
1944         if (retcode != NO_ERROR)
1945                 goto out;
1946
1947         tconn = adm_ctx.tconn;
1948         conn_reconfig_start(tconn);
1949
1950         if (tconn->cstate > C_STANDALONE) {
1951                 retcode = ERR_NET_CONFIGURED;
1952                 goto fail;
1953         }
1954
1955         /* allocation not in the IO path, drbdsetup / netlink process context */
1956         new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
1957         if (!new_conf) {
1958                 retcode = ERR_NOMEM;
1959                 goto fail;
1960         }
1961
1962         set_net_conf_defaults(new_conf);
1963
1964         err = net_conf_from_attrs(new_conf, info);
1965         if (err) {
1966                 retcode = ERR_MANDATORY_TAG;
1967                 drbd_msg_put_info(from_attrs_err_to_txt(err));
1968                 goto fail;
1969         }
1970
1971         retcode = check_net_options(tconn, new_conf);
1972         if (retcode != NO_ERROR)
1973                 goto fail;
1974
1975         retcode = NO_ERROR;
1976
1977         new_my_addr = (struct sockaddr *)&new_conf->my_addr;
1978         new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
1979
1980         /* No need to take drbd_cfg_rwsem here.  All reconfiguration is
1981          * strictly serialized on genl_lock(). We are protected against
1982          * concurrent reconfiguration/addition/deletion */
1983         list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
1984                 struct net_conf *nc;
1985                 if (oconn == tconn)
1986                         continue;
1987
1988                 rcu_read_lock();
1989                 nc = rcu_dereference(oconn->net_conf);
1990                 if (nc) {
1991                         taken_addr = (struct sockaddr *)&nc->my_addr;
1992                         if (new_conf->my_addr_len == nc->my_addr_len &&
1993                             !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
1994                                 retcode = ERR_LOCAL_ADDR;
1995
1996                         taken_addr = (struct sockaddr *)&nc->peer_addr;
1997                         if (new_conf->peer_addr_len == nc->peer_addr_len &&
1998                             !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
1999                                 retcode = ERR_PEER_ADDR;
2000                 }
2001                 rcu_read_unlock();
2002                 if (retcode != NO_ERROR)
2003                         goto fail;
2004         }
2005
2006         retcode = alloc_crypto(&crypto, new_conf);
2007         if (retcode != NO_ERROR)
2008                 goto fail;
2009
2010         ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
2011
2012         conn_flush_workqueue(tconn);
2013
2014         mutex_lock(&tconn->conf_update);
2015         old_conf = tconn->net_conf;
2016         if (old_conf) {
2017                 retcode = ERR_NET_CONFIGURED;
2018                 mutex_unlock(&tconn->conf_update);
2019                 goto fail;
2020         }
2021         rcu_assign_pointer(tconn->net_conf, new_conf);
2022
2023         conn_free_crypto(tconn);
2024         tconn->int_dig_in = crypto.int_dig_in;
2025         tconn->int_dig_vv = crypto.int_dig_vv;
2026         tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
2027         tconn->integrity_tfm = crypto.integrity_tfm;
2028         tconn->csums_tfm = crypto.csums_tfm;
2029         tconn->verify_tfm = crypto.verify_tfm;
2030
2031         mutex_unlock(&tconn->conf_update);
2032
2033         rcu_read_lock();
2034         idr_for_each_entry(&tconn->volumes, mdev, i) {
2035                 mdev->send_cnt = 0;
2036                 mdev->recv_cnt = 0;
2037         }
2038         rcu_read_unlock();
2039
2040         retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
2041
2042         conn_reconfig_done(tconn);
2043         drbd_adm_finish(info, retcode);
2044         return 0;
2045
2046 fail:
2047         free_crypto(&crypto);
2048         kfree(new_conf);
2049
2050         conn_reconfig_done(tconn);
2051 out:
2052         drbd_adm_finish(info, retcode);
2053         return 0;
2054 }
2055
2056 static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
2057 {
2058         enum drbd_state_rv rv;
2059
2060         rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2061                         force ? CS_HARD : 0);
2062
2063         switch (rv) {
2064         case SS_NOTHING_TO_DO:
2065                 break;
2066         case SS_ALREADY_STANDALONE:
2067                 return SS_SUCCESS;
2068         case SS_PRIMARY_NOP:
2069                 /* Our state checking code wants to see the peer outdated. */
2070                 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
2071                                                 pdsk, D_OUTDATED), CS_VERBOSE);
2072                 break;
2073         case SS_CW_FAILED_BY_PEER:
2074                 /* The peer probably wants to see us outdated. */
2075                 rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
2076                                                         disk, D_OUTDATED), 0);
2077                 if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
2078                         rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
2079                                         CS_HARD);
2080                 }
2081                 break;
2082         default:;
2083                 /* no special handling necessary */
2084         }
2085
2086         if (rv >= SS_SUCCESS) {
2087                 enum drbd_state_rv rv2;
2088                 /* No one else can reconfigure the network while I am here.
2089                  * The state handling only uses drbd_thread_stop_nowait(),
2090                  * we want to really wait here until the receiver is no more.
2091                  */
2092                 drbd_thread_stop(&tconn->receiver);
2093
2094                 /* Race breaker.  This additional state change request may be
2095                  * necessary, if this was a forced disconnect during a receiver
2096                  * restart.  We may have "killed" the receiver thread just
2097                  * after drbdd_init() returned.  Typically, we should be
2098                  * C_STANDALONE already, now, and this becomes a no-op.
2099                  */
2100                 rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
2101                                 CS_VERBOSE | CS_HARD);
2102                 if (rv2 < SS_SUCCESS)
2103                         conn_err(tconn,
2104                                 "unexpected rv2=%d in conn_try_disconnect()\n",
2105                                 rv2);
2106         }
2107         return rv;
2108 }
2109
2110 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
2111 {
2112         struct disconnect_parms parms;
2113         struct drbd_tconn *tconn;
2114         enum drbd_state_rv rv;
2115         enum drbd_ret_code retcode;
2116         int err;
2117
2118         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2119         if (!adm_ctx.reply_skb)
2120                 return retcode;
2121         if (retcode != NO_ERROR)
2122                 goto fail;
2123
2124         tconn = adm_ctx.tconn;
2125         memset(&parms, 0, sizeof(parms));
2126         if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
2127                 err = disconnect_parms_from_attrs(&parms, info);
2128                 if (err) {
2129                         retcode = ERR_MANDATORY_TAG;
2130                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2131                         goto fail;
2132                 }
2133         }
2134
2135         rv = conn_try_disconnect(tconn, parms.force_disconnect);
2136         if (rv < SS_SUCCESS)
2137                 retcode = rv;  /* FIXME: Type mismatch. */
2138         else
2139                 retcode = NO_ERROR;
2140  fail:
2141         drbd_adm_finish(info, retcode);
2142         return 0;
2143 }
2144
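     /* After an online grow, the new area has to be resynced.  Prefer the primary
      * as sync source; with equal roles, DISCARD_CONCURRENT breaks the tie. */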
2145 void resync_after_online_grow(struct drbd_conf *mdev)
2146 {
2147         int iass; /* I am sync source */
2148
2149         dev_info(DEV, "Resync of new storage after online grow\n");
2150         if (mdev->state.role != mdev->state.peer)
2151                 iass = (mdev->state.role == R_PRIMARY);
2152         else
2153                 iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
2154
2155         if (iass)
2156                 drbd_start_resync(mdev, C_SYNC_SOURCE);
2157         else
2158                 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
2159 }
2160
2161 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2162 {
2163         struct resize_parms rs;
2164         struct drbd_conf *mdev;
2165         enum drbd_ret_code retcode;
2166         enum determine_dev_size dd;
2167         enum dds_flags ddsf;
2168         int err;
2169
2170         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2171         if (!adm_ctx.reply_skb)
2172                 return retcode;
2173         if (retcode != NO_ERROR)
2174                 goto fail;
2175
2176         memset(&rs, 0, sizeof(struct resize_parms));
2177         if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
2178                 err = resize_parms_from_attrs(&rs, info);
2179                 if (err) {
2180                         retcode = ERR_MANDATORY_TAG;
2181                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2182                         goto fail;
2183                 }
2184         }
2185
2186         mdev = adm_ctx.mdev;
2187         if (mdev->state.conn > C_CONNECTED) {
2188                 retcode = ERR_RESIZE_RESYNC;
2189                 goto fail;
2190         }
2191
2192         if (mdev->state.role == R_SECONDARY &&
2193             mdev->state.peer == R_SECONDARY) {
2194                 retcode = ERR_NO_PRIMARY;
2195                 goto fail;
2196         }
2197
2198         if (!get_ldev(mdev)) {
2199                 retcode = ERR_NO_DISK;
2200                 goto fail;
2201         }
2202
2203         if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
2204                 retcode = ERR_NEED_APV_93;
2205                 goto fail;
2206         }
2207
2208         if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
2209                 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
2210
2211         mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
2212         ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
2213         dd = drbd_determine_dev_size(mdev, ddsf);
2214         drbd_md_sync(mdev);
2215         put_ldev(mdev);
2216         if (dd == dev_size_error) {
2217                 retcode = ERR_NOMEM_BITMAP;
2218                 goto fail;
2219         }
2220
2221         if (mdev->state.conn == C_CONNECTED) {
2222                 if (dd == grew)
2223                         set_bit(RESIZE_PENDING, &mdev->flags);
2224
2225                 drbd_send_uuids(mdev);
2226                 drbd_send_sizes(mdev, 1, ddsf);
2227         }
2228
2229  fail:
2230         drbd_adm_finish(info, retcode);
2231         return 0;
2232 }
2233
2234 void drbd_set_res_opts_defaults(struct res_opts *r)
2235 {
2236         set_res_opts_defaults(r);
2237 }
2238
2239 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
2240 {
2241         enum drbd_ret_code retcode;
2242         cpumask_var_t new_cpu_mask;
2243         struct drbd_tconn *tconn;
2244         struct res_opts res_opts;
2245         int err;
2246
2247         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2248         if (!adm_ctx.reply_skb)
2249                 return retcode;
2250         if (retcode != NO_ERROR)
2251                 goto fail;
2252         tconn = adm_ctx.tconn;
2253
2254         if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
2255                 retcode = ERR_NOMEM;
2256                 drbd_msg_put_info("unable to allocate cpumask");
2257                 goto fail;
2258         }
2259
2260         res_opts = tconn->res_opts;
2261         if (should_set_defaults(info))
2262                 set_res_opts_defaults(&res_opts);
2263
2264         err = res_opts_from_attrs(&res_opts, info);
2265         if (err) {
2266                 retcode = ERR_MANDATORY_TAG;
2267                 drbd_msg_put_info(from_attrs_err_to_txt(err));
2268                 goto fail;
2269         }
2270
2271         /* silently ignore cpu mask on UP kernel */
2272         if (nr_cpu_ids > 1 && res_opts.cpu_mask[0] != 0) {
2273                 err = __bitmap_parse(res_opts.cpu_mask, 32, 0,
2274                                 cpumask_bits(new_cpu_mask), nr_cpu_ids);
2275                 if (err) {
2276                         conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
2277                         retcode = ERR_CPU_MASK_PARSE;
2278                         goto fail;
2279                 }
2280         }
2281
2282
2283         tconn->res_opts = res_opts;
2284
2285         if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
2286                 cpumask_copy(tconn->cpu_mask, new_cpu_mask);
2287                 drbd_calc_cpu_mask(tconn);
2288                 tconn->receiver.reset_cpu_mask = 1;
2289                 tconn->asender.reset_cpu_mask = 1;
2290                 tconn->worker.reset_cpu_mask = 1;
2291         }
2292
2293 fail:
2294         free_cpumask_var(new_cpu_mask);
2295
2296         drbd_adm_finish(info, retcode);
2297         return 0;
2298 }
2299
2300 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
2301 {
2302         struct drbd_conf *mdev;
2303         int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2304
2305         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2306         if (!adm_ctx.reply_skb)
2307                 return retcode;
2308         if (retcode != NO_ERROR)
2309                 goto out;
2310
2311         mdev = adm_ctx.mdev;
2312
2313         /* If there is still bitmap IO pending, probably because of a previous
2314          * resync that has just finished, wait for it before requesting a new resync. */
2315         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2316
2317         retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
2318
2319         if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
2320                 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2321
2322         while (retcode == SS_NEED_CONNECTION) {
2323                 spin_lock_irq(&mdev->tconn->req_lock);
2324                 if (mdev->state.conn < C_CONNECTED)
2325                         retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
2326                 spin_unlock_irq(&mdev->tconn->req_lock);
2327
2328                 if (retcode != SS_NEED_CONNECTION)
2329                         break;
2330
2331                 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2332         }
2333
2334 out:
2335         drbd_adm_finish(info, retcode);
2336         return 0;
2337 }
2338
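     /* Bitmap IO worker: set all bits, write the bitmap out, and suspend activity log updates. */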
2339 static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
2340 {
2341         int rv;
2342
2343         rv = drbd_bmio_set_n_write(mdev);
2344         drbd_suspend_al(mdev);
2345         return rv;
2346 }
2347
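     /* Common handler for admin commands that map directly to a single
      * drbd_request_state() call on one minor. */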
2348 static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2349                 union drbd_state mask, union drbd_state val)
2350 {
2351         enum drbd_ret_code retcode;
2352
2353         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2354         if (!adm_ctx.reply_skb)
2355                 return retcode;
2356         if (retcode != NO_ERROR)
2357                 goto out;
2358
2359         retcode = drbd_request_state(adm_ctx.mdev, mask, val);
2360 out:
2361         drbd_adm_finish(info, retcode);
2362         return 0;
2363 }
2364
2365 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
2366 {
2367         return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
2368 }
2369
2370 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
2371 {
2372         enum drbd_ret_code retcode;
2373
2374         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2375         if (!adm_ctx.reply_skb)
2376                 return retcode;
2377         if (retcode != NO_ERROR)
2378                 goto out;
2379
2380         if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
2381                 retcode = ERR_PAUSE_IS_SET;
2382 out:
2383         drbd_adm_finish(info, retcode);
2384         return 0;
2385 }
2386
2387 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
2388 {
2389         union drbd_dev_state s;
2390         enum drbd_ret_code retcode;
2391
2392         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2393         if (!adm_ctx.reply_skb)
2394                 return retcode;
2395         if (retcode != NO_ERROR)
2396                 goto out;
2397
2398         if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
2399                 s = adm_ctx.mdev->state;
2400                 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
2401                         retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
2402                                   s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
2403                 } else {
2404                         retcode = ERR_PAUSE_IS_CLEAR;
2405                 }
2406         }
2407
2408 out:
2409         drbd_adm_finish(info, retcode);
2410         return 0;
2411 }
2412
2413 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
2414 {
2415         return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
2416 }
2417
2418 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
2419 {
2420         struct drbd_conf *mdev;
2421         int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2422
2423         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2424         if (!adm_ctx.reply_skb)
2425                 return retcode;
2426         if (retcode != NO_ERROR)
2427                 goto out;
2428
2429         mdev = adm_ctx.mdev;
2430         if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
2431                 drbd_uuid_new_current(mdev);
2432                 clear_bit(NEW_CUR_UUID, &mdev->flags);
2433         }
2434         drbd_suspend_io(mdev);
2435         retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
2436         if (retcode == SS_SUCCESS) {
2437                 if (mdev->state.conn < C_CONNECTED)
2438                         tl_clear(mdev->tconn);
2439                 if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
2440                         tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
2441         }
2442         drbd_resume_io(mdev);
2443
2444 out:
2445         drbd_adm_finish(info, retcode);
2446         return 0;
2447 }
2448
2449 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
2450 {
2451         return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
2452 }
2453
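     /* Nest a DRBD_NLA_CFG_CONTEXT attribute (connection name, plus volume number
      * unless VOLUME_UNSPECIFIED) into the skb. */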
2454 int nla_put_drbd_cfg_context(struct sk_buff *skb, const char *conn_name, unsigned vnr)
2455 {
2456         struct nlattr *nla;
2457         nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
2458         if (!nla)
2459                 goto nla_put_failure;
2460         if (vnr != VOLUME_UNSPECIFIED)
2461                 NLA_PUT_U32(skb, T_ctx_volume, vnr);
2462         NLA_PUT_STRING(skb, T_ctx_conn_name, conn_name);
2463         nla_nest_end(skb, nla);
2464         return 0;
2465
2466 nla_put_failure:
2467         if (nla)
2468                 nla_nest_cancel(skb, nla);
2469         return -EMSGSIZE;
2470 }
2471
2472 int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
2473                 const struct sib_info *sib)
2474 {
2475         struct state_info *si = NULL; /* for sizeof(si->member); */
2476         struct net_conf *nc;
2477         struct nlattr *nla;
2478         int got_ldev;
2479         int err = 0;
2480         int exclude_sensitive;
2481
2482         /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
2483          * to.  So we better exclude_sensitive information.
2484          *
2485          * If sib == NULL, this is drbd_adm_get_status, executed synchronously
2486          * in the context of the requesting user process. Exclude sensitive
2487          * information, unless current has the CAP_SYS_ADMIN capability.
2488          *
2489          * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
2490          * relies on the current implementation of netlink_dump(), which
2491          * executes the dump callback successively from netlink_recvmsg(),
2492          * always in the context of the receiving process */
2493         exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
2494
2495         got_ldev = get_ldev(mdev);
2496
2497         /* We need to add connection name and volume number information still.
2498          * Minor number is in drbd_genlmsghdr. */
2499         if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
2500                 goto nla_put_failure;
2501
2502         if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
2503                 goto nla_put_failure;
2504
2505         if (got_ldev)
2506                 if (disk_conf_to_skb(skb, &mdev->ldev->dc, exclude_sensitive))
2507                         goto nla_put_failure;
2508
2509         rcu_read_lock();
2510         nc = rcu_dereference(mdev->tconn->net_conf);
2511         if (nc)
2512                 err = net_conf_to_skb(skb, nc, exclude_sensitive);
2513         rcu_read_unlock();
2514         if (err)
2515                 goto nla_put_failure;
2516
2517         nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
2518         if (!nla)
2519                 goto nla_put_failure;
2520         NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
2521         NLA_PUT_U32(skb, T_current_state, mdev->state.i);
2522         NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
2523         NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));
2524
2525         if (got_ldev) {
2526                 NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
2527                 NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
2528                 NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
2529                 NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
2530                 if (C_SYNC_SOURCE <= mdev->state.conn &&
2531                     C_PAUSED_SYNC_T >= mdev->state.conn) {
2532                         NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
2533                         NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
2534                 }
2535         }
2536
2537         if (sib) {
2538                 switch(sib->sib_reason) {
2539                 case SIB_SYNC_PROGRESS:
2540                 case SIB_GET_STATUS_REPLY:
2541                         break;
2542                 case SIB_STATE_CHANGE:
2543                         NLA_PUT_U32(skb, T_prev_state, sib->os.i);
2544                         NLA_PUT_U32(skb, T_new_state, sib->ns.i);
2545                         break;
2546                 case SIB_HELPER_POST:
2547                         NLA_PUT_U32(skb,
2548                                 T_helper_exit_code, sib->helper_exit_code);
2549                         /* fall through */
2550                 case SIB_HELPER_PRE:
2551                         NLA_PUT_STRING(skb, T_helper, sib->helper_name);
2552                         break;
2553                 }
2554         }
2555         nla_nest_end(skb, nla);
2556
2557         if (0)
2558 nla_put_failure:
2559                 err = -EMSGSIZE;
2560         if (got_ldev)
2561                 put_ldev(mdev);
2562         return err;
2563 }
2564
2565 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
2566 {
2567         enum drbd_ret_code retcode;
2568         int err;
2569
2570         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2571         if (!adm_ctx.reply_skb)
2572                 return retcode;
2573         if (retcode != NO_ERROR)
2574                 goto out;
2575
2576         err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
2577         if (err) {
2578                 nlmsg_free(adm_ctx.reply_skb);
2579                 return err;
2580         }
2581 out:
2582         drbd_adm_finish(info, retcode);
2583         return 0;
2584 }
2585
2586 int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
2587 {
2588         struct drbd_conf *mdev;
2589         struct drbd_genlmsghdr *dh;
2590         struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
2591         struct drbd_tconn *tconn = NULL;
2592         struct drbd_tconn *tmp;
2593         unsigned volume = cb->args[1];
2594
2595         /* Open-coded, deferred iteration:
2596          * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
2597          *      idr_for_each_entry(&tconn->volumes, mdev, i) {
2598          *        ...
2599          *      }
2600          * }
2601          * where tconn is cb->args[0];
2602          * and i is cb->args[1];
2603          *
2604          * cb->args[2] indicates if we shall loop over all resources,
2605          * or just dump all volumes of a single resource.
2606          *
2607          * This may miss entries inserted after this dump started,
2608          * or entries deleted before they are reached.
2609          *
2610          * We need to make sure the mdev won't disappear while
2611          * we are looking at it, and revalidate our iterators
2612          * on each iteration.
2613          */
2614
2615         /* synchronize with conn_create()/conn_destroy() */
2616         down_read(&drbd_cfg_rwsem);
2617         /* revalidate iterator position */
2618         list_for_each_entry(tmp, &drbd_tconns, all_tconn) {
2619                 if (pos == NULL) {
2620                         /* first iteration */
2621                         pos = tmp;
2622                         tconn = pos;
2623                         break;
2624                 }
2625                 if (tmp == pos) {
2626                         tconn = pos;
2627                         break;
2628                 }
2629         }
2630         if (tconn) {
2631 next_tconn:
2632                 mdev = idr_get_next(&tconn->volumes, &volume);
2633                 if (!mdev) {
2634                         /* No more volumes to dump on this tconn.
2635                          * Advance tconn iterator. */
2636                         pos = list_entry(tconn->all_tconn.next,
2637                                         struct drbd_tconn, all_tconn);
2638                         /* Did we dump any volume on this tconn yet? */
2639                         if (volume != 0) {
2640                                 /* If we reached the end of the list,
2641                                  * or only a single resource dump was requested,
2642                                  * we are done. */
2643                                 if (&pos->all_tconn == &drbd_tconns || cb->args[2])
2644                                         goto out;
2645                                 volume = 0;
2646                                 tconn = pos;
2647                                 goto next_tconn;
2648                         }
2649                 }
2650
2651                 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
2652                                 cb->nlh->nlmsg_seq, &drbd_genl_family,
2653                                 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
2654                 if (!dh)
2655                         goto out;
2656
2657                 if (!mdev) {
2658                         /* this is a tconn without a single volume */
2659                         dh->minor = -1U;
2660                         dh->ret_code = NO_ERROR;
2661                         if (nla_put_drbd_cfg_context(skb, tconn->name, VOLUME_UNSPECIFIED))
2662                                 genlmsg_cancel(skb, dh);
2663                         else
2664                                 genlmsg_end(skb, dh);
2665                         goto out;
2666                 }
2667
2668                 D_ASSERT(mdev->vnr == volume);
2669                 D_ASSERT(mdev->tconn == tconn);
2670
2671                 dh->minor = mdev_to_minor(mdev);
2672                 dh->ret_code = NO_ERROR;
2673
2674                 if (nla_put_status_info(skb, mdev, NULL)) {
2675                         genlmsg_cancel(skb, dh);
2676                         goto out;
2677                 }
2678                 genlmsg_end(skb, dh);
2679         }
2680
2681 out:
2682         up_read(&drbd_cfg_rwsem);
2683         /* where to start the next iteration */
2684         cb->args[0] = (long)pos;
2685         cb->args[1] = (pos == tconn) ? volume + 1 : 0;
2686
2687         /* No more tconns/volumes/minors found results in an empty skb.
2688          * Which will terminate the dump. */
2689         return skb->len;
2690 }
2691
2692 /*
2693  * Request status of all resources, or of all volumes within a single resource.
2694  *
2695  * This is a dump, as the answer may not fit in a single reply skb otherwise.
2696  * Which means we cannot use the family->attrbuf or other such members, because
2697  * dump is NOT protected by the genl_lock().  During dump, we only have access
2698  * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
2699  *
2700  * Once things are setup properly, we call into get_one_status().
2701  */
2702 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
2703 {
2704         const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
2705         struct nlattr *nla;
2706         const char *conn_name;
2707         struct drbd_tconn *tconn;
2708
2709         /* Is this a followup call? */
2710         if (cb->args[0]) {
2711                 /* ... of a single resource dump,
2712                  * and the resource iterator has been advanced already? */
2713                 if (cb->args[2] && cb->args[2] != cb->args[0])
2714                         return 0; /* DONE. */
2715                 goto dump;
2716         }
2717
2718         /* First call (from netlink_dump_start).  We need to figure out
2719          * which resource(s) the user wants us to dump. */
2720         nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
2721                         nlmsg_attrlen(cb->nlh, hdrlen),
2722                         DRBD_NLA_CFG_CONTEXT);
2723
2724         /* No explicit context given.  Dump all. */
2725         if (!nla)
2726                 goto dump;
2727         nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
2728         /* context given, but no name present? */
2729         if (!nla)
2730                 return -EINVAL;
2731         conn_name = nla_data(nla);
2732         tconn = conn_get_by_name(conn_name);
2733
2734         if (!tconn)
2735                 return -ENODEV;
2736
2737         kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */
2738
2739         /* prime iterators, and set "filter" mode mark:
2740          * only dump this tconn. */
2741         cb->args[0] = (long)tconn;
2742         /* cb->args[1] = 0; passed in this way. */
2743         cb->args[2] = (long)tconn;
2744
2745 dump:
2746         return get_one_status(skb, cb);
2747 }
2748
2749 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
2750 {
2751         enum drbd_ret_code retcode;
2752         struct timeout_parms tp;
2753         int err;
2754
2755         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2756         if (!adm_ctx.reply_skb)
2757                 return retcode;
2758         if (retcode != NO_ERROR)
2759                 goto out;
2760
2761         tp.timeout_type =
2762                 adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
2763                 test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
2764                 UT_DEFAULT;
2765
2766         err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
2767         if (err) {
2768                 nlmsg_free(adm_ctx.reply_skb);
2769                 return err;
2770         }
2771 out:
2772         drbd_adm_finish(info, retcode);
2773         return 0;
2774 }
2775
2776 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
2777 {
2778         struct drbd_conf *mdev;
2779         enum drbd_ret_code retcode;
2780
2781         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2782         if (!adm_ctx.reply_skb)
2783                 return retcode;
2784         if (retcode != NO_ERROR)
2785                 goto out;
2786
2787         mdev = adm_ctx.mdev;
2788         if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
2789                 /* resume from last known position, if possible */
2790                 struct start_ov_parms parms =
2791                         { .ov_start_sector = mdev->ov_start_sector };
2792                 int err = start_ov_parms_from_attrs(&parms, info);
2793                 if (err) {
2794                         retcode = ERR_MANDATORY_TAG;
2795                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2796                         goto out;
2797                 }
2798                 /* w_make_ov_request expects position to be aligned */
2799                 mdev->ov_start_sector = parms.ov_start_sector & ~BM_SECT_PER_BIT;
2800         }
2801         /* If there is still bitmap IO pending, e.g. a previous resync or verify
2802          * that has just finished, wait for it before requesting a new online verify. */
2803         wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2804         retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
2805 out:
2806         drbd_adm_finish(info, retcode);
2807         return 0;
2808 }
2809
2810
2811 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
2812 {
2813         struct drbd_conf *mdev;
2814         enum drbd_ret_code retcode;
2815         int skip_initial_sync = 0;
2816         int err;
2817         struct new_c_uuid_parms args;
2818
2819         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2820         if (!adm_ctx.reply_skb)
2821                 return retcode;
2822         if (retcode != NO_ERROR)
2823                 goto out_nolock;
2824
2825         mdev = adm_ctx.mdev;
2826         memset(&args, 0, sizeof(args));
2827         if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
2828                 err = new_c_uuid_parms_from_attrs(&args, info);
2829                 if (err) {
2830                         retcode = ERR_MANDATORY_TAG;
2831                         drbd_msg_put_info(from_attrs_err_to_txt(err));
2832                         goto out_nolock;
2833                 }
2834         }
2835
2836         mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
2837
2838         if (!get_ldev(mdev)) {
2839                 retcode = ERR_NO_DISK;
2840                 goto out;
2841         }
2842
2843         /* this is "skip initial sync", i.e. the data is assumed to be clean */
2844         if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
2845             mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
2846                 dev_info(DEV, "Preparing to skip initial sync\n");
2847                 skip_initial_sync = 1;
2848         } else if (mdev->state.conn != C_STANDALONE) {
2849                 retcode = ERR_CONNECTED;
2850                 goto out_dec;
2851         }
2852
2853         drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
2854         drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
2855
2856         if (args.clear_bm) {
2857                 err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
2858                         "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
2859                 if (err) {
2860                         dev_err(DEV, "Writing bitmap failed with %d\n", err);
2861                         retcode = ERR_IO_MD_DISK;
2862                 }
2863                 if (skip_initial_sync) {
2864                         drbd_send_uuids_skip_initial_sync(mdev);
2865                         _drbd_uuid_set(mdev, UI_BITMAP, 0);
2866                         drbd_print_uuids(mdev, "cleared bitmap UUID");
2867                         spin_lock_irq(&mdev->tconn->req_lock);
2868                         _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
2869                                         CS_VERBOSE, NULL);
2870                         spin_unlock_irq(&mdev->tconn->req_lock);
2871                 }
2872         }
2873
2874         drbd_md_sync(mdev);
2875 out_dec:
2876         put_ldev(mdev);
2877 out:
2878         mutex_unlock(mdev->state_mutex);
2879 out_nolock:
2880         drbd_adm_finish(info, retcode);
2881         return 0;
2882 }
2883
2884 static enum drbd_ret_code
2885 drbd_check_conn_name(const char *name)
2886 {
2887         if (!name || !name[0]) {
2888                 drbd_msg_put_info("connection name missing");
2889                 return ERR_MANDATORY_TAG;
2890         }
2891         /* if we want to use these in sysfs/configfs/debugfs some day,
2892          * we must not allow slashes */
2893         if (strchr(name, '/')) {
2894                 drbd_msg_put_info("invalid connection name");
2895                 return ERR_INVALID_REQUEST;
2896         }
2897         return NO_ERROR;
2898 }
2899
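/*
 * Create a new connection object for the given name.  A name that
 * already exists is only an error if the request carries NLM_F_EXCL,
 * similar to open(O_CREAT|O_EXCL) semantics.
 */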
2900 int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info)
2901 {
2902         enum drbd_ret_code retcode;
2903
2904         retcode = drbd_adm_prepare(skb, info, 0);
2905         if (!adm_ctx.reply_skb)
2906                 return retcode;
2907         if (retcode != NO_ERROR)
2908                 goto out;
2909
2910         retcode = drbd_check_conn_name(adm_ctx.conn_name);
2911         if (retcode != NO_ERROR)
2912                 goto out;
2913
2914         if (adm_ctx.tconn) {
2915                 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
2916                         retcode = ERR_INVALID_REQUEST;
2917                         drbd_msg_put_info("connection exists");
2918                 }
2919                 /* else: still NO_ERROR */
2920                 goto out;
2921         }
2922
2923         if (!conn_create(adm_ctx.conn_name))
2924                 retcode = ERR_NOMEM;
2925 out:
2926         drbd_adm_finish(info, retcode);
2927         return 0;
2928 }
2929
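/*
 * Add a minor (volume) to an existing connection.  The minor number is
 * validated against minor_count and the volume id against
 * DRBD_VOLUME_MAX; an already existing minor is only an error if
 * NLM_F_EXCL is set.
 */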
2930 int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
2931 {
2932         struct drbd_genlmsghdr *dh = info->userhdr;
2933         enum drbd_ret_code retcode;
2934
2935         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2936         if (!adm_ctx.reply_skb)
2937                 return retcode;
2938         if (retcode != NO_ERROR)
2939                 goto out;
2940
2941         /* FIXME drop minor_count parameter, limit to MINORMASK */
2942         if (dh->minor >= minor_count) {
2943                 drbd_msg_put_info("requested minor out of range");
2944                 retcode = ERR_INVALID_REQUEST;
2945                 goto out;
2946         }
2947         if (adm_ctx.volume > DRBD_VOLUME_MAX) {
2948                 drbd_msg_put_info("requested volume id out of range");
2949                 retcode = ERR_INVALID_REQUEST;
2950                 goto out;
2951         }
2952
2953         /* drbd_adm_prepare made sure already
2954          * that mdev->tconn and mdev->vnr match the request. */
2955         if (adm_ctx.mdev) {
2956                 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
2957                         retcode = ERR_MINOR_EXISTS;
2958                 /* else: still NO_ERROR */
2959                 goto out;
2960         }
2961
2962         down_write(&drbd_cfg_rwsem);
2963         retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
2964         up_write(&drbd_cfg_rwsem);
2965 out:
2966         drbd_adm_finish(info, retcode);
2967         return 0;
2968 }
2969
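/*
 * A minor may only be deleted while it is Diskless and Secondary; it
 * need not be StandAlone, so a volume can be removed from a live
 * replication group.
 */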
2970 static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
2971 {
2972         if (mdev->state.disk == D_DISKLESS &&
2973             /* no need to also require mdev->state.conn == C_STANDALONE here:
2974              * we may want to delete a minor from a live replication group.
2975              */
2976             mdev->state.role == R_SECONDARY) {
2977                 drbd_delete_device(mdev);
2978                 return NO_ERROR;
2979         } else
2980                 return ERR_MINOR_CONFIGURED;
2981 }
2982
2983 int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
2984 {
2985         enum drbd_ret_code retcode;
2986
2987         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2988         if (!adm_ctx.reply_skb)
2989                 return retcode;
2990         if (retcode != NO_ERROR)
2991                 goto out;
2992
2993         down_write(&drbd_cfg_rwsem);
2994         retcode = adm_delete_minor(adm_ctx.mdev);
2995         up_write(&drbd_cfg_rwsem);
2996 out:
2997         drbd_adm_finish(info, retcode);
2998         return 0;
2999 }
3000
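/*
 * Tear down a whole connection: demote all volumes to Secondary,
 * disconnect, detach every volume, stop the worker thread, then delete
 * all volumes and finally the connection itself.
 */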
3001 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
3002 {
3003         int retcode; /* enum drbd_ret_code or enum drbd_state_rv */
3004         struct drbd_conf *mdev;
3005         unsigned i;
3006
3007         retcode = drbd_adm_prepare(skb, info, 0);
3008         if (!adm_ctx.reply_skb)
3009                 return retcode;
3010         if (retcode != NO_ERROR)
3011                 goto out;
3012
3013         if (!adm_ctx.tconn) {
3014                 retcode = ERR_CONN_NOT_KNOWN;
3015                 goto out;
3016         }
3017
3018         down_read(&drbd_cfg_rwsem);
3019         /* demote */
3020         idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3021                 retcode = drbd_set_role(mdev, R_SECONDARY, 0);
3022                 if (retcode < SS_SUCCESS) {
3023                         drbd_msg_put_info("failed to demote");
3024                         goto out_unlock;
3025                 }
3026         }
3027         up_read(&drbd_cfg_rwsem);
3028
3029         /* disconnect; may stop the receiver;
3030          * must not hold the drbd_cfg_rwsem */
3031         retcode = conn_try_disconnect(adm_ctx.tconn, 0);
3032         if (retcode < SS_SUCCESS) {
3033                 drbd_msg_put_info("failed to disconnect");
3034                 goto out;
3035         }
3036
3037         down_read(&drbd_cfg_rwsem);
3038         /* detach */
3039         idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3040                 retcode = adm_detach(mdev);
3041                 if (retcode < SS_SUCCESS) {
3042                         drbd_msg_put_info("failed to detach");
3043                         goto out_unlock;
3044                 }
3045         }
3046         up_read(&drbd_cfg_rwsem);
3047
3048         /* If we reach this, all volumes (of this tconn) are Secondary,
3049          * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
3050          * actually stopped; state handling only does drbd_thread_stop_nowait().
3051          * This needs to be done without holding drbd_cfg_rwsem. */
3052         drbd_thread_stop(&adm_ctx.tconn->worker);
3053
3054         /* Now, nothing can fail anymore */
3055
3056         /* delete volumes */
3057         down_write(&drbd_cfg_rwsem);
3058         idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
3059                 retcode = adm_delete_minor(mdev);
3060                 if (retcode != NO_ERROR) {
3061                         /* "can not happen" */
3062                         drbd_msg_put_info("failed to delete volume");
3063                         up_write(&drbd_cfg_rwsem);
3064                         goto out;
3065                 }
3066         }
3067
3068         /* delete connection */
3069         if (conn_lowest_minor(adm_ctx.tconn) < 0) {
3070                 list_del(&adm_ctx.tconn->all_tconn);
3071                 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
3072
3073                 retcode = NO_ERROR;
3074         } else {
3075                 /* "can not happen" */
3076                 retcode = ERR_CONN_IN_USE;
3077                 drbd_msg_put_info("failed to delete connection");
3078         }
3079         up_write(&drbd_cfg_rwsem);
3080         goto out;
3081 out_unlock:
3082         up_read(&drbd_cfg_rwsem);
3083 out:
3084         drbd_adm_finish(info, retcode);
3085         return 0;
3086 }
3087
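/*
 * Delete a connection that has no minors left (conn_lowest_minor() < 0);
 * otherwise fail with ERR_CONN_IN_USE.  On success the worker thread is
 * stopped as well.
 */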
3088 int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info)
3089 {
3090         enum drbd_ret_code retcode;
3091
3092         retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
3093         if (!adm_ctx.reply_skb)
3094                 return retcode;
3095         if (retcode != NO_ERROR)
3096                 goto out;
3097
3098         down_write(&drbd_cfg_rwsem);
3099         if (conn_lowest_minor(adm_ctx.tconn) < 0) {
3100                 list_del(&adm_ctx.tconn->all_tconn);
3101                 kref_put(&adm_ctx.tconn->kref, &conn_destroy);
3102
3103                 retcode = NO_ERROR;
3104         } else {
3105                 retcode = ERR_CONN_IN_USE;
3106         }
3107         up_write(&drbd_cfg_rwsem);
3108
3109         if (retcode == NO_ERROR)
3110                 drbd_thread_stop(&adm_ctx.tconn->worker);
3111 out:
3112         drbd_adm_finish(info, retcode);
3113         return 0;
3114 }
3115
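/*
 * Broadcast the event described by @sib for @mdev to the DRBD_EVENT
 * netlink multicast group.  A missing listener (-ESRCH) is not treated
 * as an error.
 */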
3116 void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
3117 {
3118         static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
3119         struct sk_buff *msg;
3120         struct drbd_genlmsghdr *d_out;
3121         unsigned seq;
3122         int err = -ENOMEM;
3123
3124         seq = atomic_inc_return(&drbd_genl_seq);
3125         msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
3126         if (!msg)
3127                 goto failed;
3128
3129         err = -EMSGSIZE;
3130         d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
3131         if (!d_out) /* cannot happen, but anyway. */
3132                 goto nla_put_failure;
3133         d_out->minor = mdev_to_minor(mdev);
3134         d_out->ret_code = 0;
3135
3136         if (nla_put_status_info(msg, mdev, sib))
3137                 goto nla_put_failure;
3138         genlmsg_end(msg, d_out);
3139         err = drbd_genl_multicast_events(msg, 0);
3140         /* msg has been consumed or freed in netlink_broadcast() */
3141         if (err && err != -ESRCH)
3142                 goto failed;
3143
3144         return;
3145
3146 nla_put_failure:
3147         nlmsg_free(msg);
3148 failed:
3149         dev_err(DEV, "Error %d while broadcasting event. "
3150                         "Event seq:%u sib_reason:%u\n",
3151                         err, seq, sib->sib_reason);
3152 }