/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
#include <linux/drbd_genl_api.h>
#include <linux/genl_magic_func.h>
/* used by blkdev_get_by_path() to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
        /* assigned from drbd_genlmsghdr */
        unsigned int minor;
        /* assigned from request attributes, if present */
        unsigned int volume;
#define VOLUME_UNSPECIFIED (-1U)
        /* pointer into the request skb,
         * limited lifetime! */
        const char *resource_name;
        struct nlattr *my_addr;
        struct nlattr *peer_addr;

        /* reply buffer */
        struct sk_buff *reply_skb;
        /* pointer into reply buffer */
        struct drbd_genlmsghdr *reply_dh;
        /* resolved from attributes, if possible */
        struct drbd_conf *mdev;
        struct drbd_tconn *tconn;
} adm_ctx;
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
        genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
        if (genlmsg_reply(skb, info))
                printk(KERN_ERR "drbd: error sending genl reply\n");
}
/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
 * reason it could fail is lack of space in the skb, and 4k are available. */
int drbd_msg_put_info(const char *info)
        struct sk_buff *skb = adm_ctx.reply_skb;

        if (!info || !info[0])

        nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);

        err = nla_put_string(skb, T_info_text, info);
                nla_nest_cancel(skb, nla);

        nla_nest_end(skb, nla);
/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR      1
#define DRBD_ADM_NEED_RESOURCE   2
#define DRBD_ADM_NEED_CONNECTION 4
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
                unsigned flags)
        struct drbd_genlmsghdr *d_in = info->userhdr;
        const u8 cmd = info->genlhdr->cmd;
        struct nlattr *nla;
        int err;

        memset(&adm_ctx, 0, sizeof(adm_ctx));

        /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
        if (cmd != DRBD_ADM_GET_STATUS
            && security_netlink_recv(skb, CAP_SYS_ADMIN))

        adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!adm_ctx.reply_skb) {

        adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
                        info, &drbd_genl_family, 0, cmd);
        /* put of a few bytes into a fresh skb of >= 4k will always succeed. */
        if (!adm_ctx.reply_dh) {

        adm_ctx.reply_dh->minor = d_in->minor;
        adm_ctx.reply_dh->ret_code = NO_ERROR;

        adm_ctx.volume = VOLUME_UNSPECIFIED;
        if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
                /* parse and validate only */
                err = drbd_cfg_context_from_attrs(NULL, info);

                /* It was present, and valid,
                 * copy it over to the reply skb. */
                err = nla_put_nohdr(adm_ctx.reply_skb,
                                info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
                                info->attrs[DRBD_NLA_CFG_CONTEXT]);

                /* and assign stuff to the global adm_ctx */
                nla = nested_attr_tb[__nla_type(T_ctx_volume)];
                adm_ctx.volume = nla_get_u32(nla);
                nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
                adm_ctx.resource_name = nla_data(nla);
                adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
                adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
                if ((adm_ctx.my_addr &&
                     nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) ||
                    (adm_ctx.peer_addr &&
                     nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) {

        adm_ctx.minor = d_in->minor;
        adm_ctx.mdev = minor_to_mdev(d_in->minor);
        adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);

        if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
                drbd_msg_put_info("unknown minor");
                return ERR_MINOR_INVALID;
        if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) {
                drbd_msg_put_info("unknown resource");
                return ERR_INVALID_REQUEST;

        if (flags & DRBD_ADM_NEED_CONNECTION) {
                if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) {
                        drbd_msg_put_info("no resource name expected");
                        return ERR_INVALID_REQUEST;
                        drbd_msg_put_info("no minor number expected");
                        return ERR_INVALID_REQUEST;
                if (adm_ctx.my_addr && adm_ctx.peer_addr)
                        adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
                                        nla_len(adm_ctx.my_addr),
                                        nla_data(adm_ctx.peer_addr),
                                        nla_len(adm_ctx.peer_addr));
                if (!adm_ctx.tconn) {
                        drbd_msg_put_info("unknown connection");
                        return ERR_INVALID_REQUEST;

        /* some more paranoia, if the request was over-determined */
        if (adm_ctx.mdev && adm_ctx.tconn &&
            adm_ctx.mdev->tconn != adm_ctx.tconn) {
                pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
                                adm_ctx.minor, adm_ctx.resource_name,
                                adm_ctx.mdev->tconn->name);
                drbd_msg_put_info("minor exists in different resource");
                return ERR_INVALID_REQUEST;
        if (adm_ctx.mdev &&
            adm_ctx.volume != VOLUME_UNSPECIFIED &&
            adm_ctx.volume != adm_ctx.mdev->vnr) {
                pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
                                adm_ctx.minor, adm_ctx.volume,
                                adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
                drbd_msg_put_info("minor exists as different volume");
                return ERR_INVALID_REQUEST;

        nlmsg_free(adm_ctx.reply_skb);
        adm_ctx.reply_skb = NULL;
static int drbd_adm_finish(struct genl_info *info, int retcode)
                kref_put(&adm_ctx.tconn->kref, &conn_destroy);
                adm_ctx.tconn = NULL;

        if (!adm_ctx.reply_skb)

        adm_ctx.reply_dh->ret_code = retcode;
        drbd_adm_send_reply(adm_ctx.reply_skb, info);
static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
        /* FIXME: A future version will not allow this case. */
        if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0)

        switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) {
        case AF_INET6:
                snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
                                &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr);
        case AF_INET:
                snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
                                &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
        default:
                snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
                                &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
        snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
        char *envp[] = { "HOME=/",
                        "TERM=linux",
                        "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
                        (char[20]) { }, /* address family */
                        (char[60]) { }, /* address */
                        NULL };
        char mb[12];
        char *argv[] = {usermode_helper, cmd, mb, NULL };

        snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
        setup_khelper_env(mdev->tconn, envp);

        /* The helper may take some time.
         * write out any unsynced meta data changes now */

        dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
        sib.sib_reason = SIB_HELPER_PRE;
        sib.helper_name = cmd;
        drbd_bcast_event(mdev, &sib);
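        /* call_usermodehelper() is invoked in wait-for-process mode (the
         * final argument, 1, is UMH_WAIT_PROC here), so ret is a
         * wait(2)-style status word: (ret >> 8) & 0xff below extracts the
         * helper's exit code. */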
        ret = call_usermodehelper(usermode_helper, argv, envp, 1);
                dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
                                usermode_helper, cmd, mb,
                                (ret >> 8) & 0xff, ret);
                dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
                                usermode_helper, cmd, mb,
                                (ret >> 8) & 0xff, ret);
        sib.sib_reason = SIB_HELPER_POST;
        sib.helper_exit_code = ret;
        drbd_bcast_event(mdev, &sib);

        if (ret < 0) /* Ignore any ERRNOs we got. */
static void conn_md_sync(struct drbd_tconn *tconn)
        struct drbd_conf *mdev;

        idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                kref_get(&mdev->kref);
                kref_put(&mdev->kref, &drbd_minor_destroy);
int conn_khelper(struct drbd_tconn *tconn, char *cmd)
        char *envp[] = { "HOME=/",
                        "TERM=linux",
                        "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
                        (char[20]) { }, /* address family */
                        (char[60]) { }, /* address */
                        NULL };
        char *argv[] = {usermode_helper, cmd, tconn->name, NULL };

        setup_khelper_env(tconn, envp);

        conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
        /* TODO: conn_bcast_event() ?? */

        ret = call_usermodehelper(usermode_helper, argv, envp, 1);
                conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
                                usermode_helper, cmd, tconn->name,
                                (ret >> 8) & 0xff, ret);
                conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
                                usermode_helper, cmd, tconn->name,
                                (ret >> 8) & 0xff, ret);
        /* TODO: conn_bcast_event() ?? */

        if (ret < 0) /* Ignore any ERRNOs we got. */
static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
        enum drbd_fencing_p fp = FP_NOT_AVAIL;
        struct drbd_conf *mdev;

        idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                if (get_ldev_if_state(mdev, D_CONSISTENT)) {
                        fp = max_t(enum drbd_fencing_p, fp,
                                        rcu_dereference(mdev->ldev->disk_conf)->fencing);
bool conn_try_outdate_peer(struct drbd_tconn *tconn)
        union drbd_state mask = { };
        union drbd_state val = { };
        enum drbd_fencing_p fp;

        if (tconn->cstate >= C_WF_REPORT_PARAMS) {
                conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");

        fp = highest_fencing_policy(tconn);
                conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");

        r = conn_khelper(tconn, "fence-peer");
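        /* The fence-peer handler reports its verdict via its exit code;
         * the case values below follow the exit-code convention for DRBD
         * fence-peer scripts (e.g. crm-fence-peer.sh). */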
        switch ((r>>8) & 0xff) {
        case 3: /* peer is inconsistent */
                ex_to_string = "peer is inconsistent or worse";
                val.pdsk = D_INCONSISTENT;
        case 4: /* peer got outdated, or was already outdated */
                ex_to_string = "peer was fenced";
                val.pdsk = D_OUTDATED;
        case 5: /* peer was down */
                if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
                        /* we will have created a new UUID anyway... */
                        ex_to_string = "peer is unreachable, assumed to be dead";
                        val.pdsk = D_OUTDATED;
                        ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
        case 6: /* Peer is primary, voluntarily outdate myself.
                 * This is useful when an unconnected R_SECONDARY is asked to
                 * become R_PRIMARY, but finds the other peer being active. */
                ex_to_string = "peer is active";
                conn_warn(tconn, "Peer is primary, outdating myself.\n");
                val.disk = D_OUTDATED;
        case 7:
                if (fp != FP_STONITH)
                        conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
                ex_to_string = "peer was stonithed";
                val.pdsk = D_OUTDATED;
        default:
                /* The script is broken ... */
                conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
                return false; /* Possibly leave IO frozen */
        }

        conn_info(tconn, "fence-peer helper returned %d (%s)\n",
                        (r>>8) & 0xff, ex_to_string);

        /* Not using
         *   conn_request_state(tconn, mask, val, CS_VERBOSE);
         * here, because we might have been able to re-establish the
         * connection in the meantime. */
        spin_lock_irq(&tconn->req_lock);
        if (tconn->cstate < C_WF_REPORT_PARAMS)
                _conn_request_state(tconn, mask, val, CS_VERBOSE);
        spin_unlock_irq(&tconn->req_lock);
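        /* Fencing counts as successful only if, afterwards, every volume
         * sees the peer's disk as Outdated or worse. */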
        return conn_highest_pdsk(tconn) <= D_OUTDATED;
static int _try_outdate_peer_async(void *data)
        struct drbd_tconn *tconn = (struct drbd_tconn *)data;

        conn_try_outdate_peer(tconn);

        kref_put(&tconn->kref, &conn_destroy);

void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
        struct task_struct *opa;

        kref_get(&tconn->kref);
        opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
                conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
                kref_put(&tconn->kref, &conn_destroy);
enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
        const int max_tries = 4;
        enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
        union drbd_state mask, val;

        if (new_role == R_PRIMARY)
                request_ping(mdev->tconn); /* Detect a dead peer ASAP */

        mutex_lock(mdev->state_mutex);

        mask.i = 0; mask.role = R_MASK;
        val.i  = 0; val.role  = new_role;

        while (try++ < max_tries) {
                rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

                /* in case we first succeeded to outdate,
                 * but now suddenly could establish a connection */
                if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {

                if (rv == SS_NO_UP_TO_DATE_DISK && force &&
                    (mdev->state.disk < D_UP_TO_DATE &&
                     mdev->state.disk >= D_INCONSISTENT)) {
                        val.disk = D_UP_TO_DATE;

                if (rv == SS_NO_UP_TO_DATE_DISK &&
                    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
                        D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
                        if (conn_try_outdate_peer(mdev->tconn)) {
                                val.disk = D_UP_TO_DATE;

                if (rv == SS_NOTHING_TO_DO)
                if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
                        if (!conn_try_outdate_peer(mdev->tconn) && force) {
                                dev_warn(DEV, "Forced into split brain situation!\n");
                                val.pdsk = D_OUTDATED;

                if (rv == SS_TWO_PRIMARIES) {
                        /* Maybe the peer is detected as dead very soon...
                         * retry at most once more in this case. */
                        nc = rcu_dereference(mdev->tconn->net_conf);
                        timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
                        schedule_timeout_interruptible(timeo);

                if (rv < SS_SUCCESS) {
                        rv = _drbd_request_state(mdev, mask, val,
                                        CS_VERBOSE + CS_WAIT_COMPLETE);

        dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

        /* Wait until nothing is on the fly :) */
        wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
        if (new_role == R_SECONDARY) {
                set_disk_ro(mdev->vdisk, true);
                if (get_ldev(mdev)) {
                        mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

                mutex_lock(&mdev->tconn->conf_update);
                nc = mdev->tconn->net_conf;
                        nc->discard_my_data = 0; /* without copy; single bit op is atomic */
                mutex_unlock(&mdev->tconn->conf_update);

                set_disk_ro(mdev->vdisk, false);
                if (get_ldev(mdev)) {
                        if (((mdev->state.conn < C_CONNECTED ||
                              mdev->state.pdsk <= D_FAILED)
                             && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
                                drbd_uuid_new_current(mdev);

                        mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;

        /* writeout of the bitmap areas covered by the activity log
         * to stable storage is already done in the after-state-change work */

        if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
                /* if this was forced, we should consider sync */
                        drbd_send_uuids(mdev);
                drbd_send_state(mdev);

        kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);

        mutex_unlock(mdev->state_mutex);
static const char *from_attrs_err_to_txt(int err)
        return  err == -ENOMSG ? "required attribute missing" :
                err == -EOPNOTSUPP ? "unknown mandatory attribute" :
                err == -EEXIST ? "can not change invariant setting" :
                "invalid attribute value";
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
        struct set_role_parms parms;
        enum drbd_ret_code retcode;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
        if (retcode != NO_ERROR)

        memset(&parms, 0, sizeof(parms));
        if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
                err = set_role_parms_from_attrs(&parms, info);
                        retcode = ERR_MANDATORY_TAG;
                        drbd_msg_put_info(from_attrs_err_to_txt(err));

        if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
                retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
        else
                retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);

        drbd_adm_finish(info, retcode);
/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
                struct drbd_backing_dev *bdev)
        sector_t md_size_sect = 0;

        meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;

        switch (meta_dev_idx) {
        default:
                /* v07 style fixed size indexed meta data */
                bdev->md.md_size_sect = MD_RESERVED_SECT;
                bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
                bdev->md.al_offset = MD_AL_OFFSET;
                bdev->md.bm_offset = MD_BM_OFFSET;
        case DRBD_MD_INDEX_FLEX_EXT:
                /* just occupy the full device; unit: sectors */
                bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
                bdev->md.md_offset = 0;
                bdev->md.al_offset = MD_AL_OFFSET;
                bdev->md.bm_offset = MD_BM_OFFSET;
        case DRBD_MD_INDEX_INTERNAL:
        case DRBD_MD_INDEX_FLEX_INT:
                bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
                /* al size is still fixed */
                bdev->md.al_offset = -MD_AL_SECTORS;
                /* we need (slightly less than) ~ this many bitmap sectors: */
                md_size_sect = drbd_get_capacity(bdev->backing_bdev);
                md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
                md_size_sect = BM_SECT_TO_EXT(md_size_sect);
                md_size_sect = ALIGN(md_size_sect, 8);

                /* plus the "drbd meta data super block",
                 * and the activity log; */
                md_size_sect += MD_BM_OFFSET;

                bdev->md.md_size_sect = md_size_sect;
                /* bitmap offset is adjusted by 'super' block size */
                bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
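                /* Rough layout sketch for internal meta data: bitmap and
                 * activity log sit at the end of the device, just in front
                 * of the meta data super block; the negative al_offset and
                 * bm_offset values above are relative to that super block. */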
/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
        /* Needs 9 bytes at max including trailing NUL:
         * -1ULL ==> "16384 EB" */
        static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
        int base = 0;
        while (size >= 10000 && base < sizeof(units)-1) {
                /* shift + round */
                size = (size >> 10) + !!(size & (1<<9));
                base++;
        }
        sprintf(buf, "%u %cB", (unsigned)size, units[base]);
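        /* example: an input of 25000000 (KB) comes out as "24 GB" */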
/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
        set_bit(SUSPEND_IO, &mdev->flags);
        if (drbd_suspended(mdev))
        wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));

void drbd_resume_io(struct drbd_conf *mdev)
        clear_bit(SUSPEND_IO, &mdev->flags);
        wake_up(&mdev->misc_wait);
/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @mdev: DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
        sector_t prev_first_sect, prev_size; /* previous meta location */
        sector_t la_size, u_size;
        int md_moved, la_size_changed;
        enum determine_dev_size rv = unchanged;

        /*
         * application request passes inc_ap_bio,
         * but then cannot get an AL-reference.
         * this function later may wait on ap_bio_cnt == 0. -> deadlock.
         *
         * Suspend IO right here.
         * still lock the act_log to not trigger ASSERTs there.
         */
        drbd_suspend_io(mdev);

        /* no wait necessary anymore, actually we could assert that */
        wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

        prev_first_sect = drbd_md_first_sector(mdev->ldev);
        prev_size = mdev->ldev->md.md_size_sect;
        la_size = mdev->ldev->md.la_size_sect;

        /* TODO: should only be some assert here, not (re)init... */
        drbd_md_set_sector_offsets(mdev, mdev->ldev);

        u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;

        size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);

        if (drbd_get_capacity(mdev->this_bdev) != size ||
            drbd_bm_capacity(mdev) != size) {
                err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
                        /* currently there is only one error: ENOMEM! */
                        size = drbd_bm_capacity(mdev)>>1;
                                dev_err(DEV, "OUT OF MEMORY! "
                                                "Could not allocate bitmap!\n");
                                dev_err(DEV, "BM resizing failed. "
                                                "Leaving size unchanged at size = %lu KB\n",
                                                (unsigned long)size);
                /* racy, see comments above. */
                drbd_set_my_capacity(mdev, size);
                mdev->ldev->md.la_size_sect = size;
                dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
                                (unsigned long long)size>>1);

        if (rv == dev_size_error)

        la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

        md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
                || prev_size != mdev->ldev->md.md_size_sect;

        if (la_size_changed || md_moved) {
                drbd_al_shrink(mdev); /* All extents inactive. */
                dev_info(DEV, "Writing the whole bitmap, %s\n",
                                la_size_changed && md_moved ? "size changed and md moved" :
                                la_size_changed ? "size changed" : "md moved");
                /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
                err = drbd_bitmap_io(mdev, &drbd_bm_write,
                                "size changed", BM_LOCKED_MASK);
                drbd_md_mark_dirty(mdev);

        lc_unlock(mdev->act_log);
        wake_up(&mdev->al_wait);
        drbd_resume_io(mdev);
sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
                sector_t u_size, int assume_peer_has_space)
        sector_t p_size = mdev->p_size;           /* partner's disk size. */
        sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
        sector_t m_size;                          /* my size */

        m_size = drbd_get_max_capacity(bdev);

        if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
                dev_warn(DEV, "Resize while not connected was forced by the user!\n");

        if (p_size && m_size) {
                size = min_t(sector_t, p_size, m_size);
                if (m_size && m_size < size)
                if (p_size && p_size < size)
                dev_err(DEV, "Both nodes diskless!\n");
                dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
                                (unsigned long)u_size>>1, (unsigned long)size>>1);
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev: DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
        struct lru_cache *n, *t;
        struct lc_element *e;

        if (mdev->act_log &&
            mdev->act_log->nr_elements == dc->al_extents)

        n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
                        dc->al_extents, sizeof(struct lc_element), 0);
                dev_err(DEV, "Cannot allocate act_log lru!\n");

        spin_lock_irq(&mdev->al_lock);
        for (i = 0; i < t->nr_elements; i++) {
                e = lc_element_by_index(t, i);
                        dev_err(DEV, "refcnt(%d)==%d\n",
                                        e->lc_number, e->refcnt);
        spin_unlock_irq(&mdev->al_lock);
                dev_err(DEV, "Activity log still in use!\n");

        drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
        struct request_queue * const q = mdev->rq_queue;
        int max_hw_sectors = max_bio_size >> 9;
        int max_segments = 0;

        if (get_ldev_if_state(mdev, D_ATTACHING)) {
                struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

                max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
                max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;

        blk_queue_logical_block_size(q, 512);
        blk_queue_max_hw_sectors(q, max_hw_sectors);
        /* This is the workaround for "bio would need to, but cannot, be split" */
        blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
        blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

        if (get_ldev_if_state(mdev, D_ATTACHING)) {
                struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

                blk_queue_stack_limits(q, b);

                if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
                        dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
                                        q->backing_dev_info.ra_pages,
                                        b->backing_dev_info.ra_pages);
                        q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
        int now, new, local, peer;
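        /* all four values are in bytes; the block layer keeps queue limits
         * in 512-byte sectors, hence the << 9 conversions below */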
        now = queue_max_hw_sectors(mdev->rq_queue) << 9;
        local = mdev->local_max_bio_size; /* possibly last known value, from volatile memory */
        peer = mdev->peer_max_bio_size;   /* possibly last known value, from meta data */

        if (get_ldev_if_state(mdev, D_ATTACHING)) {
                local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
                mdev->local_max_bio_size = local;

        /* We may ignore peer limits if the peer is modern enough.
         * From 8.3.8 onwards the peer can use multiple
         * BIOs for a single peer_request */
        if (mdev->state.conn >= C_CONNECTED) {
                if (mdev->tconn->agreed_pro_version < 94)
                        peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
                /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
                else if (mdev->tconn->agreed_pro_version == 94)
                        peer = DRBD_MAX_SIZE_H80_PACKET;
                else if (mdev->tconn->agreed_pro_version < 100)
                        peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
                else
                        peer = DRBD_MAX_BIO_SIZE;

        new = min_t(int, local, peer);

        if (mdev->state.role == R_PRIMARY && new < now)
                dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);

                dev_info(DEV, "max BIO size = %u\n", new);

        drbd_setup_queue_param(mdev, new);
/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_tconn *tconn)
        drbd_thread_start(&tconn->worker);
        conn_flush_workqueue(tconn);

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_tconn *tconn)
        spin_lock_irq(&tconn->req_lock);
        stop_threads = conn_all_vols_unconf(tconn) &&
                        tconn->cstate == C_STANDALONE;
        spin_unlock_irq(&tconn->req_lock);

                /* asender is implicitly stopped by receiver
                 * in conn_disconnect() */
                drbd_thread_stop(&tconn->receiver);
                drbd_thread_stop(&tconn->worker);
/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_conf *mdev)
        if (!lc_try_lock(mdev->act_log)) {
                dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");

        drbd_al_shrink(mdev);
        spin_lock_irq(&mdev->tconn->req_lock);
        if (mdev->state.conn < C_CONNECTED)
                s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
        spin_unlock_irq(&mdev->tconn->req_lock);
        lc_unlock(mdev->act_log);

                dev_info(DEV, "Suspended AL updates\n");
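
/* drbdsetup may pass DRBD_GENL_F_SET_DEFAULTS ("--set-defaults") to request
 * that every option not explicitly given in this call be reset to its
 * default value before the new values are applied. */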
static bool should_set_defaults(struct genl_info *info)
        unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
        return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
static void enforce_disk_conf_limits(struct disk_conf *dc)
        if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
                dc->al_extents = DRBD_AL_EXTENTS_MIN;
        if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
                dc->al_extents = DRBD_AL_EXTENTS_MAX;

        if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
                dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
        enum drbd_ret_code retcode;
        struct drbd_conf *mdev;
        struct disk_conf *new_disk_conf, *old_disk_conf;
        struct fifo_buffer *old_plan = NULL, *new_plan = NULL;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
        if (retcode != NO_ERROR)

        mdev = adm_ctx.mdev;

        /* we also need a disk
         * to change the options on */
        if (!get_ldev(mdev)) {
                retcode = ERR_NO_DISK;

        new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
        if (!new_disk_conf) {
                retcode = ERR_NOMEM;

        mutex_lock(&mdev->tconn->conf_update);
        old_disk_conf = mdev->ldev->disk_conf;
        *new_disk_conf = *old_disk_conf;
        if (should_set_defaults(info))
                set_disk_conf_defaults(new_disk_conf);

        err = disk_conf_from_attrs_for_change(new_disk_conf, info);
        if (err && err != -ENOMSG) {
                retcode = ERR_MANDATORY_TAG;
                drbd_msg_put_info(from_attrs_err_to_txt(err));

        if (!expect(new_disk_conf->resync_rate >= 1))
                new_disk_conf->resync_rate = 1;

        enforce_disk_conf_limits(new_disk_conf);
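        /* Size the resync controller's fifo to the plan-ahead window:
         * c_plan_ahead is in 0.1s units and the controller runs every
         * SLEEP_TIME jiffies (HZ/10), so this works out to one fifo slot
         * per controller tick. */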
        fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
        if (fifo_size != mdev->rs_plan_s->size) {
                new_plan = fifo_alloc(fifo_size);
                        dev_err(DEV, "kmalloc of fifo_buffer failed");
                        retcode = ERR_NOMEM;

        wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
        drbd_al_shrink(mdev);
        err = drbd_check_al_size(mdev, new_disk_conf);
        lc_unlock(mdev->act_log);
        wake_up(&mdev->al_wait);
                retcode = ERR_NOMEM;

        write_lock_irq(&global_state_lock);
        retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
        if (retcode == NO_ERROR) {
                rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
                drbd_resync_after_changed(mdev);
        write_unlock_irq(&global_state_lock);

        if (retcode != NO_ERROR)

        old_plan = mdev->rs_plan_s;
        rcu_assign_pointer(mdev->rs_plan_s, new_plan);

        mutex_unlock(&mdev->tconn->conf_update);

        if (mdev->state.conn >= C_CONNECTED)
                drbd_send_sync_param(mdev);

        kfree(old_disk_conf);
        mod_timer(&mdev->request_timer, jiffies + HZ);

        mutex_unlock(&mdev->tconn->conf_update);
        kfree(new_disk_conf);

        drbd_adm_finish(info, retcode);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        struct drbd_conf *mdev;
        enum drbd_ret_code retcode;
        enum determine_dev_size dd;
        sector_t max_possible_sectors;
        sector_t min_md_device_sectors;
        struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
        struct disk_conf *new_disk_conf = NULL;
        struct block_device *bdev;
        struct lru_cache *resync_lru = NULL;
        struct fifo_buffer *new_plan = NULL;
        union drbd_state ns, os;
        enum drbd_state_rv rv;
        struct net_conf *nc;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
        if (retcode != NO_ERROR)

        mdev = adm_ctx.mdev;
        conn_reconfig_start(mdev->tconn);
        /* if you want to reconfigure, please tear down first */
        if (mdev->state.disk > D_DISKLESS) {
                retcode = ERR_DISK_CONFIGURED;

        /* It may just now have detached because of IO error. Make sure
         * drbd_ldev_destroy is done already, we may end up here very fast,
         * e.g. if someone calls attach from the on-io-error handler,
         * to realize a "hot spare" feature (not that I'd recommend that) */
        wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));

        /* allocation not in the IO path, drbdsetup context */
        nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
                retcode = ERR_NOMEM;

        new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
        if (!new_disk_conf) {
                retcode = ERR_NOMEM;
        nbc->disk_conf = new_disk_conf;

        set_disk_conf_defaults(new_disk_conf);
        err = disk_conf_from_attrs(new_disk_conf, info);
                retcode = ERR_MANDATORY_TAG;
                drbd_msg_put_info(from_attrs_err_to_txt(err));

        enforce_disk_conf_limits(new_disk_conf);

        new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
                retcode = ERR_NOMEM;

        if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
                retcode = ERR_MD_IDX_INVALID;

        nc = rcu_dereference(mdev->tconn->net_conf);
                if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
                        retcode = ERR_STONITH_AND_PROT_A;

        bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
                        FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
                dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
                                PTR_ERR(bdev));
                retcode = ERR_OPEN_DISK;
        nbc->backing_bdev = bdev;

        /*
         * meta_dev_idx >= 0: external fixed size, possibly multiple
         * drbd sharing one meta device.  TODO in that case, paranoia
         * check that [md_bdev, meta_dev_idx] is not yet used by some
         * other drbd minor!  (if you use drbd.conf + drbdadm, that
         * should check it for you already; but if you don't, or
         * someone fooled it, we need to double check here)
         */
        bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
                        FMODE_READ | FMODE_WRITE | FMODE_EXCL,
                        (new_disk_conf->meta_dev_idx < 0) ?
                        (void *)mdev : (void *)drbd_m_holder);
                dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
                                PTR_ERR(bdev));
                retcode = ERR_OPEN_MD_DISK;
        nbc->md_bdev = bdev;

        if ((nbc->backing_bdev == nbc->md_bdev) !=
            (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
             new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
                retcode = ERR_MD_IDX_INVALID;

        resync_lru = lc_create("resync", drbd_bm_ext_cache,
                        1, 61, sizeof(struct bm_extent),
                        offsetof(struct bm_extent, lce));
                retcode = ERR_NOMEM;

        /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
        drbd_md_set_sector_offsets(mdev, nbc);

        if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
                dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
                                (unsigned long long) drbd_get_max_capacity(nbc),
                                (unsigned long long) new_disk_conf->disk_size);
                retcode = ERR_DISK_TOO_SMALL;

        if (new_disk_conf->meta_dev_idx < 0) {
                max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
                /* at least one MB, otherwise it does not make sense */
                min_md_device_sectors = (2<<10);

                max_possible_sectors = DRBD_MAX_SECTORS;
                min_md_device_sectors = MD_RESERVED_SECT * (new_disk_conf->meta_dev_idx + 1);

        if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
                retcode = ERR_MD_DISK_TOO_SMALL;
                dev_warn(DEV, "refusing attach: md-device too small, "
                                "at least %llu sectors needed for this meta-disk type\n",
                                (unsigned long long) min_md_device_sectors);

        /* Make sure the new disk is big enough
         * (we may currently be R_PRIMARY with no local disk...) */
        if (drbd_get_max_capacity(nbc) <
            drbd_get_capacity(mdev->this_bdev)) {
                retcode = ERR_DISK_TOO_SMALL;

        nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

        if (nbc->known_size > max_possible_sectors) {
                dev_warn(DEV, "==> truncating very big lower level device "
                                "to currently maximum possible %llu sectors <==\n",
                                (unsigned long long) max_possible_sectors);
                if (new_disk_conf->meta_dev_idx >= 0)
                        dev_warn(DEV, "==>> using internal or flexible "
                                        "meta data may help <<==\n");
        drbd_suspend_io(mdev);
        /* also wait for the last barrier ack. */
        wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
        /* and for any other previously queued work */
        drbd_flush_workqueue(mdev);

        rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
        retcode = rv;  /* FIXME: Type mismatch. */
        drbd_resume_io(mdev);
        if (rv < SS_SUCCESS)

        if (!get_ldev_if_state(mdev, D_ATTACHING))
                goto force_diskless;

        drbd_md_set_sector_offsets(mdev, nbc);

        if (!mdev->bitmap) {
                if (drbd_bm_init(mdev)) {
                        retcode = ERR_NOMEM;
                        goto force_diskless_dec;

        retcode = drbd_md_read(mdev, nbc);
        if (retcode != NO_ERROR)
                goto force_diskless_dec;

        if (mdev->state.conn < C_CONNECTED &&
            mdev->state.role == R_PRIMARY &&
            (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
                dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
                                (unsigned long long)mdev->ed_uuid);
                retcode = ERR_DATA_NOT_CURRENT;
                goto force_diskless_dec;

        /* Since we are diskless, fix the activity log first... */
        if (drbd_check_al_size(mdev, new_disk_conf)) {
                retcode = ERR_NOMEM;
                goto force_diskless_dec;

        /* Prevent shrinking of consistent devices ! */
        if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
            drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
                dev_warn(DEV, "refusing to truncate a consistent device\n");
                retcode = ERR_DISK_TOO_SMALL;
                goto force_diskless_dec;

        /* Reset the "barriers don't work" bits here, then force meta data to
         * be written, to ensure we determine if barriers are supported. */
        if (new_disk_conf->md_flushes)
                clear_bit(MD_NO_FUA, &mdev->flags);
        else
                set_bit(MD_NO_FUA, &mdev->flags);

        /* Point of no return reached.
         * Devices and memory are no longer released by error cleanup below.
         * now mdev takes over responsibility, and the state engine should
         * clean it up somewhere. */
        D_ASSERT(mdev->ldev == NULL);
        mdev->resync = resync_lru;
        mdev->rs_plan_s = new_plan;
        new_disk_conf = NULL;

        mdev->write_ordering = WO_bdev_flush;
        drbd_bump_write_ordering(mdev, WO_bdev_flush);

        if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
                set_bit(CRASHED_PRIMARY, &mdev->flags);
        else
                clear_bit(CRASHED_PRIMARY, &mdev->flags);

        if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
            !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod))
                set_bit(CRASHED_PRIMARY, &mdev->flags);
        drbd_reconsider_max_bio_size(mdev);

        /* If I am currently not R_PRIMARY,
         * but meta data primary indicator is set,
         * I just now recover from a hard crash,
         * and have been R_PRIMARY before that crash.
         *
         * Now, if I had no connection before that crash
         * (have been degraded R_PRIMARY), chances are that
         * I won't find my peer now either.
         *
         * In that case, and _only_ in that case,
         * we use the degr-wfc-timeout instead of the default,
         * so we can automatically recover from a crash of a
         * degraded but active "cluster" after a certain timeout.
         */
        clear_bit(USE_DEGR_WFC_T, &mdev->flags);
        if (mdev->state.role != R_PRIMARY &&
            drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
            !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
                set_bit(USE_DEGR_WFC_T, &mdev->flags);
        dd = drbd_determine_dev_size(mdev, 0);
        if (dd == dev_size_error) {
                retcode = ERR_NOMEM_BITMAP;
                goto force_diskless_dec;
        } else if (dd == grew)
                set_bit(RESYNC_AFTER_NEG, &mdev->flags);

        if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
                dev_info(DEV, "Assuming that all blocks are out of sync "
                                "(aka FullSync)\n");
                if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
                                "set_n_write from attaching", BM_LOCKED_MASK)) {
                        retcode = ERR_IO_MD_DISK;
                        goto force_diskless_dec;

                if (drbd_bitmap_io(mdev, &drbd_bm_read,
                                "read from attaching", BM_LOCKED_MASK)) {
                        retcode = ERR_IO_MD_DISK;
                        goto force_diskless_dec;

        if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
                drbd_suspend_al(mdev); /* IO is still suspended here... */
        spin_lock_irq(&mdev->tconn->req_lock);
        os = drbd_read_state(mdev);

        /* If MDF_CONSISTENT is not set go into inconsistent state,
         * otherwise investigate MDF_WAS_UP_TO_DATE...
         * If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
         * otherwise into D_CONSISTENT state.
         */
        if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
                if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
                        ns.disk = D_CONSISTENT;
                else
                        ns.disk = D_OUTDATED;
        } else {
                ns.disk = D_INCONSISTENT;
        }

        if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
                ns.pdsk = D_OUTDATED;

        if (ns.disk == D_CONSISTENT &&
            (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
                ns.disk = D_UP_TO_DATE;

        /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
         * MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
         * this point, because drbd_request_state() modifies these
         * flags. */

        /* In case we are C_CONNECTED postpone any decision on the new disk
         * state after the negotiation phase. */
        if (mdev->state.conn == C_CONNECTED) {
                mdev->new_state_tmp.i = ns.i;
                ns.disk = D_NEGOTIATING;

                /* We expect to receive up-to-date UUIDs soon.
                 * To avoid a race in receive_state, free p_uuid while
                 * holding req_lock. I.e. atomic with the state change */
                kfree(mdev->p_uuid);
                mdev->p_uuid = NULL;

        rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
        spin_unlock_irq(&mdev->tconn->req_lock);

        if (rv < SS_SUCCESS)
                goto force_diskless_dec;

        mod_timer(&mdev->request_timer, jiffies + HZ);

        if (mdev->state.role == R_PRIMARY)
                mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
        else
                mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

        drbd_md_mark_dirty(mdev);

        kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
        conn_reconfig_done(mdev->tconn);
        drbd_adm_finish(info, retcode);

        drbd_force_state(mdev, NS(disk, D_DISKLESS));

        conn_reconfig_done(mdev->tconn);
                if (nbc->backing_bdev)
                        blkdev_put(nbc->backing_bdev,
                                        FMODE_READ | FMODE_WRITE | FMODE_EXCL);
                blkdev_put(nbc->md_bdev,
                                FMODE_READ | FMODE_WRITE | FMODE_EXCL);
        kfree(new_disk_conf);
        lc_destroy(resync_lru);

        drbd_adm_finish(info, retcode);
static int adm_detach(struct drbd_conf *mdev, int force)
        enum drbd_state_rv retcode;

        if (force) {
                drbd_force_state(mdev, NS(disk, D_FAILED));
                retcode = SS_SUCCESS;

        drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
        drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
        retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
        drbd_md_put_buffer(mdev);
        /* D_FAILED will transition to DISKLESS. */
        ret = wait_event_interruptible(mdev->misc_wait,
                        mdev->state.disk != D_FAILED);
        drbd_resume_io(mdev);
        if ((int)retcode == (int)SS_IS_DISKLESS)
                retcode = SS_NOTHING_TO_DO;
/* Detaching the disk is a process in multiple stages. First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then have we finally detached. */
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
        enum drbd_ret_code retcode;
        struct detach_parms parms = { };

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
        if (retcode != NO_ERROR)

        if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
                err = detach_parms_from_attrs(&parms, info);
                        retcode = ERR_MANDATORY_TAG;
                        drbd_msg_put_info(from_attrs_err_to_txt(err));

        retcode = adm_detach(adm_ctx.mdev, parms.force_detach);

        drbd_adm_finish(info, retcode);
static bool conn_resync_running(struct drbd_tconn *tconn)
        struct drbd_conf *mdev;

        idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                if (mdev->state.conn == C_SYNC_SOURCE ||
                    mdev->state.conn == C_SYNC_TARGET ||
                    mdev->state.conn == C_PAUSED_SYNC_S ||
                    mdev->state.conn == C_PAUSED_SYNC_T) {

static bool conn_ov_running(struct drbd_tconn *tconn)
        struct drbd_conf *mdev;

        idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                if (mdev->state.conn == C_VERIFY_S ||
                    mdev->state.conn == C_VERIFY_T) {
static enum drbd_ret_code
_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
        struct drbd_conf *mdev;

        if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
                if (new_conf->wire_protocol != old_conf->wire_protocol)
                        return ERR_NEED_APV_100;

                if (new_conf->two_primaries != old_conf->two_primaries)
                        return ERR_NEED_APV_100;
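                /* '!' normalizes both sides to 0/1, so this fires only when
                 * exactly one of the two configs has an integrity_alg set */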
                if (!new_conf->integrity_alg != !old_conf->integrity_alg)
                        return ERR_NEED_APV_100;

                if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
                        return ERR_NEED_APV_100;

        if (!new_conf->two_primaries &&
            conn_highest_role(tconn) == R_PRIMARY &&
            conn_highest_peer(tconn) == R_PRIMARY)
                return ERR_NEED_ALLOW_TWO_PRI;

        if (new_conf->two_primaries &&
            (new_conf->wire_protocol != DRBD_PROT_C))
                return ERR_NOT_PROTO_C;

        idr_for_each_entry(&tconn->volumes, mdev, i) {
                if (get_ldev(mdev)) {
                        enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;

                        if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
                                return ERR_STONITH_AND_PROT_A;

                if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)

        if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
                return ERR_CONG_NOT_PROTO_A;
static enum drbd_ret_code
check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
        enum drbd_ret_code rv;
        struct drbd_conf *mdev;

        rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);

        /* tconn->volumes protected by genl_lock() here */
        idr_for_each_entry(&tconn->volumes, mdev, i) {
                if (!mdev->bitmap) {
                        if (drbd_bm_init(mdev))
struct crypto {
        struct crypto_hash *verify_tfm;
        struct crypto_hash *csums_tfm;
        struct crypto_hash *cram_hmac_tfm;
        struct crypto_hash *integrity_tfm;
};

static enum drbd_ret_code
alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
        *tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
static enum drbd_ret_code
alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
        char hmac_name[CRYPTO_MAX_ALG_NAME];
        enum drbd_ret_code rv;

        rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
        rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
        rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
        if (new_conf->cram_hmac_alg[0] != 0) {
                snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
                                new_conf->cram_hmac_alg);
                rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,

static void free_crypto(struct crypto *crypto)
        crypto_free_hash(crypto->cram_hmac_tfm);
        crypto_free_hash(crypto->integrity_tfm);
        crypto_free_hash(crypto->csums_tfm);
        crypto_free_hash(crypto->verify_tfm);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
        enum drbd_ret_code retcode;
        struct drbd_tconn *tconn;
        struct net_conf *old_conf, *new_conf = NULL;
        int ovr; /* online verify running */
        int rsr; /* re-sync running */
        struct crypto crypto = { };

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
        if (!adm_ctx.reply_skb)
        if (retcode != NO_ERROR)

        tconn = adm_ctx.tconn;

        new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
                retcode = ERR_NOMEM;

        conn_reconfig_start(tconn);
        mutex_lock(&tconn->data.mutex);
        mutex_lock(&tconn->conf_update);
        old_conf = tconn->net_conf;
                drbd_msg_put_info("net conf missing, try connect");
                retcode = ERR_INVALID_REQUEST;

        *new_conf = *old_conf;
        if (should_set_defaults(info))
                set_net_conf_defaults(new_conf);

        err = net_conf_from_attrs_for_change(new_conf, info);
        if (err && err != -ENOMSG) {
                retcode = ERR_MANDATORY_TAG;
                drbd_msg_put_info(from_attrs_err_to_txt(err));

        retcode = check_net_options(tconn, new_conf);
        if (retcode != NO_ERROR)

        /* re-sync running */
        rsr = conn_resync_running(tconn);
        if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
                retcode = ERR_CSUMS_RESYNC_RUNNING;

        /* online verify running */
        ovr = conn_ov_running(tconn);
        if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
                retcode = ERR_VERIFY_RUNNING;

        retcode = alloc_crypto(&crypto, new_conf);
        if (retcode != NO_ERROR)

        rcu_assign_pointer(tconn->net_conf, new_conf);

                crypto_free_hash(tconn->csums_tfm);
                tconn->csums_tfm = crypto.csums_tfm;
                crypto.csums_tfm = NULL;

                crypto_free_hash(tconn->verify_tfm);
                tconn->verify_tfm = crypto.verify_tfm;
                crypto.verify_tfm = NULL;

        crypto_free_hash(tconn->integrity_tfm);
        tconn->integrity_tfm = crypto.integrity_tfm;
        if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
                /* Do this without trying to take tconn->data.mutex again. */
                __drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);

        crypto_free_hash(tconn->cram_hmac_tfm);
        tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;

        mutex_unlock(&tconn->conf_update);
        mutex_unlock(&tconn->data.mutex);

        if (tconn->cstate >= C_WF_REPORT_PARAMS)
                drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));

        mutex_unlock(&tconn->conf_update);
        mutex_unlock(&tconn->data.mutex);
        free_crypto(&crypto);

        conn_reconfig_done(tconn);

        drbd_adm_finish(info, retcode);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
        struct drbd_conf *mdev;
        struct net_conf *old_conf, *new_conf = NULL;
        struct crypto crypto = { };
        struct drbd_tconn *tconn;
        enum drbd_ret_code retcode;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);

        if (!adm_ctx.reply_skb)
        if (retcode != NO_ERROR)

        if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
                drbd_msg_put_info("connection endpoint(s) missing");
                retcode = ERR_INVALID_REQUEST;

        /* No need for _rcu here. All reconfiguration is
         * strictly serialized on genl_lock(). We are protected against
         * concurrent reconfiguration/addition/deletion */
        list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
                if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len &&
                    !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) {
                        retcode = ERR_LOCAL_ADDR;

                if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len &&
                    !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) {
                        retcode = ERR_PEER_ADDR;

        tconn = adm_ctx.tconn;
        conn_reconfig_start(tconn);

        if (tconn->cstate > C_STANDALONE) {
                retcode = ERR_NET_CONFIGURED;

        /* allocation not in the IO path, drbdsetup / netlink process context */
        new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
                retcode = ERR_NOMEM;

        set_net_conf_defaults(new_conf);

        err = net_conf_from_attrs(new_conf, info);
        if (err && err != -ENOMSG) {
                retcode = ERR_MANDATORY_TAG;
                drbd_msg_put_info(from_attrs_err_to_txt(err));

        retcode = check_net_options(tconn, new_conf);
        if (retcode != NO_ERROR)

        retcode = alloc_crypto(&crypto, new_conf);
        if (retcode != NO_ERROR)

        ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
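        /* the NUL termination just above makes strlen()/strcmp() on the
         * shared secret safe even if userspace sent an unterminated string */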
        conn_flush_workqueue(tconn);

        mutex_lock(&tconn->conf_update);
        old_conf = tconn->net_conf;
                retcode = ERR_NET_CONFIGURED;
                mutex_unlock(&tconn->conf_update);

        rcu_assign_pointer(tconn->net_conf, new_conf);

        conn_free_crypto(tconn);
        tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
        tconn->integrity_tfm = crypto.integrity_tfm;
        tconn->csums_tfm = crypto.csums_tfm;
        tconn->verify_tfm = crypto.verify_tfm;

        tconn->my_addr_len = nla_len(adm_ctx.my_addr);
        memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len);
        tconn->peer_addr_len = nla_len(adm_ctx.peer_addr);
        memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len);

        mutex_unlock(&tconn->conf_update);

        idr_for_each_entry(&tconn->volumes, mdev, i) {

        retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);

        conn_reconfig_done(tconn);
        drbd_adm_finish(info, retcode);

        free_crypto(&crypto);

        conn_reconfig_done(tconn);

        drbd_adm_finish(info, retcode);
static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
        enum drbd_state_rv rv;

        rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
                        force ? CS_HARD : 0);

        switch (rv) {
        case SS_NOTHING_TO_DO:
        case SS_ALREADY_STANDALONE:
        case SS_PRIMARY_NOP:
                /* Our state checking code wants to see the peer outdated. */
                rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
                                pdsk, D_OUTDATED), CS_VERBOSE);
        case SS_CW_FAILED_BY_PEER:
                /* The peer probably wants to see us outdated. */
                rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
                                disk, D_OUTDATED), 0);
                if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
                        rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
                                        CS_HARD);
        default:
                /* no special handling necessary */
        if (rv >= SS_SUCCESS) {
                enum drbd_state_rv rv2;
                /* No one else can reconfigure the network while I am here.
                 * The state handling only uses drbd_thread_stop_nowait(),
                 * we want to really wait here until the receiver is no more.
                 */
                drbd_thread_stop(&adm_ctx.tconn->receiver);

                /* Race breaker. This additional state change request may be
                 * necessary, if this was a forced disconnect during a receiver
                 * restart. We may have "killed" the receiver thread just
                 * after drbdd_init() returned. Typically, we should be
                 * C_STANDALONE already, now, and this becomes a no-op.
                 */
                rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
                                CS_VERBOSE | CS_HARD);
                if (rv2 < SS_SUCCESS)
                        conn_err(tconn,
                                        "unexpected rv2=%d in conn_try_disconnect()\n",
                                        rv2);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
        struct disconnect_parms parms;
        struct drbd_tconn *tconn;
        enum drbd_state_rv rv;
        enum drbd_ret_code retcode;

        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
        if (!adm_ctx.reply_skb)
        if (retcode != NO_ERROR)

        tconn = adm_ctx.tconn;
        memset(&parms, 0, sizeof(parms));
        if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
                err = disconnect_parms_from_attrs(&parms, info);
                        retcode = ERR_MANDATORY_TAG;
                        drbd_msg_put_info(from_attrs_err_to_txt(err));

        rv = conn_try_disconnect(tconn, parms.force_disconnect);
        if (rv < SS_SUCCESS)
                retcode = rv; /* FIXME: Type mismatch. */

        drbd_adm_finish(info, retcode);
2205 void resync_after_online_grow(struct drbd_conf *mdev)
2207 int iass; /* I am sync source */
2209 dev_info(DEV, "Resync of new storage after online grow\n");
2210 if (mdev->state.role != mdev->state.peer)
2211 iass = (mdev->state.role == R_PRIMARY);
2213 iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
2216 drbd_start_resync(mdev, C_SYNC_SOURCE);
2218 _drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
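
/*
 * Editor's note, not part of the original driver: the sync-source decision
 * above, spelled out.  When the roles differ, the Primary becomes sync
 * source; when they are equal, the DISCARD_CONCURRENT flag, which the two
 * nodes established during the connection handshake, breaks the tie.
 * A minimal illustrative helper (compiled out, not a DRBD symbol):
 */
#if 0
static bool example_i_am_sync_source(enum drbd_role my_role,
				     enum drbd_role peer_role,
				     bool discard_concurrent)
{
	if (my_role != peer_role)
		return my_role == R_PRIMARY;	/* Primary wins */
	return discard_concurrent;		/* tie-break for equal roles */
}
#endif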
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
	struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
	struct resize_parms rs;
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	enum dds_flags ddsf;
	sector_t u_size;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	memset(&rs, 0, sizeof(struct resize_parms));
	if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
		err = resize_parms_from_attrs(&rs, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	mdev = adm_ctx.mdev;
	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		goto fail_ldev;
	}

	rcu_read_lock();
	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	if (u_size != (sector_t)rs.resize_size) {
		new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
		if (!new_disk_conf) {
			retcode = ERR_NOMEM;
			goto fail_ldev;
		}
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	if (new_disk_conf) {
		mutex_lock(&mdev->tconn->conf_update);
		old_disk_conf = mdev->ldev->disk_conf;
		*new_disk_conf = *old_disk_conf;
		new_disk_conf->disk_size = (sector_t)rs.resize_size;
		rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
		mutex_unlock(&mdev->tconn->conf_update);
		synchronize_rcu();
		kfree(old_disk_conf);
	}

	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determine_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

fail:
	drbd_adm_finish(info, retcode);
	return 0;

fail_ldev:
	put_ldev(mdev);
	goto fail;
}
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_tconn *tconn;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;
	tconn = adm_ctx.tconn;

	res_opts = tconn->res_opts;
	if (should_set_defaults(info))
		set_res_opts_defaults(&res_opts);

	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	err = set_resource_options(tconn, &res_opts);
	if (err) {
		retcode = ERR_INVALID_REQUEST;
		if (err == -ENOMEM)
			retcode = ERR_NOMEM;
	}

fail:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code or enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->tconn->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->tconn->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}
static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
		union drbd_state mask, union drbd_state val)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_request_state(adm_ctx.mdev, mask, val);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
}

int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
	union drbd_dev_state s;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = adm_ctx.mdev->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}

int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code or enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
	}
	drbd_suspend_io(mdev);
	retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (retcode == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev->tconn);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
	}
	drbd_resume_io(mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}
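
/*
 * Editor's note, not part of the original driver: the NS()/NS2()/NS3()
 * helpers used by the handlers above build a (mask, val) argument pair of
 * union drbd_state: the mask marks which state field(s) the request wants
 * to change, val carries the new value(s).  A rough sketch of what
 * NS(user_isp, 1) passes to drbd_request_state() (simplified; see the
 * actual macro definitions in the DRBD headers):
 */
#if 0
static void example_request_pause(struct drbd_conf *mdev)
{
	union drbd_state mask = { .i = 0 };
	union drbd_state val  = { .i = 0 };

	mask.user_isp = user_isp_MASK;	/* "change the user_isp field ..." */
	val.user_isp  = 1;		/* "... to 1" */
	drbd_request_state(mdev, mask, val);
}
#endif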
int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr)
{
	struct nlattr *nla;
	nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		goto nla_put_failure;
	if (vnr != VOLUME_UNSPECIFIED)
		NLA_PUT_U32(skb, T_ctx_volume, vnr);
	NLA_PUT_STRING(skb, T_ctx_resource_name, tconn->name);
	if (tconn->my_addr_len)
		NLA_PUT(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr);
	if (tconn->peer_addr_len)
		NLA_PUT(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr);
	nla_nest_end(skb, nla);
	return 0;

nla_put_failure:
	if (nla)
		nla_nest_cancel(skb, nla);
	return -EMSGSIZE;
}
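
/*
 * Editor's note, not part of the original driver: the function above is an
 * instance of the generic "nested attribute" pattern of the netlink API:
 * open a nest, emit attributes, close the nest, and cancel the whole nest
 * if the skb runs out of tailroom.  A generic sketch, compiled out;
 * attribute types and values are illustrative:
 */
#if 0
static int example_put_nested(struct sk_buff *skb)
{
	struct nlattr *nest = nla_nest_start(skb, 1 /* nest attr type */);

	if (!nest)
		return -EMSGSIZE;
	if (nla_put_u32(skb, 2, 1234) ||
	    nla_put_string(skb, 3, "some-name")) {
		/* roll back the partially written nest */
		nla_nest_cancel(skb, nest);
		return -EMSGSIZE;
	}
	nla_nest_end(skb, nest);	/* patches up the nest length */
	return 0;
}
#endif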
int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
		const struct sib_info *sib)
{
	struct state_info *si = NULL; /* for sizeof(si->member); */
	struct net_conf *nc;
	struct nlattr *nla;
	int got_ldev;
	int err = 0;
	int exclude_sensitive;

	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
	 * to.  So we had better exclude sensitive information.
	 *
	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
	 * in the context of the requesting user process. Exclude sensitive
	 * information, unless current has superuser.
	 *
	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
	 * relies on the current implementation of netlink_dump(), which
	 * executes the dump callback successively from netlink_recvmsg(),
	 * always in the context of the receiving process */
	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);

	got_ldev = get_ldev(mdev);

	/* We still need to add the connection name and volume number
	 * information.  The minor number is already in drbd_genlmsghdr. */
	if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr))
		goto nla_put_failure;

	if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
		goto nla_put_failure;

	rcu_read_lock();
	if (got_ldev)
		if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
			goto nla_put_failure;

	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc)
		err = net_conf_to_skb(skb, nc, exclude_sensitive);
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
	if (!nla)
		goto nla_put_failure;
	NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
	NLA_PUT_U32(skb, T_current_state, mdev->state.i);
	NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
	NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));

	if (got_ldev) {
		NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
		NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
		NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
		NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
		if (C_SYNC_SOURCE <= mdev->state.conn &&
		    C_PAUSED_SYNC_T >= mdev->state.conn) {
			NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
			NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
		}
	}

	if (sib) {
		switch (sib->sib_reason) {
		case SIB_SYNC_PROGRESS:
		case SIB_GET_STATUS_REPLY:
			break;
		case SIB_STATE_CHANGE:
			NLA_PUT_U32(skb, T_prev_state, sib->os.i);
			NLA_PUT_U32(skb, T_new_state, sib->ns.i);
			break;
		case SIB_HELPER_POST:
			NLA_PUT_U32(skb, T_helper_exit_code, sib->helper_exit_code);
			break;
		case SIB_HELPER_PRE:
			NLA_PUT_STRING(skb, T_helper, sib->helper_name);
			break;
		}
	}
	nla_nest_end(skb, nla);

	if (0)
nla_put_failure:
		err = -EMSGSIZE;
	if (got_ldev)
		put_ldev(mdev);
	return err;
}

int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_conf *mdev;
	struct drbd_genlmsghdr *dh;
	struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
	struct drbd_tconn *tconn = NULL;
	struct drbd_tconn *tmp;
	unsigned volume = cb->args[1];

	/* Open coded, deferred, iteration:
	 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
	 *	idr_for_each_entry(&tconn->volumes, mdev, i) {
	 *	  ...
	 *	}
	 * }
	 * where tconn is cb->args[0];
	 * and i is cb->args[1];
	 *
	 * cb->args[2] indicates if we shall loop over all resources,
	 * or just dump all volumes of a single resource.
	 *
	 * This may miss entries inserted after this dump started,
	 * or entries deleted before they are reached.
	 *
	 * We need to make sure the mdev won't disappear while
	 * we are looking at it, and revalidate our iterators
	 * on each iteration.
	 */

	/* synchronize with conn_create()/conn_destroy() */
	rcu_read_lock();
	/* revalidate iterator position */
	list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
		if (pos == NULL) {
			/* first iteration */
			pos = tmp;
			tconn = pos;
			break;
		}
		if (tmp == pos) {
			tconn = pos;
			break;
		}
	}
	if (tconn) {
next_tconn:
		mdev = idr_get_next(&tconn->volumes, &volume);
		if (!mdev) {
			/* No more volumes to dump on this tconn.
			 * Advance tconn iterator. */
			pos = list_entry_rcu(tconn->all_tconn.next,
					     struct drbd_tconn, all_tconn);
			/* Did we dump any volume on this tconn yet? */
			if (volume != 0) {
				/* If we reached the end of the list,
				 * or only a single resource dump was requested,
				 * we are done. */
				if (&pos->all_tconn == &drbd_tconns || cb->args[2])
					goto out;
				volume = 0;
				tconn = pos;
				goto next_tconn;
			}
		}

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
				cb->nlh->nlmsg_seq, &drbd_genl_family,
				NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			goto out;

		if (!mdev) {
			/* This is a tconn without a single volume.
			 * Surprisingly enough, it may have a network
			 * configuration. */
			struct net_conf *nc;
			dh->minor = -1U;
			dh->ret_code = NO_ERROR;
			if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED))
				goto cancel;
			nc = rcu_dereference(tconn->net_conf);
			if (nc && net_conf_to_skb(skb, nc, 1) != 0)
				goto cancel;
			goto done;
		}

		D_ASSERT(mdev->vnr == volume);
		D_ASSERT(mdev->tconn == tconn);

		dh->minor = mdev_to_minor(mdev);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, mdev, NULL)) {
cancel:
			genlmsg_cancel(skb, dh);
			goto out;
		}
done:
		genlmsg_end(skb, dh);
	}

out:
	rcu_read_unlock();
	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == tconn) ? volume + 1 : 0;

	/* If no more tconns/volumes/minors are found, the skb stays empty,
	 * which terminates the dump. */
	return skb->len;
}
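
/*
 * Editor's note, not part of the original driver: get_one_status() above is
 * a netlink dump callback.  The kernel invokes it repeatedly until it adds
 * nothing to the skb; the cb->args[] slots are the only state that survives
 * between invocations, so the iterator position must be serialized into
 * them.  A stripped-down sketch of the same technique over a plain array
 * (compiled out; names illustrative; the genlmsg_put()/NLM_F_MULTI framing
 * that a real dump message needs is omitted for brevity):
 */
#if 0
static const int example_items[] = { 10, 20, 30 };

static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	unsigned long idx = cb->args[0];	/* resume position */

	while (idx < ARRAY_SIZE(example_items)) {
		if (nla_put_u32(skb, 1, example_items[idx]))
			break;	/* skb full: deliver it, resume next call */
		idx++;
	}
	cb->args[0] = idx;	/* remember where to resume */
	return skb->len;	/* zero once nothing was added: dump done */
}
#endif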
/*
 * Request status of all resources, or of all volumes within a single
 * resource.
 *
 * This is a dump, as the answer may not fit in a single reply skb otherwise.
 * That also means we cannot use the family->attrbuf or other such static
 * members, because a dump is NOT protected by the genl_lock().  During a
 * dump, we only have access to the incoming skb, and need to open-code the
 * "parsing" of the nlattr payload.
 *
 * Once things are set up properly, we call into get_one_status().
 */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	struct nlattr *nla;
	const char *resource_name;
	struct drbd_tconn *tconn;
	int maxtype;

	/* Is this a follow-up call? */
	if (cb->args[0]) {
		/* ... of a single resource dump,
		 * and the resource iterator has been advanced already? */
		if (cb->args[2] && cb->args[2] != cb->args[0])
			return 0; /* DONE. */
		goto dump;
	}

	/* First call (from netlink_dump_start).  We need to figure out
	 * which resource(s) the user wants us to dump. */
	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
			nlmsg_attrlen(cb->nlh, hdrlen),
			DRBD_NLA_CFG_CONTEXT);

	/* No explicit context given.  Dump all. */
	if (!nla)
		goto dump;
	maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
	nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
	if (IS_ERR(nla))
		return PTR_ERR(nla);
	/* context given, but no name present? */
	if (!nla)
		return -EINVAL;
	resource_name = nla_data(nla);
	tconn = conn_get_by_name(resource_name);

	if (!tconn)
		return -ENODEV;

	kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */

	/* prime iterators, and set "filter" mode mark:
	 * only dump this tconn. */
	cb->args[0] = (long)tconn;
	/* cb->args[1] = 0; passed in this way. */
	cb->args[2] = (long)tconn;

dump:
	return get_one_status(skb, cb);
}
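
/*
 * Editor's note, not part of the original driver: a userspace peer of this
 * dump handler, sketched with libnl-3.  Assumptions: the genl family is
 * registered as "drbd", the DRBD genl API headers (which define
 * GENL_MAGIC_FAMILY_HDRSZ and DRBD_ADM_GET_STATUS) are on the include path,
 * and the API version is 1; error handling is omitted for brevity.
 */
#if 0
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

static void example_dump_all_resources(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "drbd");

	msg = nlmsg_alloc();
	/* NLM_F_DUMP requests the iterated reply implemented by
	 * drbd_adm_get_status_all()/get_one_status() above */
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family,
		    GENL_MAGIC_FAMILY_HDRSZ /* struct drbd_genlmsghdr */,
		    NLM_F_DUMP, DRBD_ADM_GET_STATUS, 1 /* assumed version */);
	nl_send_auto(sk, msg);
	nlmsg_free(msg);

	nl_recvmsgs_default(sk);	/* receive the multipart reply */
	nl_socket_free(sk);
}
#endif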
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct timeout_parms tp;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tp.timeout_type =
		adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
		UT_DEFAULT;

	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		/* resume from last known position, if possible */
		struct start_ov_parms parms =
			{ .ov_start_sector = mdev->ov_start_sector };
		int err = start_ov_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
		/* w_make_ov_request expects the position to be aligned:
		 * round down to a multiple of BM_SECT_PER_BIT.
		 * (masking with ~BM_SECT_PER_BIT would only clear one bit) */
		mdev->ov_start_sector =
			parms.ov_start_sector & ~(sector_t)(BM_SECT_PER_BIT-1);
	}
	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
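
/*
 * Editor's note, not part of the original driver: the rounding above is the
 * usual power-of-two trick.  For a power of two N, (x & ~(N - 1)) clears
 * the low log2(N) bits, i.e. rounds x down to a multiple of N.  Worked
 * example with N = BM_SECT_PER_BIT = 8 (one bitmap bit covering eight
 * 512-byte sectors, assuming the usual 4KiB bitmap granularity):
 *
 *	x = 21          = 0b10101
 *	N - 1 = 7       = 0b00111
 *	~(N - 1)        = ...11000
 *	x & ~(N - 1)    = 0b10000 = 16, the largest multiple of 8 <= 21
 *
 * By contrast, (x & ~N) would merely clear bit 3 (21 & ~8 = 21 - 16 + ...
 * = 0b10101 & ...10111 = 0b10101 = 21), which is not an alignment
 * operation; hence the ~(sector_t)(BM_SECT_PER_BIT - 1) mask above.
 */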
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out_nolock;

	mdev = adm_ctx.mdev;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assumed to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			drbd_print_uuids(mdev, "cleared bitmap UUID");
			spin_lock_irq(&mdev->tconn->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->tconn->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(mdev->state_mutex);
out_nolock:
	drbd_adm_finish(info, retcode);
	return 0;
}
static enum drbd_ret_code
drbd_check_resource_name(const char *name)
{
	if (!name || !name[0]) {
		drbd_msg_put_info("resource name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info("invalid resource name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}
int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	set_res_opts_defaults(&res_opts);
	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto out;
	}

	retcode = drbd_check_resource_name(adm_ctx.resource_name);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx.tconn) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info("resource exists");
		}
		/* else: still NO_ERROR */
		goto out;
	}

	if (!conn_create(adm_ctx.resource_name, &res_opts))
		retcode = ERR_NOMEM;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_genlmsghdr *dh = info->userhdr;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (dh->minor > MINORMASK) {
		drbd_msg_put_info("requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}
	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
		drbd_msg_put_info("requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* drbd_adm_prepare made sure already
	 * that mdev->tconn and mdev->vnr match the request. */
	if (adm_ctx.mdev) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			retcode = ERR_MINOR_EXISTS;
		/* else: still NO_ERROR */
		goto out;
	}

	retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
{
	if (mdev->state.disk == D_DISKLESS &&
	    /* no need to be mdev->state.conn == C_STANDALONE &&
	     * we may want to delete a minor from a live replication group.
	     */
	    mdev->state.role == R_SECONDARY) {
		_drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS),
				    CS_VERBOSE + CS_WAIT_COMPLETE);
		idr_remove(&mdev->tconn->volumes, mdev->vnr);
		idr_remove(&minors, mdev_to_minor(mdev));
		del_gendisk(mdev->vdisk);
		synchronize_rcu();
		kref_put(&mdev->kref, &drbd_minor_destroy);
		return NO_ERROR;
	} else
		return ERR_MINOR_CONFIGURED;
}

int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = adm_delete_minor(adm_ctx.mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
	int retcode; /* enum drbd_ret_code or enum drbd_state_rv */
	struct drbd_conf *mdev;
	unsigned i;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (!adm_ctx.tconn) {
		retcode = ERR_RES_NOT_KNOWN;
		goto out;
	}

	/* demote */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = drbd_set_role(mdev, R_SECONDARY, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to demote");
			goto out;
		}
	}

	retcode = conn_try_disconnect(adm_ctx.tconn, 0);
	if (retcode < SS_SUCCESS) {
		drbd_msg_put_info("failed to disconnect");
		goto out;
	}

	/* detach */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_detach(mdev, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to detach");
			goto out;
		}
	}

	/* If we reach this, all volumes (of this tconn) are Secondary,
	 * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
	 * actually stopped, state handling only does drbd_thread_stop_nowait(). */
	drbd_thread_stop(&adm_ctx.tconn->worker);

	/* Now, nothing can fail anymore */

	/* delete volumes */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_delete_minor(mdev);
		if (retcode != NO_ERROR) {
			/* "can not happen" */
			drbd_msg_put_info("failed to delete volume");
			goto out;
		}
	}

	/* delete connection */
	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		/* "can not happen" */
		retcode = ERR_RES_IN_USE;
		drbd_msg_put_info("failed to delete connection");
		goto out;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);
		retcode = NO_ERROR;
	} else
		retcode = ERR_RES_IN_USE;

	if (retcode == NO_ERROR)
		drbd_thread_stop(&adm_ctx.tconn->worker);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
{
	static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyway. */
		goto nla_put_failure;
	d_out->minor = mdev_to_minor(mdev);
	d_out->ret_code = NO_ERROR;

	if (nla_put_status_info(msg, mdev, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, 0);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	dev_err(DEV, "Error %d while broadcasting event. "
		     "Event seq:%u sib_reason:%u\n",
		     err, seq, sib->sib_reason);
}
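
/*
 * Editor's note, not part of the original driver: events broadcast by
 * drbd_bcast_event() can be received from userspace by joining the DRBD
 * genl multicast group.  Sketch with libnl-3, compiled out; the family
 * name "drbd" and the group name "events" are taken from the DRBD genl
 * API headers, everything else is illustrative and error handling is
 * omitted for brevity.
 */
#if 0
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

static int example_on_event(struct nl_msg *msg, void *arg)
{
	/* parse the genlmsg/nlattr payload here; the layout is what
	 * nla_put_status_info() above produced */
	return NL_OK;
}

static void example_listen_for_events(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int grp;

	genl_connect(sk);
	grp = genl_ctrl_resolve_grp(sk, "drbd", "events");
	nl_socket_add_membership(sk, grp);
	nl_socket_disable_seq_check(sk);	/* multicasts carry no expected seq */
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM,
			    example_on_event, NULL);
	for (;;)
		nl_recvmsgs_default(sk);
}
#endif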