/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include <linux/genl_magic_func.h>
/* Used with blkdev_get_by_path, to claim our meta data device(s). */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *conn_name;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn;
} adm_ctx;
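/* A minimal sketch of the usage pattern (the real handlers below differ in
 * detail): every drbd_adm_* entry point brackets its work with
 * drbd_adm_prepare() and drbd_adm_finish(), and in between operates on the
 * global adm_ctx resolved above.
 *
 *	int drbd_adm_example(struct sk_buff *skb, struct genl_info *info)
 *	{
 *		enum drbd_ret_code retcode;
 *
 *		retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
 *		if (!adm_ctx.reply_skb)
 *			return retcode;
 *		if (retcode != NO_ERROR)
 *			goto out;
 *		... act on adm_ctx.mdev ...
 *	out:
 *		drbd_adm_finish(info, retcode);
 *		return 0;
 *	}
 *
 * drbd_adm_example is hypothetical; see e.g. drbd_adm_set_role below for a
 * real instance of this pattern.
 */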
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		printk(KERN_ERR "drbd: error sending genl reply\n");
}
/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: the only
 * reason it could fail is lack of space in the skb, and there are 4k available. */
int drbd_msg_put_info(const char *info)
{
	struct sk_buff *skb = adm_ctx.reply_skb;
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;
	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;
	err = nla_put_string(skb, T_info_text, info);
	if (err)
		nla_nest_cancel(skb, nla);
	else
		nla_nest_end(skb, nla);
	return err;
}
/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_CONN	2
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
		unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(&adm_ctx, 0, sizeof(adm_ctx));
	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS
	&& security_netlink_recv(skb, CAP_SYS_ADMIN))
		return -EPERM;

	adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx.reply_skb)
		goto fail;

	adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* put of a few bytes into a fresh skb of >= 4k will always succeed.
	 * but anyways */
	if (!adm_ctx.reply_dh)
		goto fail;

	adm_ctx.reply_dh->minor = d_in->minor;
	adm_ctx.reply_dh->ret_code = NO_ERROR;
	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx.reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the global adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
		nla = nested_attr_tb[__nla_type(T_ctx_conn_name)];
		if (nla)
			adm_ctx.conn_name = nla_data(nla);
	} else
		adm_ctx.volume = VOLUME_UNSPECIFIED;

	adm_ctx.minor = d_in->minor;
	adm_ctx.mdev = minor_to_mdev(d_in->minor);
	adm_ctx.tconn = conn_get_by_name(adm_ctx.conn_name);
	if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info("unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
		drbd_msg_put_info("unknown connection");
		return ERR_INVALID_REQUEST;
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx.mdev && adm_ctx.tconn &&
	    adm_ctx.mdev->tconn != adm_ctx.tconn) {
		pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
			   adm_ctx.minor, adm_ctx.conn_name, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists in different connection");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx.mdev &&
	    adm_ctx.volume != VOLUME_UNSPECIFIED &&
	    adm_ctx.volume != adm_ctx.mdev->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
			   adm_ctx.minor, adm_ctx.volume,
			   adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx.reply_skb);
	adm_ctx.reply_skb = NULL;
	return -ENOMEM;
}
static int drbd_adm_finish(struct genl_info *info, int retcode)
{
	struct nlattr *nla;
	const char *conn_name = NULL;

	if (adm_ctx.tconn) {
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);
		adm_ctx.tconn = NULL;
	}
	if (!adm_ctx.reply_skb)
		return -ENOMEM;

	adm_ctx.reply_dh->ret_code = retcode;

	nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
	if (nla) {
		nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
		if (nla)
			conn_name = nla_data(nla);
	}

	drbd_adm_send_reply(adm_ctx.reply_skb, info);
	return 0;
}
static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
{
	char *afs;
	struct net_conf *nc;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	if (nc) {
		switch (((struct sockaddr *)nc->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)nc->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
		}
		snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
	}
	rcu_read_unlock();
}
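/* With the above, a handler invoked via drbd_khelper()/conn_khelper() sees an
 * environment of roughly (sketch, assuming an IPv4 peer; the address shown is
 * illustrative only):
 *
 *	HOME=/
 *	TERM=linux
 *	PATH=/sbin:/usr/sbin:/bin:/usr/bin
 *	DRBD_PEER_AF=ipv4
 *	DRBD_PEER_ADDRESS=192.0.2.1
 *
 * The address family string is "ipv6", "ipv4" or "ssocks", matching the
 * switch above. */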
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			(char[20]) { }, /* address family */
			(char[60]) { }, /* address */
			NULL };
	char mb[12];
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	struct sib_info sib;
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
	setup_khelper_env(mdev->tconn, envp);

	/* The helper may take some time.
	 * Write out any unsynced meta data changes now. */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(mdev, &sib);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(mdev, &sib);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
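/* Note on the exit code handling above: call_usermodehelper() returns a
 * wait(2)-style status word, so (ret >> 8) & 0xff extracts the helper's
 * exit() code, while a negative value means the helper could not be run
 * at all (those ERRNOs are deliberately ignored and mapped to 0). */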
static void conn_md_sync(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr;

	down_read(&drbd_cfg_rwsem);
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		drbd_md_sync(mdev);
	up_read(&drbd_cfg_rwsem);
}
int conn_khelper(struct drbd_tconn *tconn, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			(char[20]) { }, /* address family */
			(char[60]) { }, /* address */
			NULL };
	char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
	int ret;

	setup_khelper_env(tconn, envp);
	conn_md_sync(tconn);

	conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
	/* TODO: conn_bcast_event() ?? */

	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	else
		conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_conf *mdev;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (get_ldev_if_state(mdev, D_CONSISTENT)) {
			fp = max_t(enum drbd_fencing_p, fp, mdev->ldev->dc.fencing);
			put_ldev(mdev);
		}
	}

	return fp;
}
bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	if (tconn->cstate >= C_WF_REPORT_PARAMS) {
		conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
		return false;
	}

	fp = highest_fencing_policy(tconn);
	switch (fp) {
	case FP_NOT_AVAIL:
		conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
		goto out;
	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(tconn, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		conn_warn(tconn, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case 7:
		if (fp != FP_STONITH)
			conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	conn_info(tconn, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

 out:
	/* Not using
	   conn_request_state(tconn, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the
	   connection in the meantime. */
	spin_lock_irq(&tconn->req_lock);
	if (tconn->cstate < C_WF_REPORT_PARAMS)
		_conn_request_state(tconn, mask, val, CS_VERBOSE);
	spin_unlock_irq(&tconn->req_lock);

	return conn_highest_pdsk(tconn) <= D_OUTDATED;
}
static int _try_outdate_peer_async(void *data)
{
	struct drbd_tconn *tconn = (struct drbd_tconn *)data;

	conn_try_outdate_peer(tconn);

	kref_put(&tconn->kref, &conn_destroy);
	return 0;
}

void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
	struct task_struct *opa;

	kref_get(&tconn->kref);
	opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
	if (IS_ERR(opa)) {
		conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&tconn->kref, &conn_destroy);
	}
}
enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY)
		request_ping(mdev->tconn); /* Detect a dead peer ASAP */

	mutex_lock(mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(mdev->tconn)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(mdev->tconn) && force) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk = D_OUTDATED;
			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			int timeo;
			rcu_read_lock();
			nc = rcu_dereference(mdev->tconn->net_conf);
			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
			rcu_read_unlock();
			schedule_timeout_interruptible(timeo);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		mutex_lock(&mdev->tconn->conf_update);
		nc = mdev->tconn->net_conf;
		if (nc)
			nc->want_lose = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&mdev->tconn->conf_update);

		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(mdev->state_mutex);
	return rv;
}
static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}
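/* Typical use of the above, as seen in the handlers below:
 *
 *	err = set_role_parms_from_attrs(&parms, info);
 *	if (err) {
 *		retcode = ERR_MANDATORY_TAG;
 *		drbd_msg_put_info(from_attrs_err_to_txt(err));
 *	}
 *
 * i.e. the errno from the generated *_from_attrs() parsers is only ever
 * turned into a human readable string for the reply skb. */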
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	switch (bdev->dc.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_SECTORS;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
}
/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}
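/* Example: ppsize(buf, 1048576) yields "1024 MB" -- 1048576 KB is shifted
 * down once (rounding to nearest on the discarded low 10 bits) and the unit
 * advances from 'K' to 'M'.  The extreme -1ULL yields "16384 EB", hence the
 * 9 byte buffer requirement noted above. */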
/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (drbd_suspended(mdev))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}
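/* Usage sketch: these two always come in pairs, bracketing a section that
 * must not see new application IO, e.g.
 *
 *	drbd_suspend_io(mdev);
 *	... resize bitmap, move meta data, ...
 *	drbd_resume_io(mdev);
 *
 * drbd_determine_dev_size() below is the canonical user. */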
/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size, u_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	u_size = mdev->ldev->dc.disk_size;
	size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
				    "Leaving size unchanged at size = %lu KB\n",
				    (unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
		     (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		int err;

		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		err = drbd_bitmap_io(mdev, &drbd_bm_write,
				"size changed", BM_LOCKED_MASK);
		if (err) {
			rv = dev_size_error;
			goto out;
		}
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}
sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else if (la_size) {
		size = la_size;
		if (m_size && m_size < size)
			size = m_size;
		if (p_size && p_size < size)
			size = p_size;
	} else {
		if (m_size)
			size = m_size;
		if (p_size)
			size = p_size;
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (!expect(dc->al_extents >= DRBD_AL_EXTENTS_MIN))
		dc->al_extents = DRBD_AL_EXTENTS_MIN;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	int max_hw_sectors = max_bio_size >> 9;
	int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		max_segments = mdev->ldev->dc.max_bio_bvecs;
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}
void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
	int now, new, local, peer;

	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
	local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		mdev->local_max_bio_size = local;
		put_ldev(mdev);
	}

	/* We may ignore peer limits if the peer is modern enough.
	   Because from 8.3.8 onwards the peer can handle multiple
	   BIOs for a single peer_request. */
	if (mdev->state.conn >= C_CONNECTED) {
		if (mdev->tconn->agreed_pro_version < 94)
			peer = mdev->peer_max_bio_size;
		else if (mdev->tconn->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else /* drbd 8.3.8 onwards */
			peer = DRBD_MAX_BIO_SIZE;
	}

	new = min_t(int, local, peer);

	if (mdev->state.role == R_PRIMARY && new < now)
		dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);

	if (new != now)
		dev_info(DEV, "max BIO size = %u\n", new);

	drbd_setup_queue_param(mdev, new);
}
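/* Worked example for the negotiation above: a connected pair speaking
 * protocol version 96 with a local queue limit of 128 KiB ends up with
 * new = min(131072, DRBD_MAX_BIO_SIZE); at version 94 the H80 packet format
 * caps it at DRBD_MAX_SIZE_H80_PACKET instead, and before 94 we simply
 * stick to whatever limit the peer last reported. */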
/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_tconn *tconn)
{
	drbd_thread_start(&tconn->worker);
	conn_flush_workqueue(tconn);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_tconn *tconn)
{
	bool stop_threads;

	spin_lock_irq(&tconn->req_lock);
	stop_threads = conn_all_vols_unconf(tconn);
	spin_unlock_irq(&tconn->req_lock);
	if (stop_threads) {
		/* asender is implicitly stopped by receiver
		 * in drbd_disconnect() */
		drbd_thread_stop(&tconn->receiver);
		drbd_thread_stop(&tconn->worker);
	}
}
/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (!lc_try_lock(mdev->act_log)) {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
	spin_unlock_irq(&mdev->tconn->req_lock);
	lc_unlock(mdev->act_log);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}
static bool should_set_defaults(struct genl_info *info)
{
	unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}
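/* The flag checked above is set by userland when the operator asks for
 * --set-defaults (presumably via drbdsetup; the exact command spelling is
 * a userland detail), which is why the *_opts handlers below reset their
 * config struct to defaults before applying the changed attributes. */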
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_conf *mdev;
	struct disk_conf *new_disk_conf;
	int err, fifo_size;
	int *rs_plan_s = NULL;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* FIXME freeze IO, cluster wide.
	 *
	 * We should make sure no-one uses
	 * some half-updated struct when we
	 * assign it later. */

	new_disk_conf = kmalloc(sizeof(*new_disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	memcpy(new_disk_conf, &mdev->ldev->dc, sizeof(*new_disk_conf));
	if (should_set_defaults(info))
		set_disk_conf_defaults(new_disk_conf);

	err = disk_conf_from_attrs_for_change(new_disk_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	if (!expect(new_disk_conf->resync_rate >= 1))
		new_disk_conf->resync_rate = 1;

	/* clip to allowed range */
	if (!expect(new_disk_conf->al_extents >= DRBD_AL_EXTENTS_MIN))
		new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
	if (!expect(new_disk_conf->al_extents <= DRBD_AL_EXTENTS_MAX))
		new_disk_conf->al_extents = DRBD_AL_EXTENTS_MAX;

	/* most sanity checks done, try to assign the new sync-after
	 * dependency.  need to hold the global lock in there,
	 * to avoid a race in the dependency loop check. */
	retcode = drbd_alter_sa(mdev, new_disk_conf->resync_after);
	if (retcode != NO_ERROR)
		goto fail;

	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
		rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
		if (!rs_plan_s) {
			dev_err(DEV, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (fifo_size != mdev->rs_plan_s.size) {
		kfree(mdev->rs_plan_s.values);
		mdev->rs_plan_s.values = rs_plan_s;
		mdev->rs_plan_s.size = fifo_size;
		mdev->rs_planed = 0;
		rs_plan_s = NULL;
	}

	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
	drbd_al_shrink(mdev);
	err = drbd_check_al_size(mdev, new_disk_conf);
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);

	if (err) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* FIXME
	 * To avoid someone looking at a half-updated struct, we probably
	 * should have a rw-semaphore on net_conf and disk_conf.
	 */
	mdev->ldev->dc = *new_disk_conf;

	drbd_md_sync(mdev);

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev);

 fail:
	put_ldev(mdev);
	kfree(new_disk_conf);
	kfree(rs_plan_s);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int err;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct block_device *bdev;
	struct lru_cache *resync_lru = NULL;
	union drbd_state ns, os;
	enum drbd_state_rv rv;
	struct net_conf *nc;
	int cp_discovered = 0;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	mdev = adm_ctx.mdev;
	conn_reconfig_start(mdev->tconn);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}
	/* It may just now have detached because of IO error.  Make sure
	 * drbd_ldev_destroy is done already, we may end up here very fast,
	 * e.g. if someone calls attach from the on-io-error handler,
	 * to realize a "hot spare" feature (not that I'd recommend that) */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));

	/* allocation not in the IO path, drbdsetup context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	set_disk_conf_defaults(&nbc->dc);

	err = disk_conf_from_attrs(&nbc->dc, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	if ((int)nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc) {
		if (nbc->dc.fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
			rcu_read_unlock();
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}
	rcu_read_unlock();
	bdev = blkdev_get_by_path(nbc->dc.backing_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_DISK;
		goto fail;
	}
	nbc->backing_bdev = bdev;

	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device.  TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	bdev = blkdev_get_by_path(nbc->dc.meta_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  ((int)nbc->dc.meta_dev_idx < 0) ?
				  (void *)mdev : (void *)drbd_m_holder);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}
	nbc->md_bdev = bdev;

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			1, 61, sizeof(struct bm_extent),
			offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) nbc->dc.disk_size);
		retcode = ERR_DISK_TO_SMALL;
		goto fail;
	}

	if ((int)nbc->dc.meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
			 "at least %llu sectors needed for this meta-disk type\n",
			 (unsigned long long) min_md_device_sectors);
		goto fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TO_SMALL;
		goto fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			"to currently maximum possible %llu sectors <==\n",
			(unsigned long long) max_possible_sectors);
		if ((int)nbc->dc.meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				"meta data may help <<==\n");
	}

	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);
	rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	retcode = rv;  /* FIXME: Type mismatch. */
	drbd_resume_io(mdev);
	if (rv < SS_SUCCESS)
		goto fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev, &nbc->dc)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, nbc->dc.disk_size, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TO_SMALL;
		goto force_diskless_dec;
	}

	if (!drbd_al_read_log(mdev, nbc)) {
		retcode = ERR_IO_MD_DISK;
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (nbc->dc.no_md_flush)
		set_bit(MD_NO_FUA, &mdev->flags);
	else
		clear_bit(MD_NO_FUA, &mdev->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere. */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	nbc = NULL;
	resync_lru = NULL;

	mdev->write_ordering = WO_bdev_flush;
	drbd_bump_write_ordering(mdev, WO_bdev_flush);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod)) {
		set_bit(CRASHED_PRIMARY, &mdev->flags);
		cp_discovered = 1;
	}

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;
	mdev->read_cnt = 0;
	mdev->writ_cnt = 0;

	drbd_reconsider_max_bio_size(mdev);
	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	if (mdev->state.role != R_PRIMARY &&
	     drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &mdev->flags);

	dd = drbd_determine_dev_size(mdev, 0);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == grew)
		set_bit(RESYNC_AFTER_NEG, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
		     "(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
			"set_n_write from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read,
			"read from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (cp_discovered) {
		drbd_al_apply_to_bm(mdev);
		if (drbd_bitmap_io(mdev, &drbd_bm_write,
			"crashed primary apply AL", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
		drbd_suspend_al(mdev); /* IO is still suspended here... */
	spin_lock_irq(&mdev->tconn->req_lock);
	os = drbd_read_state(mdev);
	ns = os;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WasUpToDate...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	if ( ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		   To avoid a race in receive_state, free p_uuid while
		   holding req_lock. I.e. atomic with the state change */
		kfree(mdev->p_uuid);
		mdev->p_uuid = NULL;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	conn_reconfig_done(mdev->tconn);
	drbd_adm_finish(info, retcode);
	return 0;

 force_diskless_dec:
	put_ldev(mdev);
 force_diskless:
	drbd_force_state(mdev, NS(disk, D_FAILED));
	drbd_md_sync(mdev);
 fail:
	conn_reconfig_done(mdev->tconn);
	if (nbc) {
		if (nbc->backing_bdev)
			blkdev_put(nbc->backing_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		if (nbc->md_bdev)
			blkdev_put(nbc->md_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		kfree(nbc);
	}
	lc_destroy(resync_lru);

 finish:
	drbd_adm_finish(info, retcode);
	return 0;
}
static int adm_detach(struct drbd_conf *mdev)
{
	enum drbd_state_rv retcode;
	int ret;

	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
	retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
	/* D_FAILED will transition to DISKLESS. */
	ret = wait_event_interruptible(mdev->misc_wait,
			mdev->state.disk != D_FAILED);
	drbd_resume_io(mdev);
	if ((int)retcode == (int)SS_IS_DISKLESS)
		retcode = SS_NOTHING_TO_DO;
	if (ret)
		retcode = ERR_INTR;
	return retcode;
}
/* Detaching the disk is a process in multiple stages.  First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then we have finally detached. */
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = adm_detach(adm_ctx.mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
static bool conn_resync_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = false;
	int vnr;

	down_read(&drbd_cfg_rwsem);
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_SYNC_SOURCE ||
		    mdev->state.conn == C_SYNC_TARGET ||
		    mdev->state.conn == C_PAUSED_SYNC_S ||
		    mdev->state.conn == C_PAUSED_SYNC_T) {
			rv = true;
			break;
		}
	}
	up_read(&drbd_cfg_rwsem);

	return rv;
}

static bool conn_ov_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = false;
	int vnr;

	down_read(&drbd_cfg_rwsem);
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_VERIFY_S ||
		    mdev->state.conn == C_VERIFY_T) {
			rv = true;
			break;
		}
	}
	up_read(&drbd_cfg_rwsem);

	return rv;
}
static enum drbd_ret_code
_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
{
	struct drbd_conf *mdev;
	int i;

	if (old_conf && tconn->agreed_pro_version < 100 &&
	    tconn->cstate == C_WF_REPORT_PARAMS &&
	    new_conf->wire_protocol != old_conf->wire_protocol)
		return ERR_NEED_APV_100;

	if (new_conf->two_primaries &&
	    (new_conf->wire_protocol != DRBD_PROT_C))
		return ERR_NOT_PROTO_C;

	idr_for_each_entry(&tconn->volumes, mdev, i) {
		if (get_ldev(mdev)) {
			enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
			put_ldev(mdev);
			if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
				return ERR_STONITH_AND_PROT_A;
		}
		if (mdev->state.role == R_PRIMARY && new_conf->want_lose)
			return ERR_DISCARD;
	}

	if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
		return ERR_CONG_NOT_PROTO_A;

	return NO_ERROR;
}
static enum drbd_ret_code
check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
{
	static enum drbd_ret_code rv;
	struct drbd_conf *mdev;
	int i;

	rcu_read_lock();
	rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
	rcu_read_unlock();

	/* tconn->volumes protected by genl_lock() here */
	idr_for_each_entry(&tconn->volumes, mdev, i) {
		if (!mdev->bitmap) {
			if (drbd_bm_init(mdev))
				return ERR_NOMEM;
		}
	}

	return rv;
}
struct crypto {
	struct crypto_hash *verify_tfm;
	struct crypto_hash *csums_tfm;
	struct crypto_hash *cram_hmac_tfm;
	struct crypto_hash *integrity_tfm;
	void *int_dig_in;
	void *int_dig_vv;
};

static int
alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
{
	if (!tfm_name[0])
		return NO_ERROR;

	*tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(*tfm)) {
		*tfm = NULL;
		return err_alg;
	}

	return NO_ERROR;
}
static enum drbd_ret_code
alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
{
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	enum drbd_ret_code rv;
	int hash_size;

	rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
			ERR_CSUMS_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
			ERR_VERIFY_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
			ERR_INTEGRITY_ALG);
	if (rv != NO_ERROR)
		return rv;
	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			 new_conf->cram_hmac_alg);

		rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
				ERR_AUTH_ALG);
	}
	if (crypto->integrity_tfm) {
		hash_size = crypto_hash_digestsize(crypto->integrity_tfm);
		crypto->int_dig_in = kmalloc(hash_size, GFP_KERNEL);
		if (!crypto->int_dig_in)
			return ERR_NOMEM;
		crypto->int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
		if (!crypto->int_dig_vv)
			return ERR_NOMEM;
	}

	return rv;
}
static void free_crypto(struct crypto *crypto)
{
	kfree(crypto->int_dig_in);
	kfree(crypto->int_dig_vv);
	crypto_free_hash(crypto->cram_hmac_tfm);
	crypto_free_hash(crypto->integrity_tfm);
	crypto_free_hash(crypto->csums_tfm);
	crypto_free_hash(crypto->verify_tfm);
}
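/* Note: free_crypto() is safe on a partially initialized struct crypto,
 * because kfree(NULL) and crypto_free_hash(NULL) are no-ops; that is what
 * allows the "goto fail" paths below to call it unconditionally. */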
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_tconn *tconn;
	struct net_conf *old_conf, *new_conf = NULL;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto crypto = { };
	bool change_integrity_alg;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tconn = adm_ctx.tconn;

	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto out;
	}

	conn_reconfig_start(tconn);

	mutex_lock(&tconn->data.mutex);
	mutex_lock(&tconn->conf_update);
	old_conf = tconn->net_conf;

	if (!old_conf) {
		drbd_msg_put_info("net conf missing, try connect");
		retcode = ERR_INVALID_REQUEST;
		goto fail;
	}

	*new_conf = *old_conf;
	if (should_set_defaults(info))
		set_net_conf_defaults(new_conf);

	err = net_conf_from_attrs_for_change(new_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(tconn, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	/* re-sync running */
	rsr = conn_resync_running(tconn);
	if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	/* online verify running */
	ovr = conn_ov_running(tconn);
	if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
		retcode = ERR_VERIFY_RUNNING;
		goto fail;
	}

	change_integrity_alg = strcmp(old_conf->integrity_alg,
				      new_conf->integrity_alg);

	retcode = alloc_crypto(&crypto, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	rcu_assign_pointer(tconn->net_conf, new_conf);

	if (!rsr) {
		crypto_free_hash(tconn->csums_tfm);
		tconn->csums_tfm = crypto.csums_tfm;
		crypto.csums_tfm = NULL;
	}
	if (!ovr) {
		crypto_free_hash(tconn->verify_tfm);
		tconn->verify_tfm = crypto.verify_tfm;
		crypto.verify_tfm = NULL;
	}

	kfree(tconn->int_dig_in);
	tconn->int_dig_in = crypto.int_dig_in;
	kfree(tconn->int_dig_vv);
	tconn->int_dig_vv = crypto.int_dig_vv;
	crypto_free_hash(tconn->integrity_tfm);
	tconn->integrity_tfm = crypto.integrity_tfm;
	if (change_integrity_alg) {
		/* Do this without trying to take tconn->data.mutex again. */
		if (__drbd_send_protocol(tconn))
			goto fail;
	}

	/* FIXME Changing cram_hmac while the connection is established is useless */
	crypto_free_hash(tconn->cram_hmac_tfm);
	tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;

	mutex_unlock(&tconn->conf_update);
	mutex_unlock(&tconn->data.mutex);
	synchronize_rcu();
	kfree(old_conf);

	if (tconn->cstate >= C_WF_REPORT_PARAMS)
		drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));

	goto done;

 fail:
	mutex_unlock(&tconn->conf_update);
	mutex_unlock(&tconn->data.mutex);
	free_crypto(&crypto);
	kfree(new_conf);
 done:
	conn_reconfig_done(tconn);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	struct net_conf *old_conf, *new_conf = NULL;
	struct crypto crypto = { };
	struct drbd_tconn *oconn;
	struct drbd_tconn *tconn;
	struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
	enum drbd_ret_code retcode;
	int i;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tconn = adm_ctx.tconn;
	conn_reconfig_start(tconn);

	if (tconn->cstate > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	set_net_conf_defaults(new_conf);

	err = net_conf_from_attrs(new_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(tconn, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	retcode = NO_ERROR;

	new_my_addr = (struct sockaddr *)&new_conf->my_addr;
	new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;

	/* No need to take drbd_cfg_rwsem here.  All reconfiguration is
	 * strictly serialized on genl_lock(). We are protected against
	 * concurrent reconfiguration/addition/deletion */
	list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
		struct net_conf *nc;
		if (oconn == tconn)
			continue;

		rcu_read_lock();
		nc = rcu_dereference(oconn->net_conf);
		if (nc) {
			taken_addr = (struct sockaddr *)&nc->my_addr;
			if (new_conf->my_addr_len == nc->my_addr_len &&
			    !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
				retcode = ERR_LOCAL_ADDR;

			taken_addr = (struct sockaddr *)&nc->peer_addr;
			if (new_conf->peer_addr_len == nc->peer_addr_len &&
			    !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
				retcode = ERR_PEER_ADDR;
		}
		rcu_read_unlock();
		if (retcode != NO_ERROR)
			goto fail;
	}

	retcode = alloc_crypto(&crypto, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	conn_flush_workqueue(tconn);

	mutex_lock(&tconn->conf_update);
	old_conf = tconn->net_conf;
	if (old_conf) {
		retcode = ERR_NET_CONFIGURED;
		mutex_unlock(&tconn->conf_update);
		goto fail;
	}
	rcu_assign_pointer(tconn->net_conf, new_conf);

	conn_free_crypto(tconn);
	tconn->int_dig_in = crypto.int_dig_in;
	tconn->int_dig_vv = crypto.int_dig_vv;
	tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
	tconn->integrity_tfm = crypto.integrity_tfm;
	tconn->csums_tfm = crypto.csums_tfm;
	tconn->verify_tfm = crypto.verify_tfm;

	mutex_unlock(&tconn->conf_update);

	idr_for_each_entry(&tconn->volumes, mdev, i) {
		mdev->send_cnt = 0;
		mdev->recv_cnt = 0;
	}

	retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);

	conn_reconfig_done(tconn);
	drbd_adm_finish(info, retcode);
	return 0;

fail:
	free_crypto(&crypto);
	kfree(new_conf);

	conn_reconfig_done(tconn);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
{
	enum drbd_state_rv rv;

	rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
			force ? CS_HARD : 0);

	switch (rv) {
	case SS_NOTHING_TO_DO:
		break;
	case SS_ALREADY_STANDALONE:
		return SS_SUCCESS;
	case SS_PRIMARY_NOP:
		/* Our state checking code wants to see the peer outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						pdsk, D_OUTDATED), CS_VERBOSE);
		break;
	case SS_CW_FAILED_BY_PEER:
		/* The peer probably wants to see us outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						disk, D_OUTDATED), 0);
		if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
			rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
					CS_HARD);
		}
		break;
	default:;
		/* no special handling necessary */
	}

	if (rv >= SS_SUCCESS) {
		enum drbd_state_rv rv2;
		/* No one else can reconfigure the network while I am here.
		 * The state handling only uses drbd_thread_stop_nowait(),
		 * we want to really wait here until the receiver is no more.
		 */
		drbd_thread_stop(&adm_ctx.tconn->receiver);

		/* Race breaker.  This additional state change request may be
		 * necessary, if this was a forced disconnect during a receiver
		 * restart.  We may have "killed" the receiver thread just
		 * after drbdd_init() returned.  Typically, we should be
		 * C_STANDALONE already, now, and this becomes a no-op.
		 */
		rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
				CS_VERBOSE | CS_HARD);
		if (rv2 < SS_SUCCESS)
			conn_err(tconn,
				"unexpected rv2=%d in conn_try_disconnect()\n",
				rv2);
	}
	return rv;
}
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct disconnect_parms parms;
	struct drbd_tconn *tconn;
	enum drbd_state_rv rv;
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	tconn = adm_ctx.tconn;
	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
		err = disconnect_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	rv = conn_try_disconnect(tconn, parms.force_disconnect);
	if (rv < SS_SUCCESS)
		retcode = rv;  /* FIXME: Type mismatch. */
	else
		retcode = NO_ERROR;
 fail:
	drbd_adm_finish(info, retcode);
	return 0;
}
void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
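/* Decision sketch for the above: if the roles differ, the Primary becomes
 * the sync source; if both sides have the same role (e.g. dual Primary),
 * the DISCARD_CONCURRENT bit -- negotiated to opposite values on the two
 * nodes during the handshake -- breaks the tie, so exactly one node starts
 * as C_SYNC_SOURCE and the other waits in C_WF_SYNC_UUID. */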
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
	struct resize_parms rs;
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	enum dds_flags ddsf;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	memset(&rs, 0, sizeof(struct resize_parms));
	if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
		err = resize_parms_from_attrs(&rs, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	mdev = adm_ctx.mdev;
	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		put_ldev(mdev);
		goto fail;
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determine_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

 fail:
	drbd_adm_finish(info, retcode);
	return 0;
}
void drbd_set_res_opts_defaults(struct res_opts *r)
{
	set_res_opts_defaults(r);
}

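/* Handler for the "resource-options" admin request: update the res_opts of a
 * connection, and re-apply the CPU mask to its threads if it changed. */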
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	cpumask_var_t new_cpu_mask;
	struct drbd_tconn *tconn;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;
	tconn = adm_ctx.tconn;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
		retcode = ERR_NOMEM;
		drbd_msg_put_info("unable to allocate cpumask");
		goto out;
	}

	res_opts = tconn->res_opts;
	if (should_set_defaults(info))
		set_res_opts_defaults(&res_opts);

	err = res_opts_from_attrs(&res_opts, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && res_opts.cpu_mask[0] != 0) {
		err = __bitmap_parse(res_opts.cpu_mask, 32, 0,
				cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
			retcode = ERR_CPU_MASK_PARSE;
			goto fail;
		}
	}

	tconn->res_opts = res_opts;

	if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
		cpumask_copy(tconn->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(tconn);
		tconn->receiver.reset_cpu_mask = 1;
		tconn->asender.reset_cpu_mask = 1;
		tconn->worker.reset_cpu_mask = 1;
	}

fail:
	free_cpumask_var(new_cpu_mask);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

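/* Handler for "invalidate": discard the local data and become sync target.
 * While we cannot reach C_STARTING_SYNC_T because we are not connected,
 * at least mark the local disk Inconsistent. */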
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->tconn->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->tconn->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

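/* Set all bits in the bitmap and suspend activity log updates;
 * the return value of drbd_bmio_set_n_write() is passed through. */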
static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
	int rv;

	rv = drbd_bmio_set_n_write(mdev);
	drbd_suspend_al(mdev);
	return rv;
}

static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
		union drbd_state mask, union drbd_state val)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_request_state(adm_ctx.mdev, mask, val);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
}

int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
	union drbd_dev_state s;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = adm_ctx.mdev->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}

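/* Handler for "resume-io": create a new current UUID if one is pending,
 * clear all suspend flags, and clean up the transfer log if the connection
 * or the disk was lost while I/O was frozen. */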
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
	}
	drbd_suspend_io(mdev);
	retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (retcode == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev->tconn);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
	}
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}

int nla_put_drbd_cfg_context(struct sk_buff *skb, const char *conn_name, unsigned vnr)
{
	struct nlattr *nla;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		goto nla_put_failure;
	if (vnr != VOLUME_UNSPECIFIED)
		NLA_PUT_U32(skb, T_ctx_volume, vnr);
	NLA_PUT_STRING(skb, T_ctx_conn_name, conn_name);
	nla_nest_end(skb, nla);
	return 0;

nla_put_failure:
	if (nla)
		nla_nest_cancel(skb, nla);
	return -EMSGSIZE;
}

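/* Fill an skb with the status of one device: configuration context,
 * resource/disk/net options, and the current state, excluding sensitive
 * fields unless the requester is privileged. */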
int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
		const struct sib_info *sib)
{
	struct state_info *si = NULL; /* for sizeof(si->member); */
	struct net_conf *nc;
	struct nlattr *nla;
	int got_ldev;
	int err = 0;
	int exclude_sensitive;

	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
	 * to.  So we better exclude_sensitive information.
	 *
	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
	 * in the context of the requesting user process. Exclude sensitive
	 * information, unless current has superuser.
	 *
	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
	 * relies on the current implementation of netlink_dump(), which
	 * executes the dump callback successively from netlink_recvmsg(),
	 * always in the context of the receiving process */
	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);

	got_ldev = get_ldev(mdev);

	/* We need to add connection name and volume number information still.
	 * Minor number is in drbd_genlmsghdr. */
	if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
		goto nla_put_failure;

	if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
		goto nla_put_failure;

	if (got_ldev)
		if (disk_conf_to_skb(skb, &mdev->ldev->dc, exclude_sensitive))
			goto nla_put_failure;

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc)
		err = net_conf_to_skb(skb, nc, exclude_sensitive);
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
	if (!nla)
		goto nla_put_failure;
	NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
	NLA_PUT_U32(skb, T_current_state, mdev->state.i);
	NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
	NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));

	if (got_ldev) {
		NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
		NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
		NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
		NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
		if (C_SYNC_SOURCE <= mdev->state.conn &&
		    C_PAUSED_SYNC_T >= mdev->state.conn) {
			NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
			NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
		}
	}

	if (sib) {
		switch (sib->sib_reason) {
		case SIB_SYNC_PROGRESS:
		case SIB_GET_STATUS_REPLY:
			break;
		case SIB_STATE_CHANGE:
			NLA_PUT_U32(skb, T_prev_state, sib->os.i);
			NLA_PUT_U32(skb, T_new_state, sib->ns.i);
			break;
		case SIB_HELPER_POST:
			NLA_PUT_U32(skb,
				T_helper_exit_code, sib->helper_exit_code);
			/* fall through */
		case SIB_HELPER_PRE:
			NLA_PUT_STRING(skb, T_helper, sib->helper_name);
			break;
		}
	}
	nla_nest_end(skb, nla);

	if (0)
nla_put_failure:
		err = -EMSGSIZE;
	if (got_ldev)
		put_ldev(mdev);
	return err;
}

int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

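/* Dump callback: emit the status of the "next" volume (or of a volume-less
 * connection), advancing the iterators stored in cb->args[] as we go. */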
int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_conf *mdev;
	struct drbd_genlmsghdr *dh;
	struct drbd_tconn *pos = (struct drbd_tconn *)cb->args[0];
	struct drbd_tconn *tconn = NULL;
	struct drbd_tconn *tmp;
	unsigned volume = cb->args[1];

	/* Open coded, deferred, iteration:
	 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
	 *	idr_for_each_entry(&tconn->volumes, mdev, i) {
	 *		...
	 *	}
	 * }
	 * where tconn is cb->args[0];
	 * and i is cb->args[1];
	 *
	 * cb->args[2] indicates if we shall loop over all resources,
	 * or just dump all volumes of a single resource.
	 *
	 * This may miss entries inserted after this dump started,
	 * or entries deleted before they are reached.
	 *
	 * We need to make sure the mdev won't disappear while
	 * we are looking at it, and revalidate our iterators
	 * on each iteration.
	 */

	/* synchronize with conn_create()/conn_destroy() */
	down_read(&drbd_cfg_rwsem);
	/* revalidate iterator position */
	list_for_each_entry(tmp, &drbd_tconns, all_tconn) {
		if (pos == NULL) {
			/* first iteration */
			pos = tmp;
			tconn = pos;
			break;
		}
		if (tmp == pos) {
			tconn = pos;
			break;
		}
	}
	if (tconn) {
next_tconn:
		mdev = idr_get_next(&tconn->volumes, &volume);
		if (!mdev) {
			/* No more volumes to dump on this tconn.
			 * Advance tconn iterator. */
			pos = list_entry(tconn->all_tconn.next,
					struct drbd_tconn, all_tconn);
			/* Did we dump any volume on this tconn yet? */
			if (volume != 0) {
				/* If we reached the end of the list,
				 * or only a single resource dump was requested,
				 * we are done. */
				if (&pos->all_tconn == &drbd_tconns || cb->args[2])
					goto out;
				volume = 0;
				tconn = pos;
				goto next_tconn;
			}
		}

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
				cb->nlh->nlmsg_seq, &drbd_genl_family,
				NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			goto out;

		if (!mdev) {
			/* this is a tconn without a single volume */
			dh->minor = -1U;
			dh->ret_code = NO_ERROR;
			if (nla_put_drbd_cfg_context(skb, tconn->name, VOLUME_UNSPECIFIED))
				genlmsg_cancel(skb, dh);
			else
				genlmsg_end(skb, dh);
			goto out;
		}

		D_ASSERT(mdev->vnr == volume);
		D_ASSERT(mdev->tconn == tconn);

		dh->minor = mdev_to_minor(mdev);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, mdev, NULL)) {
			genlmsg_cancel(skb, dh);
			goto out;
		}
		genlmsg_end(skb, dh);
	}

out:
	up_read(&drbd_cfg_rwsem);
	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == tconn) ? volume + 1 : 0;

	/* No more tconns/volumes/minors found results in an empty skb.
	 * Which will terminate the dump. */
	return skb->len;
}

/*
 * Request status of all resources, or of all volumes within a single
 * resource.
 *
 * This is a dump, as the answer may not fit in a single reply skb otherwise.
 * Which means we cannot use the family->attrbuf or other such members, because
 * dump is NOT protected by the genl_lock().  During dump, we only have access
 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
 *
 * Once things are setup properly, we call into get_one_status().
 */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	struct nlattr *nla;
	const char *conn_name;
	struct drbd_tconn *tconn;

	/* Is this a followup call? */
	if (cb->args[0]) {
		/* ... of a single resource dump,
		 * and the resource iterator has been advanced already? */
		if (cb->args[2] && cb->args[2] != cb->args[0])
			return 0; /* DONE. */
		goto dump;
	}

	/* First call (from netlink_dump_start).  We need to figure out
	 * which resource(s) the user wants us to dump. */
	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
			nlmsg_attrlen(cb->nlh, hdrlen),
			DRBD_NLA_CFG_CONTEXT);

	/* No explicit context given.  Dump all. */
	if (!nla)
		goto dump;
	nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
	/* context given, but no name present? */
	if (!nla)
		return -EINVAL;
	conn_name = nla_data(nla);
	tconn = conn_get_by_name(conn_name);

	if (!tconn)
		return -ENODEV;

	kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */

	/* prime iterators, and set "filter" mode mark:
	 * only dump this tconn. */
	cb->args[0] = (long)tconn;
	/* cb->args[1] = 0; passed in this way. */
	cb->args[2] = (long)tconn;

dump:
	return get_one_status(skb, cb);
}

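/* Tell user space which timeout applies while waiting for the peer:
 * peer-outdated, degraded, or the default. */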
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct timeout_parms tp;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tp.timeout_type =
		adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
		UT_DEFAULT;

	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

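/* Handler for "verify": start online verify as verification source,
 * optionally resuming from a given start sector. */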
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		/* resume from last known position, if possible */
		struct start_ov_parms parms =
			{ .ov_start_sector = mdev->ov_start_sector };
		int err = start_ov_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
		/* w_make_ov_request expects position to be aligned:
		 * round down to a multiple of BM_SECT_PER_BIT */
		mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
	}
	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

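/* Handler for "new-current-uuid": generate a new current UUID, rotating the
 * old one into the bitmap slot.  With clear_bm set on a just-created device,
 * this skips the initial sync. */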
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out_nolock;

	mdev = adm_ctx.mdev;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			drbd_print_uuids(mdev, "cleared bitmap UUID");
			spin_lock_irq(&mdev->tconn->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->tconn->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(mdev->state_mutex);
out_nolock:
	drbd_adm_finish(info, retcode);
	return 0;
}

static enum drbd_ret_code
drbd_check_conn_name(const char *name)
{
	if (!name || !name[0]) {
		drbd_msg_put_info("connection name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info("invalid connection name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}

int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_check_conn_name(adm_ctx.conn_name);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx.tconn) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info("connection exists");
		}
		/* else: still NO_ERROR */
		goto out;
	}

	if (!conn_create(adm_ctx.conn_name))
		retcode = ERR_NOMEM;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_genlmsghdr *dh = info->userhdr;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	/* FIXME drop minor_count parameter, limit to MINORMASK */
	if (dh->minor >= minor_count) {
		drbd_msg_put_info("requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}
	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
		drbd_msg_put_info("requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* drbd_adm_prepare made sure already
	 * that mdev->tconn and mdev->vnr match the request. */
	if (adm_ctx.mdev) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			retcode = ERR_MINOR_EXISTS;
		/* else: still NO_ERROR */
		goto out;
	}

	down_write(&drbd_cfg_rwsem);
	retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
	up_write(&drbd_cfg_rwsem);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
{
	if (mdev->state.disk == D_DISKLESS &&
	    /* no need to be mdev->state.conn == C_STANDALONE &&
	     * we may want to delete a minor from a live replication group.
	     */
	    mdev->state.role == R_SECONDARY) {
		drbd_delete_device(mdev);
		return NO_ERROR;
	} else
		return ERR_MINOR_CONFIGURED;
}

int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	down_write(&drbd_cfg_rwsem);
	retcode = adm_delete_minor(adm_ctx.mdev);
	up_write(&drbd_cfg_rwsem);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

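/* Handler for "down": demote, disconnect, detach and delete all volumes of
 * the connection, then delete the connection itself. */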
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
	struct drbd_conf *mdev;
	unsigned i;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (!adm_ctx.tconn) {
		retcode = ERR_CONN_NOT_KNOWN;
		goto out;
	}

	down_read(&drbd_cfg_rwsem);
	/* demote */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = drbd_set_role(mdev, R_SECONDARY, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to demote");
			goto out_unlock;
		}
	}
	up_read(&drbd_cfg_rwsem);

	/* disconnect; may stop the receiver;
	 * must not hold the drbd_cfg_rwsem */
	retcode = conn_try_disconnect(adm_ctx.tconn, 0);
	if (retcode < SS_SUCCESS) {
		drbd_msg_put_info("failed to disconnect");
		goto out;
	}

	down_read(&drbd_cfg_rwsem);
	/* detach */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_detach(mdev);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to detach");
			goto out_unlock;
		}
	}
	up_read(&drbd_cfg_rwsem);

	/* If we reach this, all volumes (of this tconn) are Secondary,
	 * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
	 * actually stopped, state handling only does drbd_thread_stop_nowait().
	 * This needs to be done without holding drbd_cfg_rwsem. */
	drbd_thread_stop(&adm_ctx.tconn->worker);

	/* Now, nothing can fail anymore */

	/* delete volumes */
	down_write(&drbd_cfg_rwsem);
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_delete_minor(mdev);
		if (retcode != NO_ERROR) {
			/* "can not happen" */
			drbd_msg_put_info("failed to delete volume");
			up_write(&drbd_cfg_rwsem);
			goto out;
		}
	}

	/* delete connection */
	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del(&adm_ctx.tconn->all_tconn);
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		/* "can not happen" */
		retcode = ERR_CONN_IN_USE;
		drbd_msg_put_info("failed to delete connection");
	}
	up_write(&drbd_cfg_rwsem);
	goto out;

out_unlock:
	up_read(&drbd_cfg_rwsem);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	down_write(&drbd_cfg_rwsem);
	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del(&adm_ctx.tconn->all_tconn);
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);
		retcode = NO_ERROR;
	} else {
		retcode = ERR_CONN_IN_USE;
	}
	up_write(&drbd_cfg_rwsem);

	if (retcode == NO_ERROR)
		drbd_thread_stop(&adm_ctx.tconn->worker);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

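/* Broadcast a state-info-broadcast (sib) event to the DRBD events multicast
 * group; -ESRCH (no listeners) is not treated as an error. */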
void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
{
	static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyways. */
		goto nla_put_failure;
	d_out->minor = mdev_to_minor(mdev);
	d_out->ret_code = 0;

	if (nla_put_status_info(msg, mdev, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, 0);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	dev_err(DEV, "Error %d while broadcasting event. "
			"Event seq:%u sib_reason:%u\n",
			err, seq, sib->sib_reason);
}