/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>
/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
#include <linux/drbd_genl_api.h>
#include <linux/genl_magic_func.h>

/* used blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *conn_name;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn;
} adm_ctx;
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		printk(KERN_ERR "drbd: error sending genl reply\n");
}
/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail was no space in skb, and there are 4k available. */
int drbd_msg_put_info(const char *info)
{
	struct sk_buff *skb = adm_ctx.reply_skb;
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}
/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_CONN	2
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
		unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(&adm_ctx, 0, sizeof(adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS
	&& security_netlink_recv(skb, CAP_SYS_ADMIN))
		return -EPERM;

	adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx.reply_skb)
		goto fail;

	adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* put of a few bytes into a fresh skb of >= 4k will always succeed.
	 * but anyways */
	if (!adm_ctx.reply_dh)
		goto fail;

	adm_ctx.reply_dh->minor = d_in->minor;
	adm_ctx.reply_dh->ret_code = NO_ERROR;

	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx.reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the global adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
		nla = nested_attr_tb[__nla_type(T_ctx_conn_name)];
		if (nla)
			adm_ctx.conn_name = nla_data(nla);
	} else
		adm_ctx.volume = VOLUME_UNSPECIFIED;

	adm_ctx.minor = d_in->minor;
	adm_ctx.mdev = minor_to_mdev(d_in->minor);
	adm_ctx.tconn = conn_by_name(adm_ctx.conn_name);

	if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info("unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
		drbd_msg_put_info("unknown connection");
		return ERR_INVALID_REQUEST;
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx.mdev && adm_ctx.tconn &&
	    adm_ctx.mdev->tconn != adm_ctx.tconn) {
		pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
			   adm_ctx.minor, adm_ctx.conn_name, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists in different connection");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx.mdev &&
	    adm_ctx.volume != VOLUME_UNSPECIFIED &&
	    adm_ctx.volume != adm_ctx.mdev->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
			   adm_ctx.minor, adm_ctx.volume,
			   adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx.mdev && !adm_ctx.tconn)
		adm_ctx.tconn = adm_ctx.mdev->tconn;

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx.reply_skb);
	adm_ctx.reply_skb = NULL;
	return -ENOMEM;
}
static int drbd_adm_finish(struct genl_info *info, int retcode)
{
	struct nlattr *nla;
	const char *conn_name = NULL;

	if (!adm_ctx.reply_skb)
		return -ENOMEM;

	adm_ctx.reply_dh->ret_code = retcode;

	nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
	if (nla) {
		nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
		if (nla)
			conn_name = nla_data(nla);
	}

	drbd_adm_send_reply(adm_ctx.reply_skb, info);
	return 0;
}
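/*
 * A sketch of the handler shape that drbd_adm_prepare()/drbd_adm_finish()
 * are designed for (illustrative only; "frobnicate" is not a real command):
 *
 *	int drbd_adm_frobnicate(struct sk_buff *skb, struct genl_info *info)
 *	{
 *		enum drbd_ret_code retcode;
 *
 *		retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
 *		if (!adm_ctx.reply_skb)
 *			return retcode;
 *		if (retcode != NO_ERROR)
 *			goto out;
 *		// act on adm_ctx.mdev / adm_ctx.tconn
 *	out:
 *		drbd_adm_finish(info, retcode);
 *		return 0;
 *	}
 */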
static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
{
	char *afs;

	if (get_net_conf(tconn)) {
		switch (((struct sockaddr *)tconn->net_conf->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)tconn->net_conf->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)tconn->net_conf->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)tconn->net_conf->peer_addr)->sin_addr);
		}
		snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
		put_net_conf(tconn);
	}
}
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[12];
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	struct sib_info sib;
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
	setup_khelper_env(mdev->tconn, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(mdev, &sib);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
			 usermode_helper, cmd, mb,
			 (ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
			 usermode_helper, cmd, mb,
			 (ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(mdev, &sib);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
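/*
 * Note: when told to wait for completion, call_usermodehelper() returns a
 * wait(2)-style status word; that is why the helper's exit code is
 * extracted as (ret >> 8) & 0xff above and in conn_khelper() below, while
 * a negative ret means the helper could not be run at all.
 */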
static void conn_md_sync(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr;

	down_read(&drbd_cfg_rwsem);
	idr_for_each_entry(&tconn->volumes, mdev, vnr)
		drbd_md_sync(mdev);
	up_read(&drbd_cfg_rwsem);
}
int conn_khelper(struct drbd_tconn *tconn, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
	int ret;

	setup_khelper_env(tconn, envp);
	conn_md_sync(tconn);

	conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
	/* TODO: conn_bcast_event() ?? */

	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	else
		conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_conf *mdev;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (get_ldev_if_state(mdev, D_CONSISTENT)) {
			fp = max_t(enum drbd_fencing_p, fp, mdev->ldev->dc.fencing);
			put_ldev(mdev);
		}
	}

	return fp;
}
bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	if (tconn->cstate >= C_WF_REPORT_PARAMS) {
		conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
		return false;
	}

	fp = highest_fencing_policy(tconn);
	switch (fp) {
	case FP_NOT_AVAIL:
		conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
		goto out;
	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(tconn, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		conn_warn(tconn, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case 7:
		if (fp != FP_STONITH)
			conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	conn_info(tconn, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

 out:
	/* Not using
	   conn_request_state(tconn, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the
	   connection in the meantime. */
	spin_lock_irq(&tconn->req_lock);
	if (tconn->cstate < C_WF_REPORT_PARAMS)
		_conn_request_state(tconn, mask, val, CS_VERBOSE);
	spin_unlock_irq(&tconn->req_lock);

	return conn_highest_pdsk(tconn) <= D_OUTDATED;
}
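/*
 * conn_try_outdate_peer() is used synchronously from drbd_set_role() when
 * a promotion needs the peer outdated first, and in the background via
 * conn_try_outdate_peer_async() below.
 */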
static int _try_outdate_peer_async(void *data)
{
	struct drbd_tconn *tconn = (struct drbd_tconn *)data;

	conn_try_outdate_peer(tconn);

	return 0;
}

void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
	struct task_struct *opa;

	opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
	if (IS_ERR(opa))
		conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
}
static enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY)
		request_ping(mdev->tconn); /* Detect a dead peer ASAP */

	mutex_lock(mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(mdev->tconn)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(mdev->tconn) && force) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk = D_OUTDATED;
			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			schedule_timeout_interruptible((mdev->tconn->net_conf->ping_timeo+1)*HZ/10);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		if (get_net_conf(mdev->tconn)) {
			mdev->tconn->net_conf->want_lose = 0;
			put_net_conf(mdev->tconn);
		}
		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			      mdev->state.pdsk <= D_FAILED)
			     && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(mdev->state_mutex);
	return rv;
}
static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	switch (bdev->dc.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_SECTORS;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
}
/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}
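/*
 * Example: for size = 1048576 (1 GiB worth of KB) the loop shifts once,
 * yielding "1024 MB"; the extreme -1ULL input saturates after five shifts
 * at "16384 EB", which is what the 9-byte buffer comment above refers to.
 */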
/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (drbd_suspended(mdev))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}
/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
				    "Leaving size unchanged at size = %lu KB\n",
				    (unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
		     (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		int err;

		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		err = drbd_bitmap_io(mdev, &drbd_bm_write,
				"size changed", BM_LOCKED_MASK);
		if (err) {
			rv = dev_size_error;
			goto out;
		}
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}
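/*
 * The return value matters to the callers in this file: "grew" makes
 * drbd_adm_resize() set RESIZE_PENDING and drbd_adm_attach() set
 * RESYNC_AFTER_NEG, while dev_size_error is reported as ERR_NOMEM_BITMAP.
 */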
sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
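/*
 * Sizing precedence implemented above: with both sizes known, take
 * min(peer, local); otherwise fall back to the last agreed size, clipped
 * to whatever is still known; a user-requested disk_size then overrides
 * the result, but only downwards.
 */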
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (!expect(dc->al_extents >= DRBD_AL_EXTENTS_MIN))
		dc->al_extents = DRBD_AL_EXTENTS_MIN;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	int max_hw_sectors = max_bio_size >> 9;
	int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		max_segments = mdev->ldev->dc.max_bio_bvecs;
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}
void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
	int now, new, local, peer;

	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
	local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		mdev->local_max_bio_size = local;
		put_ldev(mdev);
	}

	/* We may ignore peer limits if the peer is modern enough.
	   Because starting with 8.3.8 the peer can use multiple
	   BIOs for a single peer_request. */
	if (mdev->state.conn >= C_CONNECTED) {
		if (mdev->tconn->agreed_pro_version < 94)
			peer = mdev->peer_max_bio_size;
		else if (mdev->tconn->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else /* drbd 8.3.8 onwards */
			peer = DRBD_MAX_BIO_SIZE;
	}

	new = min_t(int, local, peer);

	if (mdev->state.role == R_PRIMARY && new < now)
		dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);

	if (new != now)
		dev_info(DEV, "max BIO size = %u\n", new);

	drbd_setup_queue_param(mdev, new);
}
/* serialize deconfig (worker exiting, doing cleanup)
 * and reconfig (drbdsetup disk, drbdsetup net)
 *
 * Wait for a potentially exiting worker, then restart it,
 * or start a new one.  Flush any pending work, there may still be an
 * after_state_change queued.
 */
static void conn_reconfig_start(struct drbd_tconn *tconn)
{
	wait_event(tconn->ping_wait, !test_and_set_bit(CONFIG_PENDING, &tconn->flags));
	wait_event(tconn->ping_wait, !test_bit(OBJECT_DYING, &tconn->flags));
	drbd_thread_start(&tconn->worker);
	conn_flush_workqueue(tconn);
}

/* if still unconfigured, stops worker again.
 * if configured now, clears CONFIG_PENDING.
 * wakes potential waiters */
static void conn_reconfig_done(struct drbd_tconn *tconn)
{
	spin_lock_irq(&tconn->req_lock);
	if (conn_all_vols_unconf(tconn)) {
		set_bit(OBJECT_DYING, &tconn->flags);
		drbd_thread_stop_nowait(&tconn->worker);
	} else
		clear_bit(CONFIG_PENDING, &tconn->flags);
	spin_unlock_irq(&tconn->req_lock);
	wake_up(&tconn->ping_wait);
}
/* Make sure IO is suspended before calling this function(). */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (!lc_try_lock(mdev->act_log)) {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
	spin_unlock_irq(&mdev->tconn->req_lock);
	lc_unlock(mdev->act_log);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_conf *mdev;
	struct disk_conf *ndc; /* new disk conf */
	int err, fifo_size;
	int *rs_plan_s = NULL;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

/* FIXME freeze IO, cluster wide.
 *
 * We should make sure no-one uses
 * some half-updated struct when we
 * assign it later. */

	ndc = kmalloc(sizeof(*ndc), GFP_KERNEL);
	if (!ndc) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	memcpy(ndc, &mdev->ldev->dc, sizeof(*ndc));
	err = disk_conf_from_attrs_for_change(ndc, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	if (!expect(ndc->resync_rate >= 1))
		ndc->resync_rate = 1;

	/* clip to allowed range */
	if (!expect(ndc->al_extents >= DRBD_AL_EXTENTS_MIN))
		ndc->al_extents = DRBD_AL_EXTENTS_MIN;
	if (!expect(ndc->al_extents <= DRBD_AL_EXTENTS_MAX))
		ndc->al_extents = DRBD_AL_EXTENTS_MAX;

	/* most sanity checks done, try to assign the new sync-after
	 * dependency.  need to hold the global lock in there,
	 * to avoid a race in the dependency loop check. */
	retcode = drbd_alter_sa(mdev, ndc->resync_after);
	if (retcode != NO_ERROR)
		goto fail;

	fifo_size = (ndc->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
		rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
		if (!rs_plan_s) {
			dev_err(DEV, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (fifo_size != mdev->rs_plan_s.size) {
		kfree(mdev->rs_plan_s.values);
		mdev->rs_plan_s.values = rs_plan_s;
		mdev->rs_plan_s.size = fifo_size;
		mdev->rs_planed = 0;
		rs_plan_s = NULL;
	}

	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
	drbd_al_shrink(mdev);
	err = drbd_check_al_size(mdev, ndc);
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);

	if (err) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* FIXME
	 * To avoid someone looking at a half-updated struct, we probably
	 * should have a rw-semaphor on net_conf and disk_conf.
	 */
	mdev->ldev->dc = *ndc;

	drbd_md_sync(mdev);

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev);

 fail:
	put_ldev(mdev);
	kfree(ndc);
	kfree(rs_plan_s);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}
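/*
 * Attaching a disk proceeds in stages: fill a new drbd_backing_dev from
 * the netlink attributes, claim the backing and meta data block devices,
 * read the meta data, transition through D_ATTACHING, and derive the
 * resulting disk state from the MDF_* flags (or D_NEGOTIATING while
 * connected).  Error paths before the "point of no return" release
 * everything again.
 */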
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int err;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct block_device *bdev;
	struct lru_cache *resync_lru = NULL;
	union drbd_state ns, os;
	enum drbd_state_rv rv;
	int cp_discovered = 0;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	mdev = adm_ctx.mdev;
	conn_reconfig_start(mdev->tconn);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}
	/* It may just now have detached because of IO error.  Make sure
	 * drbd_ldev_destroy is done already, we may end up here very fast,
	 * e.g. if someone calls attach from the on-io-error handler,
	 * to realize a "hot spare" feature (not that I'd recommend that) */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));

	/* allocation not in the IO path, drbdsetup context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	nbc->dc = (struct disk_conf) {
		{}, 0, /* backing_dev */
		{}, 0, /* meta_dev */
		0, /* meta_dev_idx */
		DRBD_DISK_SIZE_SECT_DEF, /* disk_size */
		DRBD_MAX_BIO_BVECS_DEF, /* max_bio_bvecs */
		DRBD_ON_IO_ERROR_DEF, /* on_io_error */
		DRBD_FENCING_DEF, /* fencing */
		DRBD_RATE_DEF, /* resync_rate */
		DRBD_AFTER_DEF, /* resync_after */
		DRBD_AL_EXTENTS_DEF, /* al_extents */
		DRBD_C_PLAN_AHEAD_DEF, /* c_plan_ahead */
		DRBD_C_DELAY_TARGET_DEF, /* c_delay_target */
		DRBD_C_FILL_TARGET_DEF, /* c_fill_target */
		DRBD_C_MAX_RATE_DEF, /* c_max_rate */
		DRBD_C_MIN_RATE_DEF, /* c_min_rate */
		0, /* no_disk_barrier */
		0, /* no_disk_flush */
		0, /* no_disk_drain */
		0, /* no_md_flush */
	};

	err = disk_conf_from_attrs(&nbc->dc, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	if ((int)nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	if (get_net_conf(mdev->tconn)) {
		int prot = mdev->tconn->net_conf->wire_protocol;
		put_net_conf(mdev->tconn);
		if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}

	bdev = blkdev_get_by_path(nbc->dc.backing_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_DISK;
		goto fail;
	}
	nbc->backing_bdev = bdev;

	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device.  TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	bdev = blkdev_get_by_path(nbc->dc.meta_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  ((int)nbc->dc.meta_dev_idx < 0) ?
				  (void *)mdev : (void *)drbd_m_holder);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}
	nbc->md_bdev = bdev;

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			       1, 61, sizeof(struct bm_extent),
			       offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) nbc->dc.disk_size);
		retcode = ERR_DISK_TO_SMALL;
		goto fail;
	}

	if ((int)nbc->dc.meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
			 "at least %llu sectors needed for this meta-disk type\n",
			 (unsigned long long) min_md_device_sectors);
		goto fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TO_SMALL;
		goto fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			 "to currently maximum possible %llu sectors <==\n",
			 (unsigned long long) max_possible_sectors);
		if ((int)nbc->dc.meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				 "meta data may help <<==\n");
	}

	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	retcode = rv;  /* FIXME: Type mismatch. */
	drbd_resume_io(mdev);
	if (rv < SS_SUCCESS)
		goto fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
			(unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev, &nbc->dc)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TO_SMALL;
		goto force_diskless_dec;
	}

	if (!drbd_al_read_log(mdev, nbc)) {
		retcode = ERR_IO_MD_DISK;
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (nbc->dc.no_md_flush)
		set_bit(MD_NO_FUA, &mdev->flags);
	else
		clear_bit(MD_NO_FUA, &mdev->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere. */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	nbc = NULL;
	resync_lru = NULL;

	mdev->write_ordering = WO_bdev_flush;
	drbd_bump_write_ordering(mdev, WO_bdev_flush);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod)) {
		set_bit(CRASHED_PRIMARY, &mdev->flags);
		cp_discovered = 1;
	}

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;
	mdev->read_cnt = 0;
	mdev->writ_cnt = 0;

	drbd_reconsider_max_bio_size(mdev);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	if (mdev->state.role != R_PRIMARY &&
	    drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &mdev->flags);

	dd = drbd_determine_dev_size(mdev, 0);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == grew)
		set_bit(RESYNC_AFTER_NEG, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
			 "(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
				   "set_n_write from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read,
				   "read from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (cp_discovered) {
		drbd_al_apply_to_bm(mdev);
		if (drbd_bitmap_io(mdev, &drbd_bm_write,
				   "crashed primary apply AL", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
		drbd_suspend_al(mdev); /* IO is still suspended here... */

	spin_lock_irq(&mdev->tconn->req_lock);
	os = drbd_read_state(mdev);
	ns = os;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WasUpToDate...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	if (ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		   To avoid a race in receive_state, free p_uuid while
		   holding req_lock. I.e. atomic with the state change */
		kfree(mdev->p_uuid);
		mdev->p_uuid = NULL;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	conn_reconfig_done(mdev->tconn);
	drbd_adm_finish(info, retcode);
	return 0;

 force_diskless_dec:
	put_ldev(mdev);
 force_diskless:
	drbd_force_state(mdev, NS(disk, D_FAILED));
	drbd_md_sync(mdev);
 fail:
	conn_reconfig_done(mdev->tconn);
	if (nbc) {
		if (nbc->backing_bdev)
			blkdev_put(nbc->backing_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		if (nbc->md_bdev)
			blkdev_put(nbc->md_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		kfree(nbc);
	}
	lc_destroy(resync_lru);

 finish:
	drbd_adm_finish(info, retcode);
	return 0;
}
static int adm_detach(struct drbd_conf *mdev)
{
	enum drbd_state_rv retcode;
	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
	retcode = drbd_request_state(mdev, NS(disk, D_DISKLESS));
	wait_event(mdev->misc_wait,
		   mdev->state.disk != D_DISKLESS ||
		   !atomic_read(&mdev->local_cnt));
	drbd_resume_io(mdev);
	return retcode;
}

/* Detaching the disk is a process in multiple stages. First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then have we finally detached. */
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = adm_detach(adm_ctx.mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
static bool conn_resync_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = false;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_SYNC_SOURCE ||
		    mdev->state.conn == C_SYNC_TARGET ||
		    mdev->state.conn == C_PAUSED_SYNC_S ||
		    mdev->state.conn == C_PAUSED_SYNC_T) {
			rv = true;
			break;
		}
	}

	return rv;
}

static bool conn_ov_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = false;
	int vnr;

	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_VERIFY_S ||
		    mdev->state.conn == C_VERIFY_T) {
			rv = true;
			break;
		}
	}

	return rv;
}
static enum drbd_ret_code
check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
{
	struct drbd_conf *mdev;
	int i;

	if (tconn->net_conf && tconn->agreed_pro_version < 100 &&
	    tconn->cstate == C_WF_REPORT_PARAMS &&
	    new_conf->wire_protocol != tconn->net_conf->wire_protocol)
		return ERR_NEED_APV_100;

	if (new_conf->two_primaries &&
	    (new_conf->wire_protocol != DRBD_PROT_C))
		return ERR_NOT_PROTO_C;

	idr_for_each_entry(&tconn->volumes, mdev, i) {
		if (get_ldev(mdev)) {
			enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
			put_ldev(mdev);
			if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
				return ERR_STONITH_AND_PROT_A;
		}
		if (mdev->state.role == R_PRIMARY && new_conf->want_lose)
			return ERR_DISCARD;
		if (!mdev->bitmap) {
			if (drbd_bm_init(mdev))
				return ERR_NOMEM;
		}
	}

	if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
		return ERR_CONG_NOT_PROTO_A;

	return NO_ERROR;
}
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_tconn *tconn;
	struct net_conf *new_conf = NULL;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tconn = adm_ctx.tconn;

	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto out;
	}

	/* we also need a net config
	 * to change the options on */
	if (!get_net_conf(tconn)) {
		drbd_msg_put_info("net conf missing, try connect");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	conn_reconfig_start(tconn);

	memcpy(new_conf, tconn->net_conf, sizeof(*new_conf));
	err = net_conf_from_attrs_for_change(new_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(tconn, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	/* re-sync running */
	rsr = conn_resync_running(tconn);
	if (rsr && strcmp(new_conf->csums_alg, tconn->net_conf->csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	if (!rsr && new_conf->csums_alg[0]) {
		csums_tfm = crypto_alloc_hash(new_conf->csums_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(csums_tfm)) {
			csums_tfm = NULL;
			retcode = ERR_CSUMS_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
			retcode = ERR_CSUMS_ALG_ND;
			goto fail;
		}
	}

	/* online verify running */
	ovr = conn_ov_running(tconn);
	if (ovr) {
		if (strcmp(new_conf->verify_alg, tconn->net_conf->verify_alg)) {
			retcode = ERR_VERIFY_RUNNING;
			goto fail;
		}
	}

	if (!ovr && new_conf->verify_alg[0]) {
		verify_tfm = crypto_alloc_hash(new_conf->verify_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(verify_tfm)) {
			verify_tfm = NULL;
			retcode = ERR_VERIFY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
			retcode = ERR_VERIFY_ALG_ND;
			goto fail;
		}
	}

	/* For now, use struct assignment, not pointer assignment.
	 * We don't have any means to determine who might still
	 * keep a local alias into the struct,
	 * so we cannot just free it and hope for the best :(
	 *
	 * To avoid someone looking at a half-updated struct, we probably
	 * should have a rw-semaphor on net_conf and disk_conf.
	 */
	*tconn->net_conf = *new_conf;

	if (!rsr) {
		crypto_free_hash(tconn->csums_tfm);
		tconn->csums_tfm = csums_tfm;
		csums_tfm = NULL;
	}
	if (!ovr) {
		crypto_free_hash(tconn->verify_tfm);
		tconn->verify_tfm = verify_tfm;
		verify_tfm = NULL;
	}

	if (tconn->cstate >= C_WF_REPORT_PARAMS)
		drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));

 fail:
	crypto_free_hash(csums_tfm);
	crypto_free_hash(verify_tfm);
	kfree(new_conf);
	put_net_conf(tconn);
	conn_reconfig_done(tconn);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}
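/*
 * Note the pattern above: while a resync or online verify is running, the
 * corresponding hash algorithm must not change; otherwise a new transform
 * is allocated and validated first, and only swapped into the tconn after
 * all checks passed, so no failure path leaves a half-installed tfm behind.
 */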
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
{
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	struct drbd_conf *mdev;
	struct net_conf *new_conf = NULL;
	struct crypto_hash *tfm = NULL;
	struct crypto_hash *integrity_w_tfm = NULL;
	struct crypto_hash *integrity_r_tfm = NULL;
	void *int_dig_in = NULL;
	void *int_dig_vv = NULL;
	struct drbd_tconn *oconn;
	struct drbd_tconn *tconn;
	struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
	enum drbd_ret_code retcode;
	int i;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tconn = adm_ctx.tconn;
	conn_reconfig_start(tconn);

	if (tconn->cstate > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	new_conf = kmalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	*new_conf = (struct net_conf) {
		{}, 0, /* my_addr */
		{}, 0, /* peer_addr */
		{}, 0, /* shared_secret */
		{}, 0, /* cram_hmac_alg */
		{}, 0, /* integrity_alg */
		{}, 0, /* verify_alg */
		{}, 0, /* csums_alg */
		DRBD_PROTOCOL_DEF, /* wire_protocol */
		DRBD_CONNECT_INT_DEF, /* try_connect_int */
		DRBD_TIMEOUT_DEF, /* timeout */
		DRBD_PING_INT_DEF, /* ping_int */
		DRBD_PING_TIMEO_DEF, /* ping_timeo */
		DRBD_SNDBUF_SIZE_DEF, /* sndbuf_size */
		DRBD_RCVBUF_SIZE_DEF, /* rcvbuf_size */
		DRBD_KO_COUNT_DEF, /* ko_count */
		DRBD_MAX_BUFFERS_DEF, /* max_buffers */
		DRBD_MAX_EPOCH_SIZE_DEF, /* max_epoch_size */
		DRBD_UNPLUG_WATERMARK_DEF, /* unplug_watermark */
		DRBD_AFTER_SB_0P_DEF, /* after_sb_0p */
		DRBD_AFTER_SB_1P_DEF, /* after_sb_1p */
		DRBD_AFTER_SB_2P_DEF, /* after_sb_2p */
		DRBD_RR_CONFLICT_DEF, /* rr_conflict */
		DRBD_ON_CONGESTION_DEF, /* on_congestion */
		DRBD_CONG_FILL_DEF, /* cong_fill */
		DRBD_CONG_EXTENTS_DEF, /* cong_extents */
		0, /* two_primaries */
		0, /* want_lose */
		0, /* no_cork */
		0, /* always_asbp */
		0, /* dry_run */
		0, /* use_rle */
	};

	err = net_conf_from_attrs(new_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(tconn, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	retcode = NO_ERROR;

	new_my_addr = (struct sockaddr *)&new_conf->my_addr;
	new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;

	/* No need to take drbd_cfg_rwsem here.  All reconfiguration is
	 * strictly serialized on genl_lock(). We are protected against
	 * concurrent reconfiguration/addition/deletion */
	list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
		if (oconn == tconn)
			continue;
		if (get_net_conf(oconn)) {
			taken_addr = (struct sockaddr *)&oconn->net_conf->my_addr;
			if (new_conf->my_addr_len == oconn->net_conf->my_addr_len &&
			    !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
				retcode = ERR_LOCAL_ADDR;

			taken_addr = (struct sockaddr *)&oconn->net_conf->peer_addr;
			if (new_conf->peer_addr_len == oconn->net_conf->peer_addr_len &&
			    !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
				retcode = ERR_PEER_ADDR;

			put_net_conf(oconn);
			if (retcode != NO_ERROR)
				goto fail;
		}
	}

	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			new_conf->cram_hmac_alg);
		tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm)) {
			tfm = NULL;
			retcode = ERR_AUTH_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
			retcode = ERR_AUTH_ALG_ND;
			goto fail;
		}
	}

	if (new_conf->integrity_alg[0]) {
		integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_w_tfm)) {
			integrity_w_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
			retcode = ERR_INTEGRITY_ALG_ND;
			goto fail;
		}

		integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_r_tfm)) {
			integrity_r_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}
	}

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	/* allocation not in the IO path, cqueue thread context */
	if (integrity_w_tfm) {
		i = crypto_hash_digestsize(integrity_w_tfm);
		int_dig_in = kmalloc(i, GFP_KERNEL);
		if (!int_dig_in) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_vv = kmalloc(i, GFP_KERNEL);
		if (!int_dig_vv) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	conn_flush_workqueue(tconn);
	spin_lock_irq(&tconn->req_lock);
	if (tconn->net_conf != NULL) {
		retcode = ERR_NET_CONFIGURED;
		spin_unlock_irq(&tconn->req_lock);
		goto fail;
	}
	tconn->net_conf = new_conf;

	crypto_free_hash(tconn->cram_hmac_tfm);
	tconn->cram_hmac_tfm = tfm;

	crypto_free_hash(tconn->integrity_w_tfm);
	tconn->integrity_w_tfm = integrity_w_tfm;

	crypto_free_hash(tconn->integrity_r_tfm);
	tconn->integrity_r_tfm = integrity_r_tfm;

	kfree(tconn->int_dig_in);
	kfree(tconn->int_dig_vv);
	tconn->int_dig_in = int_dig_in;
	tconn->int_dig_vv = int_dig_vv;
	retcode = _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
	spin_unlock_irq(&tconn->req_lock);

	idr_for_each_entry(&tconn->volumes, mdev, i) {
		mdev->send_cnt = 0;
		mdev->recv_cnt = 0;
		kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	}

	conn_reconfig_done(tconn);
	drbd_adm_finish(info, retcode);
	return 0;

fail:
	kfree(int_dig_in);
	kfree(int_dig_vv);
	crypto_free_hash(tfm);
	crypto_free_hash(integrity_w_tfm);
	crypto_free_hash(integrity_r_tfm);
	kfree(new_conf);

	conn_reconfig_done(tconn);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
{
	enum drbd_state_rv rv;

	if (force) {
		spin_lock_irq(&tconn->req_lock);
		if (tconn->cstate >= C_WF_CONNECTION)
			_conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		spin_unlock_irq(&tconn->req_lock);
		return SS_SUCCESS;
	}

	rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), 0);

	switch (rv) {
	case SS_NOTHING_TO_DO:
	case SS_ALREADY_STANDALONE:
		return SS_SUCCESS;
	case SS_PRIMARY_NOP:
		/* Our state checking code wants to see the peer outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						   pdsk, D_OUTDATED), CS_VERBOSE);
		break;
	case SS_CW_FAILED_BY_PEER:
		/* The peer probably wants to see us outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						   disk, D_OUTDATED), 0);
		if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
			rv = SS_SUCCESS;
		}
		break;
	default:
		/* no special handling necessary */
		break;
	}

	return rv;
}
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct disconnect_parms parms;
	struct drbd_tconn *tconn;
	enum drbd_state_rv rv;
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	tconn = adm_ctx.tconn;
	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
		err = disconnect_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	rv = conn_try_disconnect(tconn, parms.force_disconnect);
	if (rv < SS_SUCCESS) {
		retcode = rv;  /* FIXME: Type mismatch. */
		goto fail;
	}

	if (wait_event_interruptible(tconn->ping_wait,
				     tconn->cstate != C_DISCONNECTING)) {
		/* Do not test for mdev->state.conn == C_STANDALONE, since
		   someone else might connect us in the meantime! */
		retcode = ERR_INTR;
		goto fail;
	}

	retcode = NO_ERROR;
 fail:
	drbd_adm_finish(info, retcode);
	return 0;
}
void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
2152 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
2154 struct resize_parms rs;
2155 struct drbd_conf *mdev;
2156 enum drbd_ret_code retcode;
2157 enum determine_dev_size dd;
2158 enum dds_flags ddsf;
2161 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2162 if (!adm_ctx.reply_skb)
2164 if (retcode != NO_ERROR)
2167 memset(&rs, 0, sizeof(struct resize_parms));
2168 if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
2169 err = resize_parms_from_attrs(&rs, info);
2171 retcode = ERR_MANDATORY_TAG;
2172 drbd_msg_put_info(from_attrs_err_to_txt(err));
2177 mdev = adm_ctx.mdev;
2178 if (mdev->state.conn > C_CONNECTED) {
2179 retcode = ERR_RESIZE_RESYNC;
2183 if (mdev->state.role == R_SECONDARY &&
2184 mdev->state.peer == R_SECONDARY) {
2185 retcode = ERR_NO_PRIMARY;
2189 if (!get_ldev(mdev)) {
2190 retcode = ERR_NO_DISK;
2194 if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
2195 retcode = ERR_NEED_APV_93;
2199 if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
2200 mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
2202 mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
2203 ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
2204 dd = drbd_determine_dev_size(mdev, ddsf);
2207 if (dd == dev_size_error) {
2208 retcode = ERR_NOMEM_BITMAP;
2212 if (mdev->state.conn == C_CONNECTED) {
2214 set_bit(RESIZE_PENDING, &mdev->flags);
2216 drbd_send_uuids(mdev);
2217 drbd_send_sizes(mdev, 1, ddsf);
2221 drbd_adm_finish(info, retcode);
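/* Note: while connected, a successful resize only sets RESIZE_PENDING
 * and announces the new uuids/sizes to the peer; the resync of the
 * grown area (resync_after_online_grow() above) is started later,
 * once the peer has reported its matching new size. */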
2225 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
2227 enum drbd_ret_code retcode;
2228 cpumask_var_t new_cpu_mask;
2229 struct drbd_tconn *tconn;
2230 int *rs_plan_s = NULL;
2234 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2235 if (!adm_ctx.reply_skb)
2237 if (retcode != NO_ERROR)
2239 tconn = adm_ctx.tconn;
2241 if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
2242 retcode = ERR_NOMEM;
2243 drbd_msg_put_info("unable to allocate cpumask");
2247 if (((struct drbd_genlmsghdr *)info->userhdr)->flags
2248 & DRBD_GENL_F_SET_DEFAULTS) {
2249 memset(&sc, 0, sizeof(struct res_opts));
2250 sc.on_no_data = DRBD_ON_NO_DATA_DEF;
2252 sc = tconn->res_opts;
2254 err = res_opts_from_attrs(&sc, info);
2256 retcode = ERR_MANDATORY_TAG;
2257 drbd_msg_put_info(from_attrs_err_to_txt(err));
2261 /* silently ignore cpu mask on UP kernel */
2262 if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
2263 err = __bitmap_parse(sc.cpu_mask, 32, 0,
2264 cpumask_bits(new_cpu_mask), nr_cpu_ids);
2266 conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
2267 retcode = ERR_CPU_MASK_PARSE;
2273 tconn->res_opts = sc;
2275 if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
2276 cpumask_copy(tconn->cpu_mask, new_cpu_mask);
2277 drbd_calc_cpu_mask(tconn);
2278 tconn->receiver.reset_cpu_mask = 1;
2279 tconn->asender.reset_cpu_mask = 1;
2280 tconn->worker.reset_cpu_mask = 1;
2285 free_cpumask_var(new_cpu_mask);
2287 drbd_adm_finish(info, retcode);
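/* Illustration (hypothetical value): a res_opts cpu_mask of "3" is
 * parsed by __bitmap_parse() as a hex mask selecting CPUs 0 and 1;
 * the receiver, asender and worker threads re-pin themselves the
 * next time they see their reset_cpu_mask flag set. */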
2291 int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
2293 struct drbd_conf *mdev;
2294 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2296 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2297 if (!adm_ctx.reply_skb)
2299 if (retcode != NO_ERROR)
2302 mdev = adm_ctx.mdev;
2304 /* If there is still bitmap IO pending, probably because a previous
2305 * resync has just finished, wait for it before requesting a new resync. */
2306 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2308 retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
2310 if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
2311 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
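/* Without a connection we cannot become sync target, so instead mark
 * the local disk D_INCONSISTENT under the req_lock.  Loop: a
 * connection might race in between checking the state and changing
 * it. */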
2313 while (retcode == SS_NEED_CONNECTION) {
2314 spin_lock_irq(&mdev->tconn->req_lock);
2315 if (mdev->state.conn < C_CONNECTED)
2316 retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
2317 spin_unlock_irq(&mdev->tconn->req_lock);
2319 if (retcode != SS_NEED_CONNECTION)
2322 retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
2326 drbd_adm_finish(info, retcode);
2330 static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
2334 rv = drbd_bmio_set_n_write(mdev);
2335 drbd_suspend_al(mdev);
2339 static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
2340 union drbd_state mask, union drbd_state val)
2342 enum drbd_ret_code retcode;
2344 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2345 if (!adm_ctx.reply_skb)
2347 if (retcode != NO_ERROR)
2350 retcode = drbd_request_state(adm_ctx.mdev, mask, val);
2352 drbd_adm_finish(info, retcode);
2356 int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
2358 return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
2361 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
2363 enum drbd_ret_code retcode;
2365 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2366 if (!adm_ctx.reply_skb)
2368 if (retcode != NO_ERROR)
2371 if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
2372 retcode = ERR_PAUSE_IS_SET;
2374 drbd_adm_finish(info, retcode);
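/* A resync can be paused for three independent reasons, each with its
 * own state bit: user_isp (explicit user request, toggled by
 * pause-sync above and resume-sync below), peer_isp (the peer asked
 * for the pause) and aftr_isp (waiting for a resync-after
 * dependency).  Resuming clears only the user bit and reports which
 * other reason, if any, still keeps the resync paused. */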
2378 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
2380 union drbd_dev_state s;
2381 enum drbd_ret_code retcode;
2383 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2384 if (!adm_ctx.reply_skb)
2386 if (retcode != NO_ERROR)
2389 if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
2390 s = adm_ctx.mdev->state;
2391 if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
2392 retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
2393 s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
2395 retcode = ERR_PAUSE_IS_CLEAR;
2400 drbd_adm_finish(info, retcode);
2404 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
2406 return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
2409 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
2411 struct drbd_conf *mdev;
2412 int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
2414 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2415 if (!adm_ctx.reply_skb)
2417 if (retcode != NO_ERROR)
2420 mdev = adm_ctx.mdev;
2421 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
2422 drbd_uuid_new_current(mdev);
2423 clear_bit(NEW_CUR_UUID, &mdev->flags);
2425 drbd_suspend_io(mdev);
2426 retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
2427 if (retcode == SS_SUCCESS) {
2428 if (mdev->state.conn < C_CONNECTED)
2429 tl_clear(mdev->tconn);
2430 if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
2431 tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
2433 drbd_resume_io(mdev);
2436 drbd_adm_finish(info, retcode);
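/* I/O can be suspended for three reasons, all cleared together above:
 * susp (explicit user suspend), susp_nod (no up-to-date data
 * accessible) and susp_fen (fencing policy still pending).  Requests
 * frozen on a meanwhile gone local disk are completed with an error
 * via tl_restart(..., FAIL_FROZEN_DISK_IO). */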
2440 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
2442 return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
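/* The attribute packing below relies on the legacy NLA_PUT_*() macros
 * from <net/netlink.h>.  Sketch of their contract (not the verbatim
 * definition): NLA_PUT_U32(skb, type, value) expands to roughly
 *
 *	if (nla_put_u32(skb, type, value) < 0)
 *		goto nla_put_failure;
 *
 * which is why every user must provide an nla_put_failure label that
 * cancels any open attribute nest. */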
2445 int nla_put_drbd_cfg_context(struct sk_buff *skb, const char *conn_name, unsigned vnr)
2448 nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
2450 goto nla_put_failure;
2451 if (vnr != VOLUME_UNSPECIFIED)
2452 NLA_PUT_U32(skb, T_ctx_volume, vnr);
2453 NLA_PUT_STRING(skb, T_ctx_conn_name, conn_name);
2454 nla_nest_end(skb, nla);
2459 nla_nest_cancel(skb, nla);
2463 int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
2464 const struct sib_info *sib)
2466 struct state_info *si = NULL; /* for sizeof(si->member); */
2471 int exclude_sensitive;
2473 /* If sib != NULL, this is drbd_bcast_event, which anyone can listen
2474 * to. So we had better exclude sensitive information (hence exclude_sensitive).
2476 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
2477 * in the context of the requesting user process. Exclude sensitive
2478 * information, unless the current process has CAP_SYS_ADMIN.
2480 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
2481 * relies on the current implementation of netlink_dump(), which
2482 * executes the dump callback successively from netlink_recvmsg(),
2483 * always in the context of the receiving process */
2484 exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
2486 got_ldev = get_ldev(mdev);
2487 got_net = get_net_conf(mdev->tconn);
2489 /* We still need to add connection name and volume number information.
2490 * Minor number is in drbd_genlmsghdr. */
2491 if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
2492 goto nla_put_failure;
2494 if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
2495 goto nla_put_failure;
2498 if (disk_conf_to_skb(skb, &mdev->ldev->dc, exclude_sensitive))
2499 goto nla_put_failure;
2501 if (net_conf_to_skb(skb, mdev->tconn->net_conf, exclude_sensitive))
2502 goto nla_put_failure;
2504 nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
2506 goto nla_put_failure;
2507 NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
2508 NLA_PUT_U32(skb, T_current_state, mdev->state.i);
2509 NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
2510 NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));
2513 NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
2514 NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
2515 NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
2516 NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
2517 if (C_SYNC_SOURCE <= mdev->state.conn &&
2518 C_PAUSED_SYNC_T >= mdev->state.conn) {
2519 NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
2520 NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
2525 switch (sib->sib_reason) {
2526 case SIB_SYNC_PROGRESS:
2527 case SIB_GET_STATUS_REPLY:
2529 case SIB_STATE_CHANGE:
2530 NLA_PUT_U32(skb, T_prev_state, sib->os.i);
2531 NLA_PUT_U32(skb, T_new_state, sib->ns.i);
2533 case SIB_HELPER_POST:
2535 T_helper_exit_code, sib->helper_exit_code);
2537 case SIB_HELPER_PRE:
2538 NLA_PUT_STRING(skb, T_helper, sib->helper_name);
2542 nla_nest_end(skb, nla);
2550 put_net_conf(mdev->tconn);
2554 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
2556 enum drbd_ret_code retcode;
2559 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2560 if (!adm_ctx.reply_skb)
2562 if (retcode != NO_ERROR)
2565 err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
2567 nlmsg_free(adm_ctx.reply_skb);
2571 drbd_adm_finish(info, retcode);
2575 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
2577 struct drbd_conf *mdev;
2578 struct drbd_genlmsghdr *dh;
2579 struct drbd_tconn *pos = (struct drbd_tconn *)cb->args[0];
2580 struct drbd_tconn *tconn = NULL;
2581 struct drbd_tconn *tmp;
2582 unsigned volume = cb->args[1];
2584 /* Open-coded, deferred iteration:
2585 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
2586 * idr_for_each_entry(&tconn->volumes, mdev, i) {
2590 * where tconn is cb->args[0];
2591 * and i is cb->args[1];
2593 * This may miss entries inserted after this dump started,
2594 * or entries deleted before they are reached.
2596 * We need to make sure the mdev won't disappear while
2597 * we are looking at it, and revalidate our iterators
2598 * on each iteration.
2601 /* synchronize with drbd_new_tconn/drbd_free_tconn */
2602 down_read(&drbd_cfg_rwsem);
2604 /* revalidate iterator position */
2605 list_for_each_entry(tmp, &drbd_tconns, all_tconn) {
2607 /* first iteration */
2618 mdev = idr_get_next(&tconn->volumes, &volume);
2620 /* No more volumes to dump on this tconn.
2621 * Advance tconn iterator. */
2622 pos = list_entry(tconn->all_tconn.next,
2623 struct drbd_tconn, all_tconn);
2624 /* But, did we dump any volume on this tconn yet? */
2632 dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
2633 cb->nlh->nlmsg_seq, &drbd_genl_family,
2634 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
2639 /* this is a tconn without a single volume */
2641 dh->ret_code = NO_ERROR;
2642 if (nla_put_drbd_cfg_context(skb, tconn->name, VOLUME_UNSPECIFIED))
2643 genlmsg_cancel(skb, dh);
2645 genlmsg_end(skb, dh);
2649 D_ASSERT(mdev->vnr == volume);
2650 D_ASSERT(mdev->tconn == tconn);
2652 dh->minor = mdev_to_minor(mdev);
2653 dh->ret_code = NO_ERROR;
2655 if (nla_put_status_info(skb, mdev, NULL)) {
2656 genlmsg_cancel(skb, dh);
2659 genlmsg_end(skb, dh);
2663 up_read(&drbd_cfg_rwsem);
2664 /* where to start the next iteration */
2665 cb->args[0] = (long)pos;
2666 cb->args[1] = (pos == tconn) ? volume + 1 : 0;
2668 /* No more tconns/volumes/minors found results in an empty skb,
2669 * which will terminate the dump. */
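/* Resume protocol sketch: netlink_dump() invokes this callback
 * repeatedly from netlink_recvmsg(), and between invocations the
 * cursor lives in cb->args[] (args[0]: tconn to resume at, args[1]:
 * next volume number, as set above).  Returning an empty skb, i.e.
 * putting no message at all, ends the NLM_F_MULTI sequence. */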
2673 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
2675 enum drbd_ret_code retcode;
2676 struct timeout_parms tp;
2679 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2680 if (!adm_ctx.reply_skb)
2682 if (retcode != NO_ERROR)
2686 adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
2687 test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
2690 err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
2692 nlmsg_free(adm_ctx.reply_skb);
2696 drbd_adm_finish(info, retcode);
2700 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
2702 struct drbd_conf *mdev;
2703 enum drbd_ret_code retcode;
2705 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2706 if (!adm_ctx.reply_skb)
2708 if (retcode != NO_ERROR)
2711 mdev = adm_ctx.mdev;
2712 if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
2713 /* resume from last known position, if possible */
2714 struct start_ov_parms parms =
2715 { .ov_start_sector = mdev->ov_start_sector };
2716 int err = start_ov_parms_from_attrs(&parms, info);
2718 retcode = ERR_MANDATORY_TAG;
2719 drbd_msg_put_info(from_attrs_err_to_txt(err));
2722 /* w_make_ov_request expects the position to be aligned down to a full bitmap bit */
2723 mdev->ov_start_sector = parms.ov_start_sector & ~(sector_t)(BM_SECT_PER_BIT - 1);
2725 /* If there is still bitmap IO pending, e.g. because a previous resync
2726 * or verify has just finished, wait for it before requesting a new resync. */
2727 wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
2728 retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
2730 drbd_adm_finish(info, retcode);
2735 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
2737 struct drbd_conf *mdev;
2738 enum drbd_ret_code retcode;
2739 int skip_initial_sync = 0;
2741 struct new_c_uuid_parms args;
2743 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2744 if (!adm_ctx.reply_skb)
2746 if (retcode != NO_ERROR)
2749 mdev = adm_ctx.mdev;
2750 memset(&args, 0, sizeof(args));
2751 if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
2752 err = new_c_uuid_parms_from_attrs(&args, info);
2754 retcode = ERR_MANDATORY_TAG;
2755 drbd_msg_put_info(from_attrs_err_to_txt(err));
2760 mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */
2762 if (!get_ldev(mdev)) {
2763 retcode = ERR_NO_DISK;
2767 /* this is "skip initial sync": assume both backing devices are identical/clean */
2768 if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
2769 mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
2770 dev_info(DEV, "Preparing to skip initial sync\n");
2771 skip_initial_sync = 1;
2772 } else if (mdev->state.conn != C_STANDALONE) {
2773 retcode = ERR_CONNECTED;
2777 drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
2778 drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */
2780 if (args.clear_bm) {
2781 err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
2782 "clear_n_write from new_c_uuid", BM_LOCKED_MASK);
2784 dev_err(DEV, "Writing bitmap failed with %d\n", err);
2785 retcode = ERR_IO_MD_DISK;
2787 if (skip_initial_sync) {
2788 drbd_send_uuids_skip_initial_sync(mdev);
2789 _drbd_uuid_set(mdev, UI_BITMAP, 0);
2790 drbd_print_uuids(mdev, "cleared bitmap UUID");
2791 spin_lock_irq(&mdev->tconn->req_lock);
2792 _drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
2794 spin_unlock_irq(&mdev->tconn->req_lock);
2802 mutex_unlock(mdev->state_mutex);
2804 drbd_adm_finish(info, retcode);
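/* The clear_bm/skip_initial_sync path exists so that userspace
 * (typically drbdadm's new-current-uuid with its clear-bitmap option;
 * the exact CLI spelling varies by version) can skip the initial full
 * sync of two freshly created, known-identical backing devices. */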
2808 static enum drbd_ret_code
2809 drbd_check_conn_name(const char *name)
2811 if (!name || !name[0]) {
2812 drbd_msg_put_info("connection name missing");
2813 return ERR_MANDATORY_TAG;
2815 /* if we want to use these in sysfs/configfs/debugfs some day,
2816 * we must not allow slashes */
2817 if (strchr(name, '/')) {
2818 drbd_msg_put_info("invalid connection name");
2819 return ERR_INVALID_REQUEST;
2824 int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info)
2826 enum drbd_ret_code retcode;
2828 retcode = drbd_adm_prepare(skb, info, 0);
2829 if (!adm_ctx.reply_skb)
2831 if (retcode != NO_ERROR)
2834 retcode = drbd_check_conn_name(adm_ctx.conn_name);
2835 if (retcode != NO_ERROR)
2838 if (adm_ctx.tconn) {
2839 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
2840 retcode = ERR_INVALID_REQUEST;
2841 drbd_msg_put_info("connection exists");
2843 /* else: still NO_ERROR */
2847 if (!drbd_new_tconn(adm_ctx.conn_name))
2848 retcode = ERR_NOMEM;
2850 drbd_adm_finish(info, retcode);
2854 int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
2856 struct drbd_genlmsghdr *dh = info->userhdr;
2857 enum drbd_ret_code retcode;
2859 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
2860 if (!adm_ctx.reply_skb)
2862 if (retcode != NO_ERROR)
2865 /* FIXME drop minor_count parameter, limit to MINORMASK */
2866 if (dh->minor >= minor_count) {
2867 drbd_msg_put_info("requested minor out of range");
2868 retcode = ERR_INVALID_REQUEST;
2871 if (adm_ctx.volume > DRBD_VOLUME_MAX) {
2872 drbd_msg_put_info("requested volume id out of range");
2873 retcode = ERR_INVALID_REQUEST;
2877 /* drbd_adm_prepare made sure already
2878 * that mdev->tconn and mdev->vnr match the request. */
2880 if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
2881 retcode = ERR_MINOR_EXISTS;
2882 /* else: still NO_ERROR */
2886 down_write(&drbd_cfg_rwsem);
2887 retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
2888 up_write(&drbd_cfg_rwsem);
2890 drbd_adm_finish(info, retcode);
2894 static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
2896 if (mdev->state.disk == D_DISKLESS &&
2897 /* no need to require mdev->state.conn == C_STANDALONE here;
2898 * we may want to delete a minor from a live replication group.
2900 mdev->state.role == R_SECONDARY) {
2901 drbd_delete_device(mdev);
2904 return ERR_MINOR_CONFIGURED;
2907 int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
2909 enum drbd_ret_code retcode;
2911 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
2912 if (!adm_ctx.reply_skb)
2914 if (retcode != NO_ERROR)
2917 down_write(&drbd_cfg_rwsem);
2918 retcode = adm_delete_minor(adm_ctx.mdev);
2919 up_write(&drbd_cfg_rwsem);
2920 /* if this was the last volume of this connection,
2921 * this will terminate all threads */
2922 if (retcode == NO_ERROR)
2923 conn_reconfig_done(adm_ctx.tconn);
2925 drbd_adm_finish(info, retcode);
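/* "down" is the big hammer: demote every volume to Secondary,
 * disconnect, detach each disk, delete all minors, and finally free
 * the connection itself once no minor is left. */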
2929 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
2931 enum drbd_ret_code retcode;
2932 enum drbd_state_rv rv;
2933 struct drbd_conf *mdev;
2936 retcode = drbd_adm_prepare(skb, info, 0);
2937 if (!adm_ctx.reply_skb)
2939 if (retcode != NO_ERROR)
2942 if (!adm_ctx.tconn) {
2943 retcode = ERR_CONN_NOT_KNOWN;
2947 down_read(&drbd_cfg_rwsem);
2949 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
2950 retcode = drbd_set_role(mdev, R_SECONDARY, 0);
2951 if (retcode < SS_SUCCESS) {
2952 drbd_msg_put_info("failed to demote");
2958 rv = conn_try_disconnect(adm_ctx.tconn, 0);
2959 if (rv < SS_SUCCESS) {
2960 retcode = rv; /* enum type mismatch! */
2961 drbd_msg_put_info("failed to disconnect");
2966 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
2967 rv = adm_detach(mdev);
2968 if (rv < SS_SUCCESS) {
2969 retcode = rv; /* enum type mismatch! */
2970 drbd_msg_put_info("failed to detach");
2974 up_read(&drbd_cfg_rwsem);
2976 /* delete volumes */
2977 down_write(&drbd_cfg_rwsem);
2978 idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
2979 retcode = adm_delete_minor(mdev);
2980 if (retcode != NO_ERROR) {
2981 /* "can not happen" */
2982 drbd_msg_put_info("failed to delete volume");
2983 up_write(&drbd_cfg_rwsem);
2988 /* stop all threads */
2989 conn_reconfig_done(adm_ctx.tconn);
2991 /* delete connection */
2992 if (conn_lowest_minor(adm_ctx.tconn) < 0) {
2993 drbd_free_tconn(adm_ctx.tconn);
2996 /* "can not happen" */
2997 retcode = ERR_CONN_IN_USE;
2998 drbd_msg_put_info("failed to delete connection");
3001 up_write(&drbd_cfg_rwsem);
3004 up_read(&drbd_cfg_rwsem);
3006 drbd_adm_finish(info, retcode);
3010 int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info)
3012 enum drbd_ret_code retcode;
3014 retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
3015 if (!adm_ctx.reply_skb)
3017 if (retcode != NO_ERROR)
3020 down_write(&drbd_cfg_rwsem);
3021 if (conn_lowest_minor(adm_ctx.tconn) < 0) {
3022 drbd_free_tconn(adm_ctx.tconn);
3025 retcode = ERR_CONN_IN_USE;
3027 up_write(&drbd_cfg_rwsem);
3030 drbd_adm_finish(info, retcode);
3034 void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
3036 static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
3037 struct sk_buff *msg;
3038 struct drbd_genlmsghdr *d_out;
3042 seq = atomic_inc_return(&drbd_genl_seq);
3043 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
3048 d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
3049 if (!d_out) /* cannot happen, but anyway. */
3050 goto nla_put_failure;
3051 d_out->minor = mdev_to_minor(mdev);
3052 d_out->ret_code = 0;
3054 if (nla_put_status_info(msg, mdev, sib))
3055 goto nla_put_failure;
3056 genlmsg_end(msg, d_out);
3057 err = drbd_genl_multicast_events(msg, 0);
3058 /* msg has been consumed or freed in netlink_broadcast() */
3059 if (err && err != -ESRCH)
3067 dev_err(DEV, "Error %d while broadcasting event. "
3068 "Event seq:%u sib_reason:%u\n",
3069 err, seq, sib->sib_reason);
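/* Userspace sketch (assuming the generated family name "drbd" and the
 * multicast group "events" from drbd_genl.h): a listener resolves the
 * family, joins the group and decodes DRBD_EVENT messages built by
 * nla_put_status_info() above, e.g. with libnl-3:
 *
 *	struct nl_sock *s = nl_socket_alloc();
 *	genl_connect(s);
 *	int grp = genl_ctrl_resolve_grp(s, "drbd", "events");
 *	nl_socket_add_membership(s, grp);
 *	// then nl_recvmsgs() delivers the broadcast messages
 */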