/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
#include <linux/drbd_genl_api.h>
#include <linux/genl_magic_func.h>
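/* genl_magic_func.h expands the generated netlink family definition:
 * among other things it provides the *_from_attrs() parsers, the
 * set_*_defaults() helpers and the nested_attr_tb[] table that
 * drbd_adm_prepare() consults below. */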
/* used blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *conn_name;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn;
} adm_ctx;
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		printk(KERN_ERR "drbd: error sending genl reply\n");
}
/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail is lack of space in the skb, and there are 4k available. */
int drbd_msg_put_info(const char *info)
{
	struct sk_buff *skb = adm_ctx.reply_skb;
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);

	return 0;
}
/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_CONN	2
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
		unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(&adm_ctx, 0, sizeof(adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS
	&& security_netlink_recv(skb, CAP_SYS_ADMIN))
		return -EPERM;

	adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx.reply_skb)
		goto fail;

	adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* put of a few bytes into a fresh skb of >= 4k will always succeed.
	 * but anyways */
	if (!adm_ctx.reply_dh)
		goto fail;

	adm_ctx.reply_dh->minor = d_in->minor;
	adm_ctx.reply_dh->ret_code = NO_ERROR;

	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx.reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the global adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
		nla = nested_attr_tb[__nla_type(T_ctx_conn_name)];
		if (nla)
			adm_ctx.conn_name = nla_data(nla);
	} else
		adm_ctx.volume = VOLUME_UNSPECIFIED;

	adm_ctx.minor = d_in->minor;
	adm_ctx.mdev = minor_to_mdev(d_in->minor);
	adm_ctx.tconn = conn_get_by_name(adm_ctx.conn_name);

	if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info("unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
		drbd_msg_put_info("unknown connection");
		return ERR_INVALID_REQUEST;
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx.mdev && adm_ctx.tconn &&
	    adm_ctx.mdev->tconn != adm_ctx.tconn) {
		pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
			   adm_ctx.minor, adm_ctx.conn_name, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists in different connection");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx.mdev &&
	    adm_ctx.volume != VOLUME_UNSPECIFIED &&
	    adm_ctx.volume != adm_ctx.mdev->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
			   adm_ctx.minor, adm_ctx.volume,
			   adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx.reply_skb);
	adm_ctx.reply_skb = NULL;
	return -ENOMEM;
}
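/* Counterpart of drbd_adm_prepare(): drops the connection reference,
 * fills in the final return code, and sends the prepared reply skb. */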
static int drbd_adm_finish(struct genl_info *info, int retcode)
{
	struct nlattr *nla;
	const char *conn_name = NULL;

	if (adm_ctx.tconn) {
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);
		adm_ctx.tconn = NULL;
	}

	if (!adm_ctx.reply_skb)
		return -ENOMEM;

	adm_ctx.reply_dh->ret_code = retcode;

	nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
	if (nla) {
		nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
		if (nla)
			conn_name = nla_data(nla);
	}

	drbd_adm_send_reply(adm_ctx.reply_skb, info);
	return 0;
}
static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
{
	char *afs;
	struct net_conf *nc;

	rcu_read_lock();
	nc = rcu_dereference(tconn->net_conf);
	if (nc) {
		switch (((struct sockaddr *)nc->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)nc->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
		}
		snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
	}
	rcu_read_unlock();
}
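/* Run the configured user-mode helper for one minor ("minor-%d").
 * The helper's exit status is in bits 8..15 of the return value,
 * which is what the (ret >> 8) & 0xff below extracts. */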
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[12];
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	struct sib_info sib;
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
	setup_khelper_env(mdev->tconn, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(mdev, &sib);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(mdev, &sib);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
static void conn_md_sync(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		kref_get(&mdev->kref);
		rcu_read_unlock();
		drbd_md_sync(mdev);
		kref_put(&mdev->kref, &drbd_minor_destroy);
		rcu_read_lock();
	}
	rcu_read_unlock();
}
int conn_khelper(struct drbd_tconn *tconn, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
	int ret;

	setup_khelper_env(tconn, envp);
	conn_md_sync(tconn);

	conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
	/* TODO: conn_bcast_event() ?? */

	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	else
		conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (get_ldev_if_state(mdev, D_CONSISTENT)) {
			fp = max_t(enum drbd_fencing_p, fp,
				   rcu_dereference(mdev->ldev->disk_conf)->fencing);
			put_ldev(mdev);
		}
	}
	rcu_read_unlock();

	return fp;
}
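/* Fence the peer via the "fence-peer" helper; the switch below maps the
 * helper's exit codes (3..7) to the state change we will request.
 * Returns true if the peer's disks could be considered <= D_OUTDATED. */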
bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	if (tconn->cstate >= C_WF_REPORT_PARAMS) {
		conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
		return false;
	}

	fp = highest_fencing_policy(tconn);
	switch (fp) {
	case FP_NOT_AVAIL:
		conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
		goto out;
	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(tconn, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		conn_warn(tconn, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case 7:
		if (fp != FP_STONITH)
			conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	conn_info(tconn, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

 out:
	/* Not using
	   conn_request_state(tconn, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the
	   connection in the meantime. */
	spin_lock_irq(&tconn->req_lock);
	if (tconn->cstate < C_WF_REPORT_PARAMS)
		_conn_request_state(tconn, mask, val, CS_VERBOSE);
	spin_unlock_irq(&tconn->req_lock);

	return conn_highest_pdsk(tconn) <= D_OUTDATED;
}
static int _try_outdate_peer_async(void *data)
{
	struct drbd_tconn *tconn = (struct drbd_tconn *)data;

	conn_try_outdate_peer(tconn);

	kref_put(&tconn->kref, &conn_destroy);
	return 0;
}

void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
	struct task_struct *opa;

	kref_get(&tconn->kref);
	opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
	if (IS_ERR(opa)) {
		conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&tconn->kref, &conn_destroy);
	}
}
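/* Request a role change to new_role.  The loop below retries up to
 * max_tries times, escalating on specific state errors: forcing the local
 * disk UpToDate (if "force"), fencing the peer, or waiting out a ping
 * timeout when two primaries are detected. */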
enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY)
		request_ping(mdev->tconn); /* Detect a dead peer ASAP */

	mutex_lock(mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(mdev->tconn)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(mdev->tconn) && force) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk  = D_OUTDATED;
			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			int timeo;
			rcu_read_lock();
			nc = rcu_dereference(mdev->tconn->net_conf);
			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
			rcu_read_unlock();
			schedule_timeout_interruptible(timeo);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						 CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		mutex_lock(&mdev->tconn->conf_update);
		nc = mdev->tconn->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&mdev->tconn->conf_update);

		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(mdev->state_mutex);
	return rv;
}
static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	int meta_dev_idx;

	rcu_read_lock();
	meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;

	switch (meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_SECTORS;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
	rcu_read_unlock();
}
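/* Pretty-printer for sizes, e.g. ppsize(buf, 1048576) yields "1024 MB":
 * each loop iteration shifts by 10 and rounds on the dropped bit. */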
/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}
/* there is still a theoretical deadlock when called from receiver
 * on an D_INCONSISTENT R_PRIMARY:
 *  remote READ does inc_ap_bio, receiver would need to receive answer
 *  packet from remote to dec_ap_bio again.
 *  receiver receive_sizes(), comes here,
 *  waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 *  (not connected, or bad/no disk on peer):
 *  see drbd_fail_request_early, ap_bio_cnt is zero.
 *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 *  peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (drbd_suspended(mdev))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}
/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size, u_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	rcu_read_lock();
	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
				    "Leaving size unchanged at size = %lu KB\n",
				    (unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
		     (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		int err;

		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		err = drbd_bitmap_io(mdev, &drbd_bm_write,
				"size changed", BM_LOCKED_MASK);
		if (err) {
			rv = dev_size_error;
			goto out;
		}
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}
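/* Pick the effective device size from the peer's reported size (p_size),
 * our own capacity (m_size), the last agreed size (la_size) and a
 * possible user-requested size (u_size). */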
sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}
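/* Push the decided max_bio_size into the request queue limits; while a
 * backing device is attached, also stack its queue limits and adopt its
 * read-ahead setting. */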
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	int max_hw_sectors = max_bio_size >> 9;
	int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		rcu_read_lock();
		max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
		rcu_read_unlock();
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}
void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
	int now, new, local, peer;

	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
	local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		mdev->local_max_bio_size = local;
		put_ldev(mdev);
	}

	/* We may ignore peer limits if the peer is modern enough.
	   Because from 8.3.8 onwards the peer can use multiple
	   BIOs for a single peer_request */
	if (mdev->state.conn >= C_CONNECTED) {
		if (mdev->tconn->agreed_pro_version < 94)
			peer = mdev->peer_max_bio_size;
		else if (mdev->tconn->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else /* drbd 8.3.8 onwards */
			peer = DRBD_MAX_BIO_SIZE;
	}

	new = min_t(int, local, peer);

	if (mdev->state.role == R_PRIMARY && new < now)
		dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);

	if (new != now)
		dev_info(DEV, "max BIO size = %u\n", new);

	drbd_setup_queue_param(mdev, new);
}
/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_tconn *tconn)
{
	drbd_thread_start(&tconn->worker);
	conn_flush_workqueue(tconn);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_tconn *tconn)
{
	bool stop_threads;
	spin_lock_irq(&tconn->req_lock);
	stop_threads = conn_all_vols_unconf(tconn);
	spin_unlock_irq(&tconn->req_lock);
	if (stop_threads) {
		/* asender is implicitly stopped by receiver
		 * in conn_disconnect() */
		drbd_thread_stop(&tconn->receiver);
		drbd_thread_stop(&tconn->worker);
	}
}
/* Make sure IO is suspended before calling this function(). */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (!lc_try_lock(mdev->act_log)) {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
	spin_unlock_irq(&mdev->tconn->req_lock);
	lc_unlock(mdev->act_log);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}
static bool should_set_defaults(struct genl_info *info)
{
	unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}

static void enforce_disk_conf_limits(struct disk_conf *dc)
{
	if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
		dc->al_extents = DRBD_AL_EXTENTS_MIN;
	if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
		dc->al_extents = DRBD_AL_EXTENTS_MAX;

	if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
}
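/* Change options of an attached disk.  The new disk_conf (and, if
 * c_plan_ahead changed, a new resync fifo) are published via
 * rcu_assign_pointer(); the old structures are freed after
 * synchronize_rcu(). */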
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_conf *mdev;
	struct disk_conf *new_disk_conf, *old_disk_conf;
	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
	int err, fifo_size;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	mutex_lock(&mdev->tconn->conf_update);
	old_disk_conf = mdev->ldev->disk_conf;
	*new_disk_conf = *old_disk_conf;
	if (should_set_defaults(info))
		set_disk_conf_defaults(new_disk_conf);

	err = disk_conf_from_attrs_for_change(new_disk_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
	}

	if (!expect(new_disk_conf->resync_rate >= 1))
		new_disk_conf->resync_rate = 1;

	enforce_disk_conf_limits(new_disk_conf);

	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s->size) {
		new_plan = fifo_alloc(fifo_size);
		if (!new_plan) {
			dev_err(DEV, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail_unlock;
		}
	}

	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
	drbd_al_shrink(mdev);
	err = drbd_check_al_size(mdev, new_disk_conf);
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);

	if (err) {
		retcode = ERR_NOMEM;
		goto fail_unlock;
	}

	write_lock_irq(&global_state_lock);
	retcode = drbd_sync_after_valid(mdev, new_disk_conf->resync_after);
	if (retcode == NO_ERROR) {
		rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
		drbd_sync_after_changed(mdev);
	}
	write_unlock_irq(&global_state_lock);

	if (retcode != NO_ERROR)
		goto fail_unlock;

	if (new_plan) {
		old_plan = mdev->rs_plan_s;
		rcu_assign_pointer(mdev->rs_plan_s, new_plan);
	}

	mutex_unlock(&mdev->tconn->conf_update);
	drbd_md_sync(mdev);

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev);

	synchronize_rcu();
	kfree(old_disk_conf);
	kfree(old_plan);
	goto success;

fail_unlock:
	mutex_unlock(&mdev->tconn->conf_update);
 fail:
	kfree(new_disk_conf);
	kfree(new_plan);
success:
	put_ldev(mdev);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}
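/* Attach a backing device: allocate and validate the new configuration,
 * open the backing and meta data devices, read the meta data, and finally
 * transition D_DISKLESS -> D_ATTACHING -> D_NEGOTIATING (or the disk
 * state derived from the meta data flags). */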
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int err;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct disk_conf *new_disk_conf = NULL;
	struct block_device *bdev;
	struct lru_cache *resync_lru = NULL;
	struct fifo_buffer *new_plan = NULL;
	union drbd_state ns, os;
	enum drbd_state_rv rv;
	struct net_conf *nc;
	int cp_discovered = 0;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	mdev = adm_ctx.mdev;
	conn_reconfig_start(mdev->tconn);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}
	/* It may just now have detached because of IO error.  Make sure
	 * drbd_ldev_destroy is done already, we may end up here very fast,
	 * e.g. if someone calls attach from the on-io-error handler,
	 * to realize a "hot spare" feature (not that I'd recommend that) */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));

	/* allocation not in the IO path, drbdsetup context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	nbc->disk_conf = new_disk_conf;

	set_disk_conf_defaults(new_disk_conf);
	err = disk_conf_from_attrs(new_disk_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	enforce_disk_conf_limits(new_disk_conf);

	new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
	if (!new_plan) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc) {
		if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
			rcu_read_unlock();
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}
	rcu_read_unlock();

	bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_DISK;
		goto fail;
	}
	nbc->backing_bdev = bdev;

	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device.  TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  (new_disk_conf->meta_dev_idx < 0) ?
				  (void *)mdev : (void *)drbd_m_holder);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}
	nbc->md_bdev = bdev;

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			1, 61, sizeof(struct bm_extent),
			offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) new_disk_conf->disk_size);
		retcode = ERR_DISK_TO_SMALL;
		goto fail;
	}

	if (new_disk_conf->meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (new_disk_conf->meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
			 "at least %llu sectors needed for this meta-disk type\n",
			 (unsigned long long) min_md_device_sectors);
		goto fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TO_SMALL;
		goto fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			"to currently maximum possible %llu sectors <==\n",
			(unsigned long long) max_possible_sectors);
		if (new_disk_conf->meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				      "meta data may help <<==\n");
	}

	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	retcode = rv;  /* FIXME: Type mismatch. */
	drbd_resume_io(mdev);
	if (rv < SS_SUCCESS)
		goto fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev, new_disk_conf)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TO_SMALL;
		goto force_diskless_dec;
	}

	if (!drbd_al_read_log(mdev, nbc)) {
		retcode = ERR_IO_MD_DISK;
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (new_disk_conf->md_flushes)
		clear_bit(MD_NO_FUA, &mdev->flags);
	else
		set_bit(MD_NO_FUA, &mdev->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere. */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	mdev->rs_plan_s = new_plan;
	nbc = NULL;
	resync_lru = NULL;
	new_disk_conf = NULL;
	new_plan = NULL;

	mdev->write_ordering = WO_bdev_flush;
	drbd_bump_write_ordering(mdev, WO_bdev_flush);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod)) {
		set_bit(CRASHED_PRIMARY, &mdev->flags);
		cp_discovered = 1;
	}

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;
	mdev->read_cnt = 0;
	mdev->writ_cnt = 0;

	drbd_reconsider_max_bio_size(mdev);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	if (mdev->state.role != R_PRIMARY &&
	     drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &mdev->flags);

	dd = drbd_determine_dev_size(mdev, 0);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == grew)
		set_bit(RESYNC_AFTER_NEG, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
		     "(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
			"set_n_write from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read,
			"read from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (cp_discovered) {
		drbd_al_apply_to_bm(mdev);
		if (drbd_bitmap_io(mdev, &drbd_bm_write,
			"crashed primary apply AL", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
		drbd_suspend_al(mdev); /* IO is still suspended here... */

	spin_lock_irq(&mdev->tconn->req_lock);
	os = drbd_read_state(mdev);
	ns = os;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WasUpToDate...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	rcu_read_lock();
	if (ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;
	rcu_read_unlock();

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		   To avoid a race in receive_state, free p_uuid while
		   holding req_lock. I.e. atomic with the state change */
		kfree(mdev->p_uuid);
		mdev->p_uuid = NULL;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	conn_reconfig_done(mdev->tconn);
	drbd_adm_finish(info, retcode);
	return 0;

 force_diskless_dec:
	put_ldev(mdev);
 force_diskless:
	drbd_force_state(mdev, NS(disk, D_FAILED));
	drbd_md_sync(mdev);
 fail:
	conn_reconfig_done(mdev->tconn);
	if (nbc) {
		if (nbc->backing_bdev)
			blkdev_put(nbc->backing_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		if (nbc->md_bdev)
			blkdev_put(nbc->md_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		kfree(nbc);
	}
	kfree(new_disk_conf);
	lc_destroy(resync_lru);
	kfree(new_plan);

 finish:
	drbd_adm_finish(info, retcode);
	return 0;
}
static int adm_detach(struct drbd_conf *mdev)
{
	enum drbd_state_rv retcode;
	int ret;
	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
	retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
	/* D_FAILED will transition to DISKLESS. */
	ret = wait_event_interruptible(mdev->misc_wait,
			mdev->state.disk != D_FAILED);
	drbd_resume_io(mdev);
	if ((int)retcode == (int)SS_IS_DISKLESS)
		retcode = SS_NOTHING_TO_DO;
	if (ret)
		retcode = ERR_INTR;
	return retcode;
}
/* Detaching the disk is a process in multiple stages.  First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then we have finally detached. */
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = adm_detach(adm_ctx.mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
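/* Helpers for drbd_adm_net_opts(): changing csums-alg or verify-alg is
 * refused while a resync or an online verify is running on any volume. */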
static bool conn_resync_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_SYNC_SOURCE ||
		    mdev->state.conn == C_SYNC_TARGET ||
		    mdev->state.conn == C_PAUSED_SYNC_S ||
		    mdev->state.conn == C_PAUSED_SYNC_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}

static bool conn_ov_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_VERIFY_S ||
		    mdev->state.conn == C_VERIFY_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}
static enum drbd_ret_code
_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
{
	struct drbd_conf *mdev;
	int i;

	if (old_conf && tconn->agreed_pro_version < 100 &&
	    tconn->cstate == C_WF_REPORT_PARAMS &&
	    new_conf->wire_protocol != old_conf->wire_protocol)
		return ERR_NEED_APV_100;

	if (new_conf->two_primaries &&
	    (new_conf->wire_protocol != DRBD_PROT_C))
		return ERR_NOT_PROTO_C;

	idr_for_each_entry(&tconn->volumes, mdev, i) {
		if (get_ldev(mdev)) {
			enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
			put_ldev(mdev);
			if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
				return ERR_STONITH_AND_PROT_A;
		}
		if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
			return ERR_DISCARD;
	}

	if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
		return ERR_CONG_NOT_PROTO_A;

	return NO_ERROR;
}
static enum drbd_ret_code
check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
{
	static enum drbd_ret_code rv;
	struct drbd_conf *mdev;
	int i;

	rcu_read_lock();
	rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
	rcu_read_unlock();

	/* tconn->volumes protected by genl_lock() here */
	idr_for_each_entry(&tconn->volumes, mdev, i) {
		if (!mdev->bitmap) {
			if(drbd_bm_init(mdev))
				return ERR_NOMEM;
		}
	}

	return rv;
}
struct crypto {
	struct crypto_hash *verify_tfm;
	struct crypto_hash *csums_tfm;
	struct crypto_hash *cram_hmac_tfm;
	struct crypto_hash *integrity_tfm;
	void *int_dig_in;
	void *int_dig_vv;
};

static int
alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
{
	if (!tfm_name[0])
		return NO_ERROR;

	*tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(*tfm)) {
		*tfm = NULL;
		return err_alg;
	}

	return NO_ERROR;
}
static enum drbd_ret_code
alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
{
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	enum drbd_ret_code rv;
	int hash_size;

	rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
			ERR_CSUMS_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
			ERR_VERIFY_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
			ERR_INTEGRITY_ALG);
	if (rv != NO_ERROR)
		return rv;
	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			 new_conf->cram_hmac_alg);

		rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
				ERR_AUTH_ALG);
	}
	if (crypto->integrity_tfm) {
		hash_size = crypto_hash_digestsize(crypto->integrity_tfm);
		crypto->int_dig_in = kmalloc(hash_size, GFP_KERNEL);
		if (!crypto->int_dig_in)
			return ERR_NOMEM;
		crypto->int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
		if (!crypto->int_dig_vv)
			return ERR_NOMEM;
	}

	return rv;
}
static void free_crypto(struct crypto *crypto)
{
	kfree(crypto->int_dig_in);
	kfree(crypto->int_dig_vv);
	crypto_free_hash(crypto->cram_hmac_tfm);
	crypto_free_hash(crypto->integrity_tfm);
	crypto_free_hash(crypto->csums_tfm);
	crypto_free_hash(crypto->verify_tfm);
}
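/* Change options of an established network configuration.  New crypto
 * transforms are allocated up front; the new net_conf is published via
 * rcu_assign_pointer() and the old one freed after synchronize_rcu(). */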
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_tconn *tconn;
	struct net_conf *old_conf, *new_conf = NULL;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto crypto = { };
	bool change_integrity_alg;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tconn = adm_ctx.tconn;

	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto out;
	}

	conn_reconfig_start(tconn);

	mutex_lock(&tconn->data.mutex);
	mutex_lock(&tconn->conf_update);
	old_conf = tconn->net_conf;

	if (!old_conf) {
		drbd_msg_put_info("net conf missing, try connect");
		retcode = ERR_INVALID_REQUEST;
		goto fail;
	}

	*new_conf = *old_conf;
	if (should_set_defaults(info))
		set_net_conf_defaults(new_conf);

	err = net_conf_from_attrs_for_change(new_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(tconn, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	/* re-sync running */
	rsr = conn_resync_running(tconn);
	if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	/* online verify running */
	ovr = conn_ov_running(tconn);
	if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
		retcode = ERR_VERIFY_RUNNING;
		goto fail;
	}

	change_integrity_alg = strcmp(old_conf->integrity_alg,
				      new_conf->integrity_alg);

	retcode = alloc_crypto(&crypto, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	rcu_assign_pointer(tconn->net_conf, new_conf);

	if (!rsr) {
		crypto_free_hash(tconn->csums_tfm);
		tconn->csums_tfm = crypto.csums_tfm;
		crypto.csums_tfm = NULL;
	}
	if (!ovr) {
		crypto_free_hash(tconn->verify_tfm);
		tconn->verify_tfm = crypto.verify_tfm;
		crypto.verify_tfm = NULL;
	}

	kfree(tconn->int_dig_in);
	tconn->int_dig_in = crypto.int_dig_in;
	kfree(tconn->int_dig_vv);
	tconn->int_dig_vv = crypto.int_dig_vv;
	crypto_free_hash(tconn->integrity_tfm);
	tconn->integrity_tfm = crypto.integrity_tfm;
	if (change_integrity_alg) {
		/* Do this without trying to take tconn->data.mutex again.  */
		if (__drbd_send_protocol(tconn))
			goto fail;
	}

	/* FIXME Changing cram_hmac while the connection is established is useless */
	crypto_free_hash(tconn->cram_hmac_tfm);
	tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;

	mutex_unlock(&tconn->conf_update);
	mutex_unlock(&tconn->data.mutex);
	synchronize_rcu();
	kfree(old_conf);

	if (tconn->cstate >= C_WF_REPORT_PARAMS)
		drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));

	goto done;

 fail:
	mutex_unlock(&tconn->conf_update);
	mutex_unlock(&tconn->data.mutex);
	free_crypto(&crypto);
	kfree(new_conf);
 done:
	conn_reconfig_done(tconn);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	struct net_conf *old_conf, *new_conf = NULL;
	struct crypto crypto = { };
	struct drbd_tconn *oconn;
	struct drbd_tconn *tconn;
	struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
	enum drbd_ret_code retcode;
	int i;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tconn = adm_ctx.tconn;
	conn_reconfig_start(tconn);

	if (tconn->cstate > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	set_net_conf_defaults(new_conf);

	err = net_conf_from_attrs(new_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(tconn, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	retcode = NO_ERROR;

	new_my_addr = (struct sockaddr *)&new_conf->my_addr;
	new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;

	/* No need for _rcu here. All reconfiguration is
	 * strictly serialized on genl_lock(). We are protected against
	 * concurrent reconfiguration/addition/deletion */
	list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
		struct net_conf *nc;
		if (oconn == tconn)
			continue;

		rcu_read_lock();
		nc = rcu_dereference(oconn->net_conf);
		if (nc) {
			taken_addr = (struct sockaddr *)&nc->my_addr;
			if (new_conf->my_addr_len == nc->my_addr_len &&
			    !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
				retcode = ERR_LOCAL_ADDR;

			taken_addr = (struct sockaddr *)&nc->peer_addr;
			if (new_conf->peer_addr_len == nc->peer_addr_len &&
			    !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
				retcode = ERR_PEER_ADDR;
		}
		rcu_read_unlock();
		if (retcode != NO_ERROR)
			goto fail;
	}

	retcode = alloc_crypto(&crypto, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	conn_flush_workqueue(tconn);

	mutex_lock(&tconn->conf_update);
	old_conf = tconn->net_conf;
	if (old_conf) {
		retcode = ERR_NET_CONFIGURED;
		mutex_unlock(&tconn->conf_update);
		goto fail;
	}
	rcu_assign_pointer(tconn->net_conf, new_conf);

	conn_free_crypto(tconn);
	tconn->int_dig_in = crypto.int_dig_in;
	tconn->int_dig_vv = crypto.int_dig_vv;
	tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
	tconn->integrity_tfm = crypto.integrity_tfm;
	tconn->csums_tfm = crypto.csums_tfm;
	tconn->verify_tfm = crypto.verify_tfm;

	mutex_unlock(&tconn->conf_update);

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, i) {
		mdev->send_cnt = 0;
		mdev->recv_cnt = 0;
	}
	rcu_read_unlock();

	retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);

	conn_reconfig_done(tconn);
	drbd_adm_finish(info, retcode);
	return 0;

fail:
	free_crypto(&crypto);
	kfree(new_conf);

	conn_reconfig_done(tconn);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
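/* Request C_DISCONNECTING, retrying with an outdated disk/pdsk where the
 * state engine demands it, then wait for the receiver to terminate and
 * force C_STANDALONE as a race breaker. */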
static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
{
	enum drbd_state_rv rv;

	rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
			force ? CS_HARD : 0);

	switch (rv) {
	case SS_NOTHING_TO_DO:
		break;
	case SS_ALREADY_STANDALONE:
		return SS_SUCCESS;
	case SS_PRIMARY_NOP:
		/* Our state checking code wants to see the peer outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						pdsk, D_OUTDATED), CS_VERBOSE);
		break;
	case SS_CW_FAILED_BY_PEER:
		/* The peer probably wants to see us outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						disk, D_OUTDATED), 0);
		if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
			rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
					CS_HARD);
		}
		break;
	default:;
		/* no special handling necessary */
	}

	if (rv >= SS_SUCCESS) {
		enum drbd_state_rv rv2;
		/* No one else can reconfigure the network while I am here.
		 * The state handling only uses drbd_thread_stop_nowait(),
		 * we want to really wait here until the receiver is no more.
		 */
		drbd_thread_stop(&adm_ctx.tconn->receiver);

		/* Race breaker.  This additional state change request may be
		 * necessary, if this was a forced disconnect during a receiver
		 * restart.  We may have "killed" the receiver thread just
		 * after drbdd_init() returned.  Typically, we should be
		 * C_STANDALONE already, now, and this becomes a no-op.
		 */
		rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
				CS_VERBOSE | CS_HARD);
		if (rv2 < SS_SUCCESS)
			conn_err(tconn,
				"unexpected rv2=%d in conn_try_disconnect()\n",
				rv2);
	}
	return rv;
}
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct disconnect_parms parms;
	struct drbd_tconn *tconn;
	enum drbd_state_rv rv;
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	tconn = adm_ctx.tconn;
	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
		err = disconnect_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	rv = conn_try_disconnect(tconn, parms.force_disconnect);
	if (rv < SS_SUCCESS)
		retcode = rv;  /* FIXME: Type mismatch. */
	else
		retcode = NO_ERROR;
 fail:
	drbd_adm_finish(info, retcode);
	return 0;
}
void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
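/* Resize the device.  An explicit user-provided size is installed as a
 * new disk_conf via RCU before drbd_determine_dev_size() re-evaluates
 * all constraints; a grown device sets RESIZE_PENDING and announces the
 * new size to the peer. */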
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
	struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
	struct resize_parms rs;
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	enum dds_flags ddsf;
	sector_t u_size;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	memset(&rs, 0, sizeof(struct resize_parms));
	if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
		err = resize_parms_from_attrs(&rs, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	mdev = adm_ctx.mdev;
	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		goto fail_ldev;
	}

	rcu_read_lock();
	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	if (u_size != (sector_t)rs.resize_size) {
		new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
		if (!new_disk_conf) {
			retcode = ERR_NOMEM;
			goto fail_ldev;
		}
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	if (new_disk_conf) {
		mutex_lock(&mdev->tconn->conf_update);
		old_disk_conf = mdev->ldev->disk_conf;
		*new_disk_conf = *old_disk_conf;
		new_disk_conf->disk_size = (sector_t)rs.resize_size;
		rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
		mutex_unlock(&mdev->tconn->conf_update);
		synchronize_rcu();
		kfree(old_disk_conf);
	}

	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determine_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

fail:
	drbd_adm_finish(info, retcode);
	return 0;

fail_ldev:
	put_ldev(mdev);
	goto fail;
}

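/* The disk_conf update in drbd_adm_resize() above follows the usual RCU
 * publish pattern: copy the old struct, modify the copy under the
 * conf_update mutex, rcu_assign_pointer() the new one in, wait for
 * readers with synchronize_rcu(), then free the old struct.  Readers
 * correspondingly only ever do:
 *
 *	rcu_read_lock();
 *	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
 *	rcu_read_unlock();
 *
 * as seen near the top of that function.
 */
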
void drbd_set_res_opts_defaults(struct res_opts *r)
{
	set_res_opts_defaults(r);
}

int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	cpumask_var_t new_cpu_mask;
	struct drbd_tconn *tconn;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;
	tconn = adm_ctx.tconn;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
		retcode = ERR_NOMEM;
		drbd_msg_put_info("unable to allocate cpumask");
		goto out;
	}

	res_opts = tconn->res_opts;
	if (should_set_defaults(info))
		set_res_opts_defaults(&res_opts);

	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && res_opts.cpu_mask[0] != 0) {
		err = __bitmap_parse(res_opts.cpu_mask, 32, 0,
				     cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
			retcode = ERR_CPU_MASK_PARSE;
			goto fail;
		}
	}

	tconn->res_opts = res_opts;

	if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
		cpumask_copy(tconn->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(tconn);
		/* make each thread re-apply its CPU affinity on next wakeup */
		tconn->receiver.reset_cpu_mask = 1;
		tconn->asender.reset_cpu_mask = 1;
		tconn->worker.reset_cpu_mask = 1;
	}

fail:
	free_cpumask_var(new_cpu_mask);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

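/* Illustrative example for the cpu_mask handling above: a cpu_mask
 * string of "3" parses as the hex bitmap 0x3, i.e. CPU0 and CPU1.  The
 * reset_cpu_mask flags then make each of the three per-resource threads
 * re-apply the new affinity the next time it runs through its main loop.
 */
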
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->tconn->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->tconn->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
	int rv;

	rv = drbd_bmio_set_n_write(mdev);
	drbd_suspend_al(mdev);
	return rv;
}

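/* Suspending the activity log right after setting all bits is sound:
 * once the whole device is marked out-of-sync anyway, per-extent AL
 * bookkeeping adds no information until the resync has finished.
 */
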
static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
		union drbd_state mask, union drbd_state val)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_request_state(adm_ctx.mdev, mask, val);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
}

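/* Note: NS(T, S) expands to a mask/value *pair*, which is why the call
 * above matches drbd_adm_simple_request_state()'s (mask, val) signature
 * despite looking like a single argument.
 */
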
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
	union drbd_dev_state s;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = adm_ctx.mdev->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}

int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
	}
	drbd_suspend_io(mdev);
	retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (retcode == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev->tconn);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
	}
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}

int nla_put_drbd_cfg_context(struct sk_buff *skb, const char *conn_name, unsigned vnr)
{
	struct nlattr *nla;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		goto nla_put_failure;
	if (vnr != VOLUME_UNSPECIFIED)
		NLA_PUT_U32(skb, T_ctx_volume, vnr);
	NLA_PUT_STRING(skb, T_ctx_conn_name, conn_name);
	nla_nest_end(skb, nla);
	return 0;

nla_put_failure:
	if (nla)
		nla_nest_cancel(skb, nla);
	return -EMSGSIZE;
}

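/* Note: the NLA_PUT_*() macros used above expand to a "goto
 * nla_put_failure" on skb overflow, which is why that label is reached
 * even though no explicit goto for those calls is visible.
 */
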
int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
		const struct sib_info *sib)
{
	struct state_info *si = NULL; /* for sizeof(si->member); */
	struct net_conf *nc;
	struct nlattr *nla;
	int got_ldev;
	int err = 0;
	int exclude_sensitive;

	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
	 * to.  So we better exclude_sensitive information.
	 *
	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
	 * in the context of the requesting user process. Exclude sensitive
	 * information, unless current has superuser.
	 *
	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
	 * relies on the current implementation of netlink_dump(), which
	 * executes the dump callback successively from netlink_recvmsg(),
	 * always in the context of the receiving process */
	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);

	got_ldev = get_ldev(mdev);

	/* We need to add connection name and volume number information still.
	 * Minor number is in drbd_genlmsghdr. */
	if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
		goto nla_put_failure;

	if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
		goto nla_put_failure;

	rcu_read_lock();
	if (got_ldev)
		if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
			goto nla_put_failure;

	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc)
		err = net_conf_to_skb(skb, nc, exclude_sensitive);
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
	if (!nla)
		goto nla_put_failure;
	NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
	NLA_PUT_U32(skb, T_current_state, mdev->state.i);
	NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
	NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));

	if (got_ldev) {
		NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
		NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
		NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
		NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
		if (C_SYNC_SOURCE <= mdev->state.conn &&
		    C_PAUSED_SYNC_T >= mdev->state.conn) {
			NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
			NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
		}
	}

	if (sib) {
		switch (sib->sib_reason) {
		case SIB_SYNC_PROGRESS:
		case SIB_GET_STATUS_REPLY:
			break;
		case SIB_STATE_CHANGE:
			NLA_PUT_U32(skb, T_prev_state, sib->os.i);
			NLA_PUT_U32(skb, T_new_state, sib->ns.i);
			break;
		case SIB_HELPER_POST:
			NLA_PUT_U32(skb,
				T_helper_exit_code, sib->helper_exit_code);
			/* fall through */
		case SIB_HELPER_PRE:
			NLA_PUT_STRING(skb, T_helper, sib->helper_name);
			break;
		}
	}
	nla_nest_end(skb, nla);

	if (0)
nla_put_failure:
		err = -EMSGSIZE;
	if (got_ldev)
		put_ldev(mdev);
	return err;
}

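/* The "if (0) nla_put_failure:" construct at the end of
 * nla_put_status_info() is a space-saving idiom: the normal path skips
 * the assignment, while the NLA_PUT_*() macros jump to the label, so
 * success and failure share the common put_ldev() cleanup and the
 * single return.
 */
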
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_conf *mdev;
	struct drbd_genlmsghdr *dh;
	struct drbd_tconn *pos = (struct drbd_tconn *)cb->args[0];
	struct drbd_tconn *tconn = NULL;
	struct drbd_tconn *tmp;
	unsigned volume = cb->args[1];

	/* Open coded, deferred, iteration:
	 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
	 *	idr_for_each_entry(&tconn->volumes, mdev, i) {
	 *	  ...
	 *	}
	 * }
	 * where tconn is cb->args[0],
	 * and i is cb->args[1].
	 *
	 * cb->args[2] indicates if we shall loop over all resources,
	 * or just dump all volumes of a single resource.
	 *
	 * This may miss entries inserted after this dump started,
	 * or entries deleted before they are reached.
	 *
	 * We need to make sure the mdev won't disappear while
	 * we are looking at it, and revalidate our iterators
	 * on each iteration.
	 */

	/* synchronize with conn_create()/conn_destroy() */
	rcu_read_lock();
	/* revalidate iterator position */
	list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
		if (pos == NULL) {
			/* first iteration */
			pos = tmp;
			tconn = pos;
			break;
		}
		if (tmp == pos) {
			tconn = pos;
			break;
		}
	}
	if (tconn) {
next_tconn:
		mdev = idr_get_next(&tconn->volumes, &volume);
		if (!mdev) {
			/* No more volumes to dump on this tconn.
			 * Advance tconn iterator. */
			pos = list_entry_rcu(tconn->all_tconn.next,
					     struct drbd_tconn, all_tconn);
			/* Did we dump any volume on this tconn yet? */
			if (volume != 0) {
				/* If we reached the end of the list,
				 * or only a single resource dump was requested,
				 * we are done. */
				if (&pos->all_tconn == &drbd_tconns || cb->args[2])
					goto out;
				volume = 0;
				tconn = pos;
				goto next_tconn;
			}
		}

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
				 cb->nlh->nlmsg_seq, &drbd_genl_family,
				 NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			goto out;

		if (!mdev) {
			/* this is a tconn without a single volume */
			dh->minor = -1U;
			dh->ret_code = NO_ERROR;
			if (nla_put_drbd_cfg_context(skb, tconn->name, VOLUME_UNSPECIFIED))
				genlmsg_cancel(skb, dh);
			else
				genlmsg_end(skb, dh);
			goto out;
		}

		D_ASSERT(mdev->vnr == volume);
		D_ASSERT(mdev->tconn == tconn);

		dh->minor = mdev_to_minor(mdev);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, mdev, NULL)) {
			genlmsg_cancel(skb, dh);
			goto out;
		}
		genlmsg_end(skb, dh);
	}

out:
	rcu_read_unlock();
	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == tconn) ? volume + 1 : 0;

	/* No more tconns/volumes/minors found results in an empty skb.
	 * Which will terminate the dump. */
	return skb->len;
}

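/* Reading aid for the dump state used by get_one_status():
 *
 *	cb->args[0]  struct drbd_tconn * to resume from (iterator position)
 *	cb->args[1]  next volume number (idr index) within that tconn
 *	cb->args[2]  if set, dump only this one tconn ("filter" mode)
 *
 * Each call emits at most one message into the skb and records where
 * the next call has to resume; an empty skb terminates the dump.
 */
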
/*
 * Request status of all resources, or of all volumes within a single resource.
 *
 * This is a dump, as the answer may not fit in a single reply skb otherwise.
 * Which means we cannot use the family->attrbuf or other such members, because
 * dump is NOT protected by the genl_lock(). During dump, we only have access
 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
 *
 * Once things are setup properly, we call into get_one_status().
 */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	struct nlattr *nla;
	const char *conn_name;
	struct drbd_tconn *tconn;

	/* Is this a followup call? */
	if (cb->args[0]) {
		/* ... of a single resource dump,
		 * and the resource iterator has been advanced already? */
		if (cb->args[2] && cb->args[2] != cb->args[0])
			return 0; /* DONE. */
		goto dump;
	}

	/* First call (from netlink_dump_start).  We need to figure out
	 * which resource(s) the user wants us to dump. */
	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
		       nlmsg_attrlen(cb->nlh, hdrlen),
		       DRBD_NLA_CFG_CONTEXT);

	/* No explicit context given.  Dump all. */
	if (!nla)
		goto dump;
	nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
	/* context given, but no name present? */
	if (!nla)
		return -EINVAL;
	conn_name = nla_data(nla);
	tconn = conn_get_by_name(conn_name);

	if (!tconn)
		return -ENODEV;

	kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */

	/* prime iterators, and set "filter" mode mark:
	 * only dump this tconn. */
	cb->args[0] = (long)tconn;
	/* cb->args[1] = 0; passed in this way. */
	cb->args[2] = (long)tconn;

dump:
	return get_one_status(skb, cb);
}

int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct timeout_parms tp;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tp.timeout_type =
		adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
		UT_DEFAULT;

	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		/* resume from last known position, if possible */
		struct start_ov_parms parms =
			{ .ov_start_sector = mdev->ov_start_sector };
		int err = start_ov_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
		/* w_make_ov_request expects position to be aligned;
		 * round down to the start of the containing bitmap bit */
		mdev->ov_start_sector = parms.ov_start_sector & ~(sector_t)(BM_SECT_PER_BIT - 1);
	}
	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

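/* Example for the alignment above (assuming BM_SECT_PER_BIT == 8, i.e.
 * one bitmap bit per 4KiB with 512-byte sectors): a requested start
 * sector of 1001 is rounded down to 1000, the first sector covered by
 * the containing bitmap bit.  Masking with ~(BM_SECT_PER_BIT - 1)
 * clears all the low bits, not just one.
 */
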
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out_nolock;

	mdev = adm_ctx.mdev;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			drbd_print_uuids(mdev, "cleared bitmap UUID");
			spin_lock_irq(&mdev->tconn->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->tconn->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(mdev->state_mutex);
out_nolock:
	drbd_adm_finish(info, retcode);
	return 0;
}

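/* UUID bookkeeping in drbd_adm_new_c_uuid(), illustrated: starting from
 * a freshly created meta data set (UI_CURRENT == UUID_JUST_CREATED),
 * drbd_uuid_set(UI_BITMAP, 0) rotates the old bitmap UUID into the
 * history slots, drbd_uuid_new_current() generates a fresh current
 * UUID, and clearing the bitmap (args.clear_bm) then lets both peers
 * agree that no initial resync is necessary.
 */
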
static enum drbd_ret_code
drbd_check_conn_name(const char *name)
{
	if (!name || !name[0]) {
		drbd_msg_put_info("connection name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info("invalid connection name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}

int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_check_conn_name(adm_ctx.conn_name);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx.tconn) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info("connection exists");
		}
		/* else: still NO_ERROR */
		goto out;
	}

	if (!conn_create(adm_ctx.conn_name))
		retcode = ERR_NOMEM;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_genlmsghdr *dh = info->userhdr;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	/* FIXME drop minor_count parameter, limit to MINORMASK */
	if (dh->minor >= minor_count) {
		drbd_msg_put_info("requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}
	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
		drbd_msg_put_info("requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* drbd_adm_prepare made sure already
	 * that mdev->tconn and mdev->vnr match the request. */
	if (adm_ctx.mdev) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			retcode = ERR_MINOR_EXISTS;
		/* else: still NO_ERROR */
		goto out;
	}

	retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
{
	if (mdev->state.disk == D_DISKLESS &&
	    /* no need to be mdev->state.conn == C_STANDALONE &&
	     * we may want to delete a minor from a live replication group.
	     */
	    mdev->state.role == R_SECONDARY) {
		idr_remove(&mdev->tconn->volumes, mdev->vnr);
		idr_remove(&minors, mdev_to_minor(mdev));
		del_gendisk(mdev->vdisk);
		synchronize_rcu();
		kref_put(&mdev->kref, &drbd_minor_destroy);
		return NO_ERROR;
	} else
		return ERR_MINOR_CONFIGURED;
}

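/* Teardown ordering in adm_delete_minor(): the idr_remove() calls make
 * the minor unreachable for new lookups, synchronize_rcu() waits out
 * any lookup still in flight, and only then may the last kref be
 * dropped.
 */
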
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = adm_delete_minor(adm_ctx.mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
	struct drbd_conf *mdev;
	unsigned i;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (!adm_ctx.tconn) {
		retcode = ERR_CONN_NOT_KNOWN;
		goto out;
	}

	/* demote */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = drbd_set_role(mdev, R_SECONDARY, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to demote");
			goto out;
		}
	}

	retcode = conn_try_disconnect(adm_ctx.tconn, 0);
	if (retcode < SS_SUCCESS) {
		drbd_msg_put_info("failed to disconnect");
		goto out;
	}

	/* detach */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_detach(mdev);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to detach");
			goto out;
		}
	}

	/* If we reach this, all volumes (of this tconn) are Secondary,
	 * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
	 * actually stopped, state handling only does drbd_thread_stop_nowait(). */
	drbd_thread_stop(&adm_ctx.tconn->worker);

	/* Now, nothing can fail anymore */

	/* delete volumes */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_delete_minor(mdev);
		if (retcode != NO_ERROR) {
			/* "can not happen" */
			drbd_msg_put_info("failed to delete volume");
			goto out;
		}
	}

	/* delete connection */
	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		/* "can not happen" */
		retcode = ERR_CONN_IN_USE;
		drbd_msg_put_info("failed to delete connection");
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

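/* drbd_adm_down() thus performs the full "down" sequence in order:
 * demote all volumes to Secondary, disconnect, detach all volumes,
 * stop the worker, delete all minors, and finally delete the
 * connection itself.  Any failure before the "nothing can fail
 * anymore" point is reported back and leaves the resource partially
 * configured.
 */
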
int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		retcode = ERR_CONN_IN_USE;
	}

	if (retcode == NO_ERROR)
		drbd_thread_stop(&adm_ctx.tconn->worker);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
{
	static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyways. */
		goto nla_put_failure;
	d_out->minor = mdev_to_minor(mdev);
	d_out->ret_code = NO_ERROR;

	if (nla_put_status_info(msg, mdev, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, 0);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	dev_err(DEV, "Error %d while broadcasting event. "
		     "Event seq:%u sib_reason:%u\n",
		     err, seq, sib->sib_reason);
}