/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/in.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
#include <linux/drbd_genl_api.h>
#include "drbd_nla.h"
#include <linux/genl_magic_func.h>
/* used by blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	const char *resource_name;
	struct nlattr *my_addr;
	struct nlattr *peer_addr;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn;
} adm_ctx;
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		printk(KERN_ERR "drbd: error sending genl reply\n");
}
/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail was no space in skb, and there are 4k available. */
int drbd_msg_put_info(const char *info)
{
	struct sk_buff *skb = adm_ctx.reply_skb;
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}
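
/* Editor's example, grounded in the handlers below: on a bad request we
 * reply with e.g.
 *	drbd_msg_put_info("unknown minor");
 * which nests the text as a T_info_text string inside a DRBD_NLA_CFG_REPLY
 * attribute of the prepared reply skb. */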
/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_RESOURCE	2
#define DRBD_ADM_NEED_CONNECTION 4
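/* The DRBD_ADM_NEED_* flags are bit values and may be or'ed together.
 * Typical use, as in the handlers below:
 *	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
 * on success, adm_ctx.mdev is then guaranteed to be resolved. */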
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
		unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(&adm_ctx, 0, sizeof(adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS
	&& security_netlink_recv(skb, CAP_SYS_ADMIN))
		return -EPERM;

	adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx.reply_skb) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* put of a few bytes into a fresh skb of >= 4k will always succeed.
	 * but anyways */
	if (!adm_ctx.reply_dh) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx.reply_dh->minor = d_in->minor;
	adm_ctx.reply_dh->ret_code = NO_ERROR;

	adm_ctx.volume = VOLUME_UNSPECIFIED;
	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx.reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the global adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		if (nla)
			adm_ctx.volume = nla_get_u32(nla);
		nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
		if (nla)
			adm_ctx.resource_name = nla_data(nla);
		adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
		adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
		if ((adm_ctx.my_addr &&
		     nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) ||
		    (adm_ctx.peer_addr &&
		     nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) {
			err = -EINVAL;
			goto fail;
		}
	}

	adm_ctx.minor = d_in->minor;
	adm_ctx.mdev = minor_to_mdev(d_in->minor);
	adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);

	if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info("unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) {
		drbd_msg_put_info("unknown resource");
		return ERR_INVALID_REQUEST;
	}

	if (flags & DRBD_ADM_NEED_CONNECTION) {
		if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) {
			drbd_msg_put_info("no resource name expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx.mdev) {
			drbd_msg_put_info("no minor number expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx.my_addr && adm_ctx.peer_addr)
			adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
							  nla_len(adm_ctx.my_addr),
							  nla_data(adm_ctx.peer_addr),
							  nla_len(adm_ctx.peer_addr));
		if (!adm_ctx.tconn) {
			drbd_msg_put_info("unknown connection");
			return ERR_INVALID_REQUEST;
		}
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx.mdev && adm_ctx.tconn &&
	    adm_ctx.mdev->tconn != adm_ctx.tconn) {
		pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
				adm_ctx.minor, adm_ctx.resource_name,
				adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists in different resource");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx.mdev &&
	    adm_ctx.volume != VOLUME_UNSPECIFIED &&
	    adm_ctx.volume != adm_ctx.mdev->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
				adm_ctx.minor, adm_ctx.volume,
				adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx.reply_skb);
	adm_ctx.reply_skb = NULL;
	return err;
}
static int drbd_adm_finish(struct genl_info *info, int retcode)
{
	if (adm_ctx.tconn) {
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);
		adm_ctx.tconn = NULL;
	}

	if (!adm_ctx.reply_skb)
		return -ENOMEM;

	adm_ctx.reply_dh->ret_code = retcode;
	drbd_adm_send_reply(adm_ctx.reply_skb, info);
	return 0;
}
static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
{
	char *afs;

	/* FIXME: A future version will not allow this case. */
	if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0)
		return;

	switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) {
	case AF_INET6:
		afs = "ipv6";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
			 &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr);
		break;
	case AF_INET:
		afs = "ipv4";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
		break;
	default:
		afs = "ssocks";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
	}
	snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}
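
/* Illustration only: for an IPv4 peer at 192.0.2.7 (a made-up documentation
 * address), the two envp slots filled in above end up as
 *	DRBD_PEER_AF=ipv4
 *	DRBD_PEER_ADDRESS=192.0.2.7
 */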
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[12];
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	struct drbd_tconn *tconn = mdev->tconn;
	struct sib_info sib;
	int ret;

	if (current == tconn->worker.task)
		set_bit(CALLBACK_PENDING, &tconn->flags);

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
	setup_khelper_env(tconn, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(mdev, &sib);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(mdev, &sib);

	if (current == tconn->worker.task)
		clear_bit(CALLBACK_PENDING, &tconn->flags);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
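
/* Note on the exit code logging above: call_usermodehelper() hands back a
 * wait()-style status, so "(ret >> 8) & 0xff" extracts the helper's actual
 * exit code; a handler ending with exit(5) is reported as exit code 5. */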
int conn_khelper(struct drbd_tconn *tconn, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
	int ret;

	setup_khelper_env(tconn, envp);
	conn_md_sync(tconn);

	conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
	/* TODO: conn_bcast_event() ?? */

	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	else
		conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (get_ldev_if_state(mdev, D_CONSISTENT)) {
			fp = max_t(enum drbd_fencing_p, fp,
				   rcu_dereference(mdev->ldev->disk_conf)->fencing);
			put_ldev(mdev);
		}
	}
	rcu_read_unlock();

	return fp;
}
bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	if (tconn->cstate >= C_WF_REPORT_PARAMS) {
		conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
		return false;
	}

	fp = highest_fencing_policy(tconn);
	switch (fp) {
	case FP_NOT_AVAIL:
		conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
		goto out;
	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(tconn, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		conn_warn(tconn, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case 7:
		if (fp != FP_STONITH)
			conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	conn_info(tconn, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

 out:

	/* Not using
	   conn_request_state(tconn, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the connection in the
	   meantime. */
	spin_lock_irq(&tconn->req_lock);
	if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags))
		_conn_request_state(tconn, mask, val, CS_VERBOSE);
	spin_unlock_irq(&tconn->req_lock);

	return conn_highest_pdsk(tconn) <= D_OUTDATED;
}
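
/* Summary of the fence-peer handler exit codes handled above:
 *	3: peer is inconsistent   -> pdsk = D_INCONSISTENT
 *	4: peer was fenced        -> pdsk = D_OUTDATED
 *	5: peer was down          -> pdsk = D_OUTDATED, only if we are D_UP_TO_DATE
 *	6: peer is primary        -> outdate our own disk instead
 *	7: peer was stonithed     -> pdsk = D_OUTDATED
 * Any other exit code is treated as a broken script, leaving IO frozen. */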
static int _try_outdate_peer_async(void *data)
{
	struct drbd_tconn *tconn = (struct drbd_tconn *)data;

	conn_try_outdate_peer(tconn);

	kref_put(&tconn->kref, &conn_destroy);
	return 0;
}
void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
	struct task_struct *opa;

	kref_get(&tconn->kref);
	opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
	if (IS_ERR(opa)) {
		conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&tconn->kref, &conn_destroy);
	}
}
enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY)
		request_ping(mdev->tconn); /* Detect a dead peer ASAP */

	mutex_lock(mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(mdev->tconn)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(mdev->tconn) && force) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk  = D_OUTDATED;
			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			int timeo;
			rcu_read_lock();
			nc = rcu_dereference(mdev->tconn->net_conf);
			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
			rcu_read_unlock();
			schedule_timeout_interruptible(timeo);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	/* FIXME also wait for all pending P_BARRIER_ACK? */

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		mutex_lock(&mdev->tconn->conf_update);
		nc = mdev->tconn->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&mdev->tconn->conf_update);

		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_current_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(mdev->state_mutex);
	return rv;
}
static const char *from_attrs_err_to_txt(int err)
{
	return err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}
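
/* Typical use, as in the handlers below:
 *	drbd_msg_put_info(from_attrs_err_to_txt(err));
 * so drbdsetup sees e.g. "required attribute missing" for -ENOMSG. */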
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	int meta_dev_idx;

	rcu_read_lock();
	meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;

	switch (meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_SECTORS;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
	rcu_read_unlock();
}
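
/* Editor's note on the internal/flexible-internal cases above: al_offset and
 * bm_offset come out negative, i.e. the meta data super block sits at
 * md_offset near the end of the device, and the activity log and bitmap are
 * addressed relative to (and located in front of) it. */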
/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}
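
/* Examples: ppsize(buf, 8192) yields "8192 KB" (below the 10000 cutoff),
 * while ppsize(buf, 1048576) shifts once, with rounding, to "1024 MB". */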
/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (drbd_suspended(mdev))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}
/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size, u_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	rcu_read_lock();
	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
				    "Leaving size unchanged at size = %lu KB\n",
				    (unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
		     (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		int err;

		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		err = drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
				     "size changed", BM_LOCKED_MASK);
		if (err) {
			rv = dev_size_error;
			goto out;
		}
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}
sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
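
/* Worked example (numbers made up): with p_size = 2000000 and m_size =
 * 1000000 sectors, size becomes min(p_size, m_size) = 1000000. A configured
 * u_size above that is refused with the error message above and the computed
 * size is kept; a smaller u_size simply wins. */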
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	int max_hw_sectors = max_bio_size >> 9;
	int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		rcu_read_lock();
		max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
		rcu_read_unlock();
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}
void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
	int now, new, local, peer;

	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
	local = mdev->local_max_bio_size; /* possibly last known value, from volatile memory */
	peer = mdev->peer_max_bio_size; /* possibly last known value, from meta data */

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		mdev->local_max_bio_size = local;
		put_ldev(mdev);
	}

	/* We may ignore peer limits if the peer is modern enough.
	   From 8.3.8 onwards the peer can use multiple
	   BIOs for a single peer_request */
	if (mdev->state.conn >= C_CONNECTED) {
		if (mdev->tconn->agreed_pro_version < 94)
			peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
			/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
		else if (mdev->tconn->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else if (mdev->tconn->agreed_pro_version < 100)
			peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
		else
			peer = DRBD_MAX_BIO_SIZE;
	}

	new = min_t(int, local, peer);

	if (mdev->state.role == R_PRIMARY && new < now)
		dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);

	if (new != now)
		dev_info(DEV, "max BIO size = %u\n", new);

	drbd_setup_queue_param(mdev, new);
}
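
/* Illustration of the version ladder above: against a peer with
 * agreed_pro_version 94, "peer" is pinned to DRBD_MAX_SIZE_H80_PACKET;
 * with 95..99 it becomes DRBD_MAX_BIO_SIZE_P95; only 100 and newer allow
 * the full DRBD_MAX_BIO_SIZE. The effective limit is min(local, peer). */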
/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_tconn *tconn)
{
	drbd_thread_start(&tconn->worker);
	conn_flush_workqueue(tconn);
}
/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_tconn *tconn)
{
	bool stop_threads;
	spin_lock_irq(&tconn->req_lock);
	stop_threads = conn_all_vols_unconf(tconn) &&
		tconn->cstate == C_STANDALONE;
	spin_unlock_irq(&tconn->req_lock);
	if (stop_threads) {
		/* asender is implicitly stopped by receiver
		 * in conn_disconnect() */
		drbd_thread_stop(&tconn->receiver);
		drbd_thread_stop(&tconn->worker);
	}
}
/* Make sure IO is suspended before calling this function(). */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (!lc_try_lock(mdev->act_log)) {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
	spin_unlock_irq(&mdev->tconn->req_lock);
	lc_unlock(mdev->act_log);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}
static bool should_set_defaults(struct genl_info *info)
{
	unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}
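
/* I.e. a "drbdsetup ... --set-defaults" invocation sets
 * DRBD_GENL_F_SET_DEFAULTS in the genl header, and the options handlers
 * below then reset the configuration to defaults before applying the
 * request's attributes. */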
static void enforce_disk_conf_limits(struct disk_conf *dc)
{
	if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
		dc->al_extents = DRBD_AL_EXTENTS_MIN;
	if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
		dc->al_extents = DRBD_AL_EXTENTS_MAX;

	if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
}
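
/* Example: a request asking for fewer than DRBD_AL_EXTENTS_MIN al-extents
 * is silently clamped to the minimum here rather than rejected. */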
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_conf *mdev;
	struct disk_conf *new_disk_conf, *old_disk_conf;
	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
	int err, fifo_size;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	mutex_lock(&mdev->tconn->conf_update);
	old_disk_conf = mdev->ldev->disk_conf;
	*new_disk_conf = *old_disk_conf;
	if (should_set_defaults(info))
		set_disk_conf_defaults(new_disk_conf);

	err = disk_conf_from_attrs_for_change(new_disk_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
	}

	if (!expect(new_disk_conf->resync_rate >= 1))
		new_disk_conf->resync_rate = 1;

	enforce_disk_conf_limits(new_disk_conf);

	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s->size) {
		new_plan = fifo_alloc(fifo_size);
		if (!new_plan) {
			dev_err(DEV, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail_unlock;
		}
	}

	drbd_suspend_io(mdev);
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
	drbd_al_shrink(mdev);
	err = drbd_check_al_size(mdev, new_disk_conf);
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	if (err) {
		retcode = ERR_NOMEM;
		goto fail_unlock;
	}

	write_lock_irq(&global_state_lock);
	retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
	if (retcode == NO_ERROR) {
		rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
		drbd_resync_after_changed(mdev);
	}
	write_unlock_irq(&global_state_lock);

	if (retcode != NO_ERROR)
		goto fail_unlock;

	if (new_plan) {
		old_plan = mdev->rs_plan_s;
		rcu_assign_pointer(mdev->rs_plan_s, new_plan);
	}

	mutex_unlock(&mdev->tconn->conf_update);

	if (new_disk_conf->al_updates)
		mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
	else
		mdev->ldev->md.flags |= MDF_AL_DISABLED;

	drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);

	drbd_md_sync(mdev);

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev);

	synchronize_rcu();
	kfree(old_disk_conf);
	kfree(old_plan);
	mod_timer(&mdev->request_timer, jiffies + HZ);
	goto success;

fail_unlock:
	mutex_unlock(&mdev->tconn->conf_update);
 fail:
	kfree(new_disk_conf);
	kfree(new_plan);
success:
	put_ldev(mdev);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int err;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct disk_conf *new_disk_conf = NULL;
	struct block_device *bdev;
	struct lru_cache *resync_lru = NULL;
	struct fifo_buffer *new_plan = NULL;
	union drbd_state ns, os;
	enum drbd_state_rv rv;
	struct net_conf *nc;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	mdev = adm_ctx.mdev;
	conn_reconfig_start(mdev->tconn);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}
	/* It may just now have detached because of IO error.  Make sure
	 * drbd_ldev_destroy is done already, we may end up here very fast,
	 * e.g. if someone calls attach from the on-io-error handler,
	 * to realize a "hot spare" feature (not that I'd recommend that) */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));

	/* make sure there is no leftover from previous force-detach attempts */
	clear_bit(FORCE_DETACH, &mdev->flags);

	/* and no leftover from previously aborted resync or verify, either */
	mdev->rs_total = 0;
	mdev->rs_failed = 0;
	atomic_set(&mdev->rs_pending_cnt, 0);

	/* allocation not in the IO path, drbdsetup context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	spin_lock_init(&nbc->md.uuid_lock);

	new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	nbc->disk_conf = new_disk_conf;

	set_disk_conf_defaults(new_disk_conf);
	err = disk_conf_from_attrs(new_disk_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	enforce_disk_conf_limits(new_disk_conf);

	new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
	if (!new_plan) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc) {
		if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
			rcu_read_unlock();
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}
	rcu_read_unlock();

	bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_DISK;
		goto fail;
	}
	nbc->backing_bdev = bdev;

	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device.  TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  (new_disk_conf->meta_dev_idx < 0) ?
				  (void *)mdev : (void *)drbd_m_holder);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}
	nbc->md_bdev = bdev;
	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			1, 61, sizeof(struct bm_extent),
			offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) new_disk_conf->disk_size);
		retcode = ERR_DISK_TOO_SMALL;
		goto fail;
	}

	if (new_disk_conf->meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (new_disk_conf->meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TOO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
			 "at least %llu sectors needed for this meta-disk type\n",
			 (unsigned long long) min_md_device_sectors);
		goto fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TOO_SMALL;
		goto fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			"to currently maximum possible %llu sectors <==\n",
			(unsigned long long) max_possible_sectors);
		if (new_disk_conf->meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				"meta data may help <<==\n");
	}

	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	/* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
	 * We need a way to either ignore barrier acks for barriers sent before a device
	 * was attached, or a way to wait for all pending barrier acks to come in.
	 * As barriers are counted per resource,
	 * we'd need to suspend io on all devices of a resource.
	 */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	retcode = rv;  /* FIXME: Type mismatch. */
	drbd_resume_io(mdev);
	if (rv < SS_SUCCESS)
		goto fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;
	drbd_md_set_sector_offsets(mdev, nbc);

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev, new_disk_conf)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TOO_SMALL;
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (new_disk_conf->md_flushes)
		clear_bit(MD_NO_FUA, &mdev->flags);
	else
		set_bit(MD_NO_FUA, &mdev->flags);
	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere. */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	mdev->rs_plan_s = new_plan;
	nbc = NULL;
	resync_lru = NULL;
	new_disk_conf = NULL;
	new_plan = NULL;

	drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod))
		set_bit(CRASHED_PRIMARY, &mdev->flags);

	mdev->send_cnt = 0;
	mdev->recv_cnt = 0;
	mdev->read_cnt = 0;
	mdev->writ_cnt = 0;
	drbd_reconsider_max_bio_size(mdev);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	if (mdev->state.role != R_PRIMARY &&
	     drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &mdev->flags);

	dd = drbd_determine_dev_size(mdev, 0);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == grew)
		set_bit(RESYNC_AFTER_NEG, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) ||
	    (test_bit(CRASHED_PRIMARY, &mdev->flags) &&
	     drbd_md_test_flag(mdev->ldev, MDF_AL_DISABLED))) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
		     "(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
			"set_n_write from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read,
			"read from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
		drbd_suspend_al(mdev); /* IO is still suspended here... */
	spin_lock_irq(&mdev->tconn->req_lock);
	os = drbd_read_state(mdev);
	ns = os;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WasUpToDate...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	rcu_read_lock();
	if (ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	if (rcu_dereference(mdev->ldev->disk_conf)->al_updates)
		mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
	else
		mdev->ldev->md.flags |= MDF_AL_DISABLED;
	rcu_read_unlock();

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		   To avoid a race in receive_state, free p_uuid while
		   holding req_lock. I.e. atomic with the state change */
		kfree(mdev->p_uuid);
		mdev->p_uuid = NULL;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	mod_timer(&mdev->request_timer, jiffies + HZ);

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);
	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	conn_reconfig_done(mdev->tconn);
	drbd_adm_finish(info, retcode);
	return 0;

 force_diskless_dec:
	put_ldev(mdev);
 force_diskless:
	drbd_force_state(mdev, NS(disk, D_DISKLESS));
	drbd_md_sync(mdev);
 fail:
	conn_reconfig_done(mdev->tconn);
	if (nbc) {
		if (nbc->backing_bdev)
			blkdev_put(nbc->backing_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		if (nbc->md_bdev)
			blkdev_put(nbc->md_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		kfree(nbc);
	}
	kfree(new_disk_conf);
	lc_destroy(resync_lru);
	kfree(new_plan);

 finish:
	drbd_adm_finish(info, retcode);
	return 0;
}
static int adm_detach(struct drbd_conf *mdev, int force)
{
	enum drbd_state_rv retcode;
	int ret;

	if (force) {
		set_bit(FORCE_DETACH, &mdev->flags);
		drbd_force_state(mdev, NS(disk, D_FAILED));
		retcode = SS_SUCCESS;
		goto out;
	}

	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
	drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
	retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
	drbd_md_put_buffer(mdev);
	/* D_FAILED will transition to DISKLESS. */
	ret = wait_event_interruptible(mdev->misc_wait,
			mdev->state.disk != D_FAILED);
	drbd_resume_io(mdev);
	if ((int)retcode == (int)SS_IS_DISKLESS)
		retcode = SS_NOTHING_TO_DO;
	if (ret)
		retcode = ERR_INTR;
out:
	return retcode;
}
/* Detaching the disk is a process in multiple stages. First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then we have finally detached. */
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct detach_parms parms = { };
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
		err = detach_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	retcode = adm_detach(adm_ctx.mdev, parms.force_detach);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
static bool conn_resync_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_SYNC_SOURCE ||
		    mdev->state.conn == C_SYNC_TARGET ||
		    mdev->state.conn == C_PAUSED_SYNC_S ||
		    mdev->state.conn == C_PAUSED_SYNC_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}

static bool conn_ov_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_VERIFY_S ||
		    mdev->state.conn == C_VERIFY_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}
static enum drbd_ret_code
_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
{
	struct drbd_conf *mdev;
	int i;

	if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
		if (new_conf->wire_protocol != old_conf->wire_protocol)
			return ERR_NEED_APV_100;

		if (new_conf->two_primaries != old_conf->two_primaries)
			return ERR_NEED_APV_100;

		if (!new_conf->integrity_alg != !old_conf->integrity_alg)
			return ERR_NEED_APV_100;

		if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
			return ERR_NEED_APV_100;
	}

	if (!new_conf->two_primaries &&
	    conn_highest_role(tconn) == R_PRIMARY &&
	    conn_highest_peer(tconn) == R_PRIMARY)
		return ERR_NEED_ALLOW_TWO_PRI;

	if (new_conf->two_primaries &&
	    (new_conf->wire_protocol != DRBD_PROT_C))
		return ERR_NOT_PROTO_C;

	idr_for_each_entry(&tconn->volumes, mdev, i) {
		if (get_ldev(mdev)) {
			enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
			put_ldev(mdev);
			if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
				return ERR_STONITH_AND_PROT_A;
		}
		if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
			return ERR_DISCARD_IMPOSSIBLE;
	}

	if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
		return ERR_CONG_NOT_PROTO_A;

	return NO_ERROR;
}
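
/* Example: a new_conf with two_primaries set but wire_protocol != DRBD_PROT_C
 * is rejected above with ERR_NOT_PROTO_C before any state is touched;
 * dual-primary operation requires protocol C. */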
static enum drbd_ret_code
check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
{
	static enum drbd_ret_code rv;
	struct drbd_conf *mdev;
	int i;

	rcu_read_lock();
	rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
	rcu_read_unlock();

	/* tconn->volumes protected by genl_lock() here */
	idr_for_each_entry(&tconn->volumes, mdev, i) {
		if (!mdev->bitmap) {
			if (drbd_bm_init(mdev))
				return ERR_NOMEM;
		}
	}

	return rv;
}
struct crypto {
	struct crypto_hash *verify_tfm;
	struct crypto_hash *csums_tfm;
	struct crypto_hash *cram_hmac_tfm;
	struct crypto_hash *integrity_tfm;
};
static int
alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
{
	if (!tfm_name[0])
		return NO_ERROR;

	*tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(*tfm)) {
		*tfm = NULL;
		return err_alg;
	}

	return NO_ERROR;
}
static enum drbd_ret_code
alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
{
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	enum drbd_ret_code rv;

	rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
			ERR_CSUMS_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
			ERR_VERIFY_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
			ERR_INTEGRITY_ALG);
	if (rv != NO_ERROR)
		return rv;
	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			 new_conf->cram_hmac_alg);

		rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
				ERR_AUTH_ALG);
	}

	return rv;
}
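
/* Example: with cram_hmac_alg = "sha1", the snprintf above builds the
 * transform name "hmac(sha1)" for crypto_alloc_hash(). An empty algorithm
 * string leaves the corresponding tfm NULL, which is not an error. */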
static void free_crypto(struct crypto *crypto)
{
	crypto_free_hash(crypto->cram_hmac_tfm);
	crypto_free_hash(crypto->integrity_tfm);
	crypto_free_hash(crypto->csums_tfm);
	crypto_free_hash(crypto->verify_tfm);
}
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_tconn *tconn;
	struct net_conf *old_conf, *new_conf = NULL;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto crypto = { };

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tconn = adm_ctx.tconn;

	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto out;
	}

	conn_reconfig_start(tconn);

	mutex_lock(&tconn->data.mutex);
	mutex_lock(&tconn->conf_update);
	old_conf = tconn->net_conf;

	if (!old_conf) {
		drbd_msg_put_info("net conf missing, try connect");
		retcode = ERR_INVALID_REQUEST;
		goto fail;
	}

	*new_conf = *old_conf;
	if (should_set_defaults(info))
		set_net_conf_defaults(new_conf);

	err = net_conf_from_attrs_for_change(new_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(tconn, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	/* re-sync running */
	rsr = conn_resync_running(tconn);
	if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	/* online verify running */
	ovr = conn_ov_running(tconn);
	if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
		retcode = ERR_VERIFY_RUNNING;
		goto fail;
	}

	retcode = alloc_crypto(&crypto, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	rcu_assign_pointer(tconn->net_conf, new_conf);

	if (!rsr) {
		crypto_free_hash(tconn->csums_tfm);
		tconn->csums_tfm = crypto.csums_tfm;
		crypto.csums_tfm = NULL;
	}
	if (!ovr) {
		crypto_free_hash(tconn->verify_tfm);
		tconn->verify_tfm = crypto.verify_tfm;
		crypto.verify_tfm = NULL;
	}

	crypto_free_hash(tconn->integrity_tfm);
	tconn->integrity_tfm = crypto.integrity_tfm;
	if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
		/* Do this without trying to take tconn->data.mutex again.  */
		__drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);

	crypto_free_hash(tconn->cram_hmac_tfm);
	tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;

	mutex_unlock(&tconn->conf_update);
	mutex_unlock(&tconn->data.mutex);

	synchronize_rcu();
	kfree(old_conf);
	if (tconn->cstate >= C_WF_REPORT_PARAMS)
		drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));

	goto done;

 fail:
	mutex_unlock(&tconn->conf_update);
	mutex_unlock(&tconn->data.mutex);
	free_crypto(&crypto);
	kfree(new_conf);
 done:
	conn_reconfig_done(tconn);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	struct net_conf *old_conf, *new_conf = NULL;
	struct crypto crypto = { };
	struct drbd_tconn *tconn;
	enum drbd_ret_code retcode;
	int i;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);

	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;
	if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
		drbd_msg_put_info("connection endpoint(s) missing");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* No need for _rcu here. All reconfiguration is
	 * strictly serialized on genl_lock(). We are protected against
	 * concurrent reconfiguration/addition/deletion */
	list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
		if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len &&
		    !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) {
			retcode = ERR_LOCAL_ADDR;
			goto out;
		}

		if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len &&
		    !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) {
			retcode = ERR_PEER_ADDR;
			goto out;
		}
	}

	tconn = adm_ctx.tconn;
	conn_reconfig_start(tconn);

	if (tconn->cstate > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, drbdsetup / netlink process context */
	new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	set_net_conf_defaults(new_conf);

	err = net_conf_from_attrs(new_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(tconn, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	retcode = alloc_crypto(&crypto, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	conn_flush_workqueue(tconn);

	mutex_lock(&tconn->conf_update);
	old_conf = tconn->net_conf;
	if (old_conf) {
		retcode = ERR_NET_CONFIGURED;
		mutex_unlock(&tconn->conf_update);
		goto fail;
	}
	rcu_assign_pointer(tconn->net_conf, new_conf);

	conn_free_crypto(tconn);
	tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
	tconn->integrity_tfm = crypto.integrity_tfm;
	tconn->csums_tfm = crypto.csums_tfm;
	tconn->verify_tfm = crypto.verify_tfm;

	tconn->my_addr_len = nla_len(adm_ctx.my_addr);
	memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len);
	tconn->peer_addr_len = nla_len(adm_ctx.peer_addr);
	memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len);

	mutex_unlock(&tconn->conf_update);

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, i) {
		mdev->send_cnt = 0;
		mdev->recv_cnt = 0;
	}
	rcu_read_unlock();

	retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);

	conn_reconfig_done(tconn);
	drbd_adm_finish(info, retcode);
	return 0;

 fail:
	free_crypto(&crypto);
	kfree(new_conf);

	conn_reconfig_done(tconn);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}
static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
{
	enum drbd_state_rv rv;

	rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
			force ? CS_HARD : 0);

	switch (rv) {
	case SS_NOTHING_TO_DO:
		break;
	case SS_ALREADY_STANDALONE:
		return SS_SUCCESS;
	case SS_PRIMARY_NOP:
		/* Our state checking code wants to see the peer outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						pdsk, D_OUTDATED), CS_VERBOSE);
		break;
	case SS_CW_FAILED_BY_PEER:
		/* The peer probably wants to see us outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
						disk, D_OUTDATED), 0);
		if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
			rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
					CS_HARD);
		}
		break;
	default:;
		/* no special handling necessary */
	}

	if (rv >= SS_SUCCESS) {
		enum drbd_state_rv rv2;
		/* No one else can reconfigure the network while I am here.
		 * The state handling only uses drbd_thread_stop_nowait(),
		 * we want to really wait here until the receiver is no more.
		 */
		drbd_thread_stop(&adm_ctx.tconn->receiver);

		/* Race breaker.  This additional state change request may be
		 * necessary, if this was a forced disconnect during a receiver
		 * restart.  We may have "killed" the receiver thread just
		 * after drbdd_init() returned.  Typically, we should be
		 * C_STANDALONE already, now, and this becomes a no-op.
		 */
		rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
				CS_VERBOSE | CS_HARD);
		if (rv2 < SS_SUCCESS)
			conn_err(tconn,
				"unexpected rv2=%d in conn_try_disconnect()\n",
				rv2);
	}
	return rv;
}

int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct disconnect_parms parms;
	struct drbd_tconn *tconn;
	enum drbd_state_rv rv;
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	tconn = adm_ctx.tconn;
	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
		err = disconnect_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	rv = conn_try_disconnect(tconn, parms.force_disconnect);
	if (rv < SS_SUCCESS)
		retcode = rv;  /* FIXME: Type mismatch. */
	else
		retcode = NO_ERROR;
fail:
	drbd_adm_finish(info, retcode);
	return 0;
}

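/* Decide the resync direction after the backing storage grew while we were
 * connected: with distinct roles, the Primary becomes sync source; with equal
 * roles, the tie is broken by the RESOLVE_CONFLICTS flag. */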
void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}

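/* Resize the device: record a new user-requested size in disk_conf if one
 * was given, re-examine the backing device capacity, and, while connected,
 * announce the new sizes and uuids to the peer. */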
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
	struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
	struct resize_parms rs;
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	enum dds_flags ddsf;
	sector_t u_size;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	memset(&rs, 0, sizeof(struct resize_parms));
	if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
		err = resize_parms_from_attrs(&rs, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	mdev = adm_ctx.mdev;
	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		goto fail_ldev;
	}

	rcu_read_lock();
	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	if (u_size != (sector_t)rs.resize_size) {
		new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
		if (!new_disk_conf) {
			retcode = ERR_NOMEM;
			goto fail_ldev;
		}
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	if (new_disk_conf) {
		mutex_lock(&mdev->tconn->conf_update);
		old_disk_conf = mdev->ldev->disk_conf;
		*new_disk_conf = *old_disk_conf;
		new_disk_conf->disk_size = (sector_t)rs.resize_size;
		rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
		mutex_unlock(&mdev->tconn->conf_update);
		synchronize_rcu();
		kfree(old_disk_conf);
	}

	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determine_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

fail:
	drbd_adm_finish(info, retcode);
	return 0;

fail_ldev:
	put_ldev(mdev);
	goto fail;
}

int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_tconn *tconn;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;
	tconn = adm_ctx.tconn;

	res_opts = tconn->res_opts;
	if (should_set_defaults(info))
		set_res_opts_defaults(&res_opts);

	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	err = set_resource_options(tconn, &res_opts);
	if (err) {
		retcode = ERR_INVALID_REQUEST;
		if (err == -ENOMEM)
			retcode = ERR_NOMEM;
	}

fail:
	drbd_adm_finish(info, retcode);
	return 0;
}

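/* Start a resync with the local disk as sync target, i.e. invalidate our own
 * copy of the data.  While not connected, just mark the local disk
 * Inconsistent instead. */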
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync.
	 * Also wait for its after_state_ch(). */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	drbd_flush_workqueue(mdev);

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->tconn->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->tconn->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
		union drbd_state mask, union drbd_state val)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_request_state(adm_ctx.mdev, mask, val);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

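/* Helper for invalidate_peer: set all bits in the bitmap and write it out,
 * then suspend the activity log for the duration of the resync. */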
static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
	int rv;

	rv = drbd_bmio_set_n_write(mdev);
	drbd_suspend_al(mdev);
	return rv;
}

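/* Start a resync with the local disk as sync source, i.e. invalidate the
 * peer's copy of the data.  If we are Primary but not connected, mark the
 * peer disk Inconsistent so the next connect turns into a full resync. */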
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
	int retcode; /* drbd_ret_code, drbd_state_rv */
	struct drbd_conf *mdev;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync.
	 * Also wait for its after_state_ch(). */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	drbd_flush_workqueue(mdev);

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
	if (retcode < SS_SUCCESS) {
		if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
			/* The peer will get a resync upon connect anyways.
			 * Just make that into a full resync. */
			retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
			if (retcode >= SS_SUCCESS) {
				if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
						   "set_n_write from invalidate_peer",
						   BM_LOCKED_SET_ALLOWED))
					retcode = ERR_IO_MD_DISK;
			}
		} else
			retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
	}
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
	union drbd_dev_state s;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = adm_ctx.mdev->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}

int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
	}
	drbd_suspend_io(mdev);
	retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (retcode == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev->tconn);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
	}
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}

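/* Nest the configuration context (resource name, volume number, and the
 * connection's endpoint addresses, where known) into the netlink reply. */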
int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr)
{
	struct nlattr *nla;
	nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		goto nla_put_failure;
	if (vnr != VOLUME_UNSPECIFIED &&
	    nla_put_u32(skb, T_ctx_volume, vnr))
		goto nla_put_failure;
	if (nla_put_string(skb, T_ctx_resource_name, tconn->name))
		goto nla_put_failure;
	if (tconn->my_addr_len &&
	    nla_put(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr))
		goto nla_put_failure;
	if (tconn->peer_addr_len &&
	    nla_put(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr))
		goto nla_put_failure;
	nla_nest_end(skb, nla);
	return 0;

nla_put_failure:
	if (nla)
		nla_nest_cancel(skb, nla);
	return -EMSGSIZE;
}

int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
		const struct sib_info *sib)
{
	struct state_info *si = NULL; /* for sizeof(si->member); */
	struct net_conf *nc;
	struct nlattr *nla;
	int got_ldev;
	int err = 0;
	int exclude_sensitive;

	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
	 * to.  So we better exclude_sensitive information.
	 *
	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
	 * in the context of the requesting user process. Exclude sensitive
	 * information, unless current has superuser.
	 *
	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
	 * relies on the current implementation of netlink_dump(), which
	 * executes the dump callback successively from netlink_recvmsg(),
	 * always in the context of the receiving process */
	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);

	got_ldev = get_ldev(mdev);

	/* We need to add connection name and volume number information still.
	 * Minor number is in drbd_genlmsghdr. */
	if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr))
		goto nla_put_failure;

	if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
		goto nla_put_failure;

	rcu_read_lock();
	if (got_ldev)
		if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
			goto nla_put_failure;

	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc)
		err = net_conf_to_skb(skb, nc, exclude_sensitive);
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
	if (!nla)
		goto nla_put_failure;
	if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
	    nla_put_u32(skb, T_current_state, mdev->state.i) ||
	    nla_put_u64(skb, T_ed_uuid, mdev->ed_uuid) ||
	    nla_put_u64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev)) ||
	    nla_put_u64(skb, T_send_cnt, mdev->send_cnt) ||
	    nla_put_u64(skb, T_recv_cnt, mdev->recv_cnt) ||
	    nla_put_u64(skb, T_read_cnt, mdev->read_cnt) ||
	    nla_put_u64(skb, T_writ_cnt, mdev->writ_cnt) ||
	    nla_put_u64(skb, T_al_writ_cnt, mdev->al_writ_cnt) ||
	    nla_put_u64(skb, T_bm_writ_cnt, mdev->bm_writ_cnt) ||
	    nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&mdev->ap_bio_cnt)) ||
	    nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&mdev->ap_pending_cnt)) ||
	    nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&mdev->rs_pending_cnt)))
		goto nla_put_failure;

	if (got_ldev) {
		int err;

		spin_lock_irq(&mdev->ldev->md.uuid_lock);
		err = nla_put(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
		spin_unlock_irq(&mdev->ldev->md.uuid_lock);

		if (err)
			goto nla_put_failure;

		if (nla_put_u32(skb, T_disk_flags, mdev->ldev->md.flags) ||
		    nla_put_u64(skb, T_bits_total, drbd_bm_bits(mdev)) ||
		    nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(mdev)))
			goto nla_put_failure;
		if (C_SYNC_SOURCE <= mdev->state.conn &&
		    C_PAUSED_SYNC_T >= mdev->state.conn) {
			if (nla_put_u64(skb, T_bits_rs_total, mdev->rs_total) ||
			    nla_put_u64(skb, T_bits_rs_failed, mdev->rs_failed))
				goto nla_put_failure;
		}
	}

	if (sib) {
		switch (sib->sib_reason) {
		case SIB_SYNC_PROGRESS:
		case SIB_GET_STATUS_REPLY:
			break;
		case SIB_STATE_CHANGE:
			if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
			    nla_put_u32(skb, T_new_state, sib->ns.i))
				goto nla_put_failure;
			break;
		case SIB_HELPER_POST:
			if (nla_put_u32(skb, T_helper_exit_code,
					sib->helper_exit_code))
				goto nla_put_failure;
			/* fall through */
		case SIB_HELPER_PRE:
			if (nla_put_string(skb, T_helper, sib->helper_name))
				goto nla_put_failure;
			break;
		}
	}
	nla_nest_end(skb, nla);

	if (0)
nla_put_failure:
		err = -EMSGSIZE;
	if (got_ldev)
		put_ldev(mdev);
	return err;
}

int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

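/* Dump callback used by drbd_adm_get_status_all(): emits the status of one
 * volume (or of a volume-less tconn) per call, resuming from the iterator
 * state saved in cb->args[]. */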
int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_conf *mdev;
	struct drbd_genlmsghdr *dh;
	struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
	struct drbd_tconn *tconn = NULL;
	struct drbd_tconn *tmp;
	unsigned volume = cb->args[1];

	/* Open coded, deferred, iteration:
	 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
	 *	idr_for_each_entry(&tconn->volumes, mdev, i) {
	 *	  ...
	 *	}
	 * }
	 * where tconn is cb->args[0];
	 * and i is cb->args[1];
	 *
	 * cb->args[2] indicates if we shall loop over all resources,
	 * or just dump all volumes of a single resource.
	 *
	 * This may miss entries inserted after this dump started,
	 * or entries deleted before they are reached.
	 *
	 * We need to make sure the mdev won't disappear while
	 * we are looking at it, and revalidate our iterators
	 * on each iteration.
	 */

	/* synchronize with conn_create()/conn_destroy() */
	rcu_read_lock();
	/* revalidate iterator position */
	list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
		if (pos == NULL) {
			/* first iteration */
			pos = tmp;
			tconn = pos;
			break;
		}
		if (tmp == pos) {
			tconn = pos;
			break;
		}
	}
	if (tconn) {
next_tconn:
		mdev = idr_get_next(&tconn->volumes, &volume);
		if (!mdev) {
			/* No more volumes to dump on this tconn.
			 * Advance tconn iterator. */
			pos = list_entry_rcu(tconn->all_tconn.next,
					     struct drbd_tconn, all_tconn);
			/* Did we dump any volume on this tconn yet? */
			if (volume != 0) {
				/* If we reached the end of the list,
				 * or only a single resource dump was requested,
				 * we are done. */
				if (&pos->all_tconn == &drbd_tconns || cb->args[2])
					goto out;
				volume = 0;
				tconn = pos;
				goto next_tconn;
			}
		}

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
				cb->nlh->nlmsg_seq, &drbd_genl_family,
				NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			goto out;

		if (!mdev) {
			/* This is a tconn without a single volume.
			 * Surprisingly enough, it may have a network
			 * configuration. */
			struct net_conf *nc;
			dh->minor = -1U;
			dh->ret_code = NO_ERROR;
			if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED))
				goto cancel;
			nc = rcu_dereference(tconn->net_conf);
			if (nc && net_conf_to_skb(skb, nc, 1) != 0)
				goto cancel;
			goto done;
		}

		D_ASSERT(mdev->vnr == volume);
		D_ASSERT(mdev->tconn == tconn);

		dh->minor = mdev_to_minor(mdev);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, mdev, NULL)) {
cancel:
			genlmsg_cancel(skb, dh);
			goto out;
		}
done:
		genlmsg_end(skb, dh);
	}

out:
	rcu_read_unlock();
	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == tconn) ? volume + 1 : 0;

	/* No more tconns/volumes/minors found results in an empty skb.
	 * Which will terminate the dump. */
	return skb->len;
}

/*
 * Request status of all resources, or of all volumes within a single
 * resource.
 *
 * This is a dump, as the answer may not fit in a single reply skb otherwise.
 * Which means we cannot use the family->attrbuf or other such members, because
 * dump is NOT protected by the genl_lock().  During dump, we only have access
 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
 *
 * Once things are set up properly, we call into get_one_status().
 */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	struct nlattr *nla;
	const char *resource_name;
	struct drbd_tconn *tconn;
	int maxtype;

	/* Is this a followup call? */
	if (cb->args[0]) {
		/* ... of a single resource dump,
		 * and the resource iterator has been advanced already? */
		if (cb->args[2] && cb->args[2] != cb->args[0])
			return 0; /* DONE. */
		goto dump;
	}

	/* First call (from netlink_dump_start).  We need to figure out
	 * which resource(s) the user wants us to dump. */
	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
			nlmsg_attrlen(cb->nlh, hdrlen),
			DRBD_NLA_CFG_CONTEXT);

	/* No explicit context given.  Dump all. */
	if (!nla)
		goto dump;
	maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
	nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
	if (IS_ERR(nla))
		return PTR_ERR(nla);
	/* context given, but no name present? */
	if (!nla)
		return -EINVAL;
	resource_name = nla_data(nla);
	tconn = conn_get_by_name(resource_name);

	if (!tconn)
		return -ENODEV;

	kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */

	/* prime iterators, and set "filter" mode mark:
	 * only dump this tconn. */
	cb->args[0] = (long)tconn;
	/* cb->args[1] = 0; passed in this way. */
	cb->args[2] = (long)tconn;

dump:
	return get_one_status(skb, cb);
}

int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct timeout_parms tp;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tp.timeout_type =
		adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
		UT_DEFAULT;

	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

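/* Start online verify (C_VERIFY_S), by default resuming from the position
 * where a previous verify run left off. */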
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	struct start_ov_parms parms;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* resume from last known position, if possible */
	parms.ov_start_sector = mdev->ov_start_sector;
	parms.ov_stop_sector = ULLONG_MAX;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		int err = start_ov_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}
	/* w_make_ov_request expects position to be aligned */
	mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
	mdev->ov_stop_sector = parms.ov_stop_sector;

	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
	drbd_resume_io(mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

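/* Generate a new current UUID, rotating the old one into the bitmap slot.
 * With args.clear_bm set on a just-created, connected device, this skips the
 * initial full sync by declaring both sides UpToDate. */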
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out_nolock;

	mdev = adm_ctx.mdev;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			drbd_print_uuids(mdev, "cleared bitmap UUID");
			spin_lock_irq(&mdev->tconn->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->tconn->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(mdev->state_mutex);
out_nolock:
	drbd_adm_finish(info, retcode);
	return 0;
}

static enum drbd_ret_code
drbd_check_resource_name(const char *name)
{
	if (!name || !name[0]) {
		drbd_msg_put_info("resource name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info("invalid resource name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}

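/* Create a new resource (tconn) with the given res_opts, after validating
 * the requested resource name.  Re-creating an already existing resource
 * only fails if NLM_F_EXCL was given. */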
int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	set_res_opts_defaults(&res_opts);
	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto out;
	}

	retcode = drbd_check_resource_name(adm_ctx.resource_name);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx.tconn) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info("resource exists");
		}
		/* else: still NO_ERROR */
		goto out;
	}

	if (!conn_create(adm_ctx.resource_name, &res_opts))
		retcode = ERR_NOMEM;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_genlmsghdr *dh = info->userhdr;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (dh->minor > MINORMASK) {
		drbd_msg_put_info("requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}
	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
		drbd_msg_put_info("requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* drbd_adm_prepare made sure already
	 * that mdev->tconn and mdev->vnr match the request. */
	if (adm_ctx.mdev) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			retcode = ERR_MINOR_EXISTS;
		/* else: still NO_ERROR */
		goto out;
	}

	retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

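/* A minor may only be deleted while it is Diskless and Secondary; it may
 * still belong to a live replication group.  Cycle it through
 * C_WF_REPORT_PARAMS, waiting for completion, so pending state changes have
 * settled before the device is unregistered. */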
static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
{
	if (mdev->state.disk == D_DISKLESS &&
	    /* no need to be mdev->state.conn == C_STANDALONE &&
	     * we may want to delete a minor from a live replication group.
	     */
	    mdev->state.role == R_SECONDARY) {
		_drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS),
				    CS_VERBOSE + CS_WAIT_COMPLETE);
		idr_remove(&mdev->tconn->volumes, mdev->vnr);
		idr_remove(&minors, mdev_to_minor(mdev));
		del_gendisk(mdev->vdisk);
		synchronize_rcu();
		kref_put(&mdev->kref, &drbd_minor_destroy);
		return NO_ERROR;
	} else
		return ERR_MINOR_CONFIGURED;
}

int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = adm_delete_minor(adm_ctx.mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

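/* Take a whole resource down: demote all volumes to Secondary, disconnect,
 * detach all volumes, stop the worker thread, then delete the volumes and
 * finally the connection itself. */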
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
	struct drbd_conf *mdev;
	unsigned i;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (!adm_ctx.tconn) {
		retcode = ERR_RES_NOT_KNOWN;
		goto out;
	}

	/* demote */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = drbd_set_role(mdev, R_SECONDARY, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to demote");
			goto out;
		}
	}

	retcode = conn_try_disconnect(adm_ctx.tconn, 0);
	if (retcode < SS_SUCCESS) {
		drbd_msg_put_info("failed to disconnect");
		goto out;
	}

	/* detach */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_detach(mdev, 0);
		if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
			drbd_msg_put_info("failed to detach");
			goto out;
		}
	}

	/* If we reach this, all volumes (of this tconn) are Secondary,
	 * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
	 * actually stopped, state handling only does drbd_thread_stop_nowait(). */
	drbd_thread_stop(&adm_ctx.tconn->worker);

	/* Now, nothing can fail anymore */

	/* delete volumes */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_delete_minor(mdev);
		if (retcode != NO_ERROR) {
			/* "can not happen" */
			drbd_msg_put_info("failed to delete volume");
			goto out;
		}
	}

	/* delete connection */
	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		/* "can not happen" */
		retcode = ERR_RES_IN_USE;
		drbd_msg_put_info("failed to delete connection");
		goto out;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		retcode = ERR_RES_IN_USE;
	}

	if (retcode == NO_ERROR)
		drbd_thread_stop(&adm_ctx.tconn->worker);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

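/* Broadcast a state info broadcast (sib) event to the DRBD events multicast
 * group.  Allocation uses GFP_NOIO, so losing an event under memory pressure
 * is accepted; -ESRCH (no listeners) is not treated as an error. */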
void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
{
	static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyways. */
		goto nla_put_failure;
	d_out->minor = mdev_to_minor(mdev);
	d_out->ret_code = NO_ERROR;

	if (nla_put_status_info(msg, mdev, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, 0);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	dev_err(DEV, "Error %d while broadcasting event. "
			"Event seq:%u sib_reason:%u\n",
			err, seq, sib->sib_reason);
}