/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_syncer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);

#include <linux/drbd_genl_api.h>
#include <linux/genl_magic_func.h>
/* used blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED	(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *conn_name;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn;
} adm_ctx;
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		printk(KERN_ERR "drbd: error sending genl reply\n");
}
/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail was no space in skb, and there are 4k available. */
int drbd_msg_put_info(const char *info)
{
	struct sk_buff *skb = adm_ctx.reply_skb;
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}
/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_CONN	2
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
		unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(&adm_ctx, 0, sizeof(adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS
	&& security_netlink_recv(skb, CAP_SYS_ADMIN))
		return -EPERM;

	adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx.reply_skb)
		goto fail;

	adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* put of a few bytes into a fresh skb of >= 4k will always succeed.
	 * but anyways */
	if (!adm_ctx.reply_dh)
		goto fail;

	adm_ctx.reply_dh->minor = d_in->minor;
	adm_ctx.reply_dh->ret_code = NO_ERROR;

	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info->attrs);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx.reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the global adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
		nla = nested_attr_tb[__nla_type(T_ctx_conn_name)];
		if (nla)
			adm_ctx.conn_name = nla_data(nla);
	} else
		adm_ctx.volume = VOLUME_UNSPECIFIED;

	adm_ctx.minor = d_in->minor;
	adm_ctx.mdev = minor_to_mdev(d_in->minor);
	adm_ctx.tconn = conn_by_name(adm_ctx.conn_name);

	if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info("unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
		drbd_msg_put_info("unknown connection");
		return ERR_INVALID_REQUEST;
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx.mdev &&
	    adm_ctx.volume != VOLUME_UNSPECIFIED &&
	    adm_ctx.volume != adm_ctx.mdev->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
				adm_ctx.minor, adm_ctx.volume,
				adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("over-determined configuration context mismatch");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx.mdev && adm_ctx.tconn &&
	    adm_ctx.mdev->tconn != adm_ctx.tconn) {
		pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
				adm_ctx.minor, adm_ctx.conn_name, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("over-determined configuration context mismatch");
		return ERR_INVALID_REQUEST;
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx.reply_skb);
	adm_ctx.reply_skb = NULL;
	return -ENOMEM;
}
static int drbd_adm_finish(struct genl_info *info, int retcode)
{
	struct nlattr *nla;
	const char *conn_name = NULL;

	if (!adm_ctx.reply_skb)
		return -ENOMEM;

	adm_ctx.reply_dh->ret_code = retcode;

	nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
	if (nla) {
		nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
		if (nla)
			conn_name = nla_data(nla);
	}

	drbd_adm_send_reply(adm_ctx.reply_skb, info);
	return 0;
}
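
/* A minimal sketch (illustration only, compiled out): every drbd_adm_*
 * handler below brackets its work between drbd_adm_prepare() and
 * drbd_adm_finish().  The handler name here is made up; the real handlers
 * start with drbd_adm_set_role() further down. */
#if 0
int drbd_adm_example(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)	/* no reply skb allocated, nothing to finish */
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	/* ... operate on adm_ctx.mdev / adm_ctx.tconn here ... */
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
#endif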
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			NULL, /* Will be set to address family */
			NULL, /* Will be set to address */
			NULL };
	char mb[12], af[20], ad[60], *afs;
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	struct sib_info sib;
	int ret;

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));

	if (get_net_conf(mdev->tconn)) {
		switch (((struct sockaddr *)mdev->tconn->net_conf->peer_addr)->sa_family) {
		case AF_INET6:
			afs = "ipv6";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
				 &((struct sockaddr_in6 *)mdev->tconn->net_conf->peer_addr)->sin6_addr);
			break;
		case AF_INET:
			afs = "ipv4";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->tconn->net_conf->peer_addr)->sin_addr);
			break;
		default:
			afs = "ssocks";
			snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
				 &((struct sockaddr_in *)mdev->tconn->net_conf->peer_addr)->sin_addr);
		}
		snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
		envp[3] = af;
		envp[4] = ad;
		put_net_conf(mdev->tconn);
	}

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(mdev, &sib);
	ret = call_usermodehelper(usermode_helper, argv, envp, 1);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(mdev, &sib);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
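
/* Summary of the helper contract, derived from the code above: the handler
 * is invoked as "<usermode_helper> <cmd> minor-<n>", with DRBD_PEER_AF and
 * DRBD_PEER_ADDRESS exported in its environment when a peer is configured.
 * call_usermodehelper(..., 1) waits for the process and returns the raw
 * wait status, so the script's exit code is (ret >> 8) & 0xff -- which is
 * how drbd_try_outdate_peer() below interprets the fence-peer result. */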
enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
{
	char *ex_to_string;
	int r;
	enum drbd_disk_state nps;
	enum drbd_fencing_p fp;

	D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

	if (get_ldev_if_state(mdev, D_CONSISTENT)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	} else {
		dev_warn(DEV, "Not fencing peer, I'm not even Consistent myself.\n");
		nps = mdev->state.pdsk;
		goto out;
	}

	r = drbd_khelper(mdev, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		nps = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		nps = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (mdev->state.disk == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			nps = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
			nps = mdev->state.pdsk;
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		dev_warn(DEV, "Peer is primary, outdating myself.\n");
		nps = D_UNKNOWN;
		_drbd_request_state(mdev, NS(disk, D_OUTDATED), CS_WAIT_COMPLETE);
		break;
	case 7:
		if (fp != FP_STONITH)
			dev_err(DEV, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		nps = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		nps = D_UNKNOWN;
		dev_err(DEV, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return nps;
	}

	dev_info(DEV, "fence-peer helper returned %d (%s)\n",
			(r>>8) & 0xff, ex_to_string);

out:
	if (mdev->state.susp_fen && nps >= D_UNKNOWN) {
		/* The handler was not successful... unfreeze here, the
		   state engine can not unfreeze... */
		_drbd_request_state(mdev, NS(susp_fen, 0), CS_VERBOSE);
	}

	return nps;
}
static int _try_outdate_peer_async(void *data)
{
	struct drbd_conf *mdev = (struct drbd_conf *)data;
	enum drbd_disk_state nps;
	union drbd_state ns;

	nps = drbd_try_outdate_peer(mdev);

	/* Not using
	   drbd_request_state(mdev, NS(pdsk, nps));
	   here, because we might have been able to re-establish the connection
	   in the meantime.  This can only partially be solved in the state
	   engine's is_valid_state() and is_valid_state_transition()
	   functions.

	   nps can be D_INCONSISTENT, D_OUTDATED or D_UNKNOWN.
	   pdsk == D_INCONSISTENT while conn >= C_CONNECTED is valid,
	   therefore we have to have the pre state change check here.
	*/
	spin_lock_irq(&mdev->tconn->req_lock);
	ns = mdev->state;
	if (ns.conn < C_WF_REPORT_PARAMS) {
		ns.pdsk = nps;
		_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	}
	spin_unlock_irq(&mdev->tconn->req_lock);

	return 0;
}

void drbd_try_outdate_peer_async(struct drbd_conf *mdev)
{
	struct task_struct *opa;

	opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev));
	if (IS_ERR(opa))
		dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n");
}
enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;
	enum drbd_disk_state nps;

	if (new_role == R_PRIMARY)
		request_ping(mdev->tconn); /* Detect a dead peer ASAP */

	mutex_lock(mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
			nps = drbd_try_outdate_peer(mdev);

			if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}

			val.pdsk = nps;
			mask.pdsk = D_MASK;

			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			nps = drbd_try_outdate_peer(mdev);

			if (force && nps > D_OUTDATED) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				nps = D_OUTDATED;
			}

			mask.pdsk = D_MASK;
			val.pdsk = nps;

			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			schedule_timeout_interruptible((mdev->tconn->net_conf->ping_timeo+1)*HZ/10);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		if (get_net_conf(mdev->tconn)) {
			mdev->tconn->net_conf->want_lose = 0;
			put_net_conf(mdev->tconn);
		}
		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			      mdev->state.pdsk <= D_FAILED)
			     && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(mdev->state_mutex);
	return rv;
}
static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		"invalid attribute value";
}
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info->attrs);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
/* initializes the md.*_offset members, so we are able to find
 * the on disk meta data */
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	switch (bdev->dc.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_RESERVED_SECT;
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.md_offset = 0;
		bdev->md.al_offset = MD_AL_OFFSET;
		bdev->md.bm_offset = MD_BM_OFFSET;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		bdev->md.md_offset = drbd_md_ss__(mdev, bdev);
		/* al size is still fixed */
		bdev->md.al_offset = -MD_AL_SECTORS;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_BM_OFFSET;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_AL_OFFSET;
		break;
	}
}
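
/* Resulting layout for internal/flexible-internal meta data (sketch,
 * assuming MD_BM_OFFSET == MD_AL_OFFSET + MD_AL_SECTORS, consistent with
 * the fixed-size external layout above; offsets are sector counts
 * relative to md_offset):
 *
 *   ... usable data ... | bitmap | activity log | super block |
 *                       ^ bm_offset             ^ md_offset (device end)
 *                                ^ al_offset = -MD_AL_SECTORS
 *
 * Both offsets are negative: the whole reserved area of md_size_sect
 * sectors sits at the end of the backing device, super block last. */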
/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}
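
/* Example: ppsize(buf, 1048576) -- 1 GiB expressed in KB -- iterates once
 * (1048576 >> 10 == 1024, round-up bit not set) and yields "1024 MB".
 * The term !!(size & (1<<9)) rounds to nearest by adding one whenever
 * bit 9, half of the 2^10 divisor, is set. */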
/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (is_susp(mdev->state))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}
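
/* Typical bracket, as used by drbd_determine_dev_size() and
 * drbd_adm_attach() below (sketch):
 *
 *	drbd_suspend_io(mdev);
 *	... resize the bitmap, move meta data, request a state change ...
 *	drbd_resume_io(mdev);
 */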
/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size;
	sector_t size;
	char ppb[10];

	int md_moved, la_size_changed;
	enum determine_dev_size rv = unchanged;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size = mdev->ldev->md.la_size_sect;

	/* TODO: should only be some assert here, not (re)init... */
	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
				    "Leaving size unchanged at size = %lu KB\n",
				    (unsigned long)size);
			}
			rv = dev_size_error;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
		     (unsigned long long)size>>1);
	}
	if (rv == dev_size_error)
		goto out;

	la_size_changed = (la_size != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved) {
		int err;

		drbd_al_shrink(mdev); /* All extents inactive. */
		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		err = drbd_bitmap_io(mdev, &drbd_bm_write,
				"size changed", BM_LOCKED_MASK);
		if (err) {
			rv = dev_size_error;
			goto out;
		}
		drbd_md_mark_dirty(mdev);
	}

	if (size > la_size)
		rv = grew;
	if (size < la_size)
		rv = shrunk;
out:
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	return rv;
}
sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size) {
			size = la_size;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (!expect(mdev->sync_conf.al_extents >= DRBD_AL_EXTENTS_MIN))
		mdev->sync_conf.al_extents = DRBD_AL_EXTENTS_MIN;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	int max_hw_sectors = max_bio_size >> 9;
	int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		max_segments = mdev->ldev->dc.max_bio_bvecs;
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}
void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
	int now, new, local, peer;

	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
	local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		mdev->local_max_bio_size = local;
		put_ldev(mdev);
	}

	/* We may ignore peer limits if the peer is modern enough.
	   Because from 8.3.8 onwards the peer can use multiple
	   BIOs for a single peer_request */
	if (mdev->state.conn >= C_CONNECTED) {
		if (mdev->tconn->agreed_pro_version < 94)
			peer = mdev->peer_max_bio_size;
		else if (mdev->tconn->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else /* drbd 8.3.8 onwards */
			peer = DRBD_MAX_BIO_SIZE;
	}

	new = min_t(int, local, peer);

	if (mdev->state.role == R_PRIMARY && new < now)
		dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);

	if (new != now)
		dev_info(DEV, "max BIO size = %u\n", new);

	drbd_setup_queue_param(mdev, new);
}
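
/* Example (sketch): with a peer speaking exactly protocol version 94, the
 * effective limit is min(local queue limit, DRBD_MAX_SIZE_H80_PACKET);
 * once both sides are past 94, only min(local, DRBD_MAX_BIO_SIZE) applies,
 * because such a peer can split one large write into several BIOs itself. */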
/* serialize deconfig (worker exiting, doing cleanup)
 * and reconfig (drbdsetup disk, drbdsetup net)
 *
 * Wait for a potentially exiting worker, then restart it,
 * or start a new one.  Flush any pending work, there may still be an
 * after_state_change queued.
 */
static void conn_reconfig_start(struct drbd_tconn *tconn)
{
	wait_event(tconn->ping_wait, !test_and_set_bit(CONFIG_PENDING, &tconn->flags));
	wait_event(tconn->ping_wait, !test_bit(OBJECT_DYING, &tconn->flags));
	drbd_thread_start(&tconn->worker);
	conn_flush_workqueue(tconn);
}

/* if still unconfigured, stops worker again.
 * if configured now, clears CONFIG_PENDING.
 * wakes potential waiters */
static void conn_reconfig_done(struct drbd_tconn *tconn)
{
	spin_lock_irq(&tconn->req_lock);
	if (conn_all_vols_unconf(tconn)) {
		set_bit(OBJECT_DYING, &tconn->flags);
		drbd_thread_stop_nowait(&tconn->worker);
	} else
		clear_bit(CONFIG_PENDING, &tconn->flags);
	spin_unlock_irq(&tconn->req_lock);
	wake_up(&tconn->ping_wait);
}
/* Make sure IO is suspended before calling this function(). */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (!lc_try_lock(mdev->act_log)) {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
	spin_unlock_irq(&mdev->tconn->req_lock);
	lc_unlock(mdev->act_log);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int err;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct block_device *bdev;
	struct lru_cache *resync_lru = NULL;
	union drbd_state ns, os;
	enum drbd_state_rv rv;
	int cp_discovered = 0;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	mdev = adm_ctx.mdev;
	conn_reconfig_start(mdev->tconn);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}
	/* It may just now have detached because of IO error.  Make sure
	 * drbd_ldev_destroy is done already, we may end up here very fast,
	 * e.g. if someone calls attach from the on-io-error handler,
	 * to realize a "hot spare" feature (not that I'd recommend that) */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));

	/* allocation not in the IO path, drbdsetup context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	nbc->dc.disk_size     = DRBD_DISK_SIZE_SECT_DEF;
	nbc->dc.on_io_error   = DRBD_ON_IO_ERROR_DEF;
	nbc->dc.fencing       = DRBD_FENCING_DEF;
	nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;

	err = disk_conf_from_attrs(&nbc->dc, info->attrs);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	if ((int)nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	if (get_net_conf(mdev->tconn)) {
		int prot = mdev->tconn->net_conf->wire_protocol;
		put_net_conf(mdev->tconn);
		if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}

	bdev = blkdev_get_by_path(nbc->dc.backing_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_DISK;
		goto fail;
	}
	nbc->backing_bdev = bdev;

	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device.  TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	bdev = blkdev_get_by_path(nbc->dc.meta_dev,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  ((int)nbc->dc.meta_dev_idx < 0) ?
				  (void *)mdev : (void *)drbd_m_holder);
	if (IS_ERR(bdev)) {
		dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
			PTR_ERR(bdev));
		retcode = ERR_OPEN_MD_DISK;
		goto fail;
	}
	nbc->md_bdev = bdev;

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			1, 61, sizeof(struct bm_extent),
			offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	/* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
	drbd_md_set_sector_offsets(mdev, nbc);

	if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
		dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) nbc->dc.disk_size);
		retcode = ERR_DISK_TO_SMALL;
		goto fail;
	}

	if ((int)nbc->dc.meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TO_SMALL;
		dev_warn(DEV, "refusing attach: md-device too small, "
			"at least %llu sectors needed for this meta-disk type\n",
			(unsigned long long) min_md_device_sectors);
		goto fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(mdev->this_bdev)) {
		retcode = ERR_DISK_TO_SMALL;
		goto fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		dev_warn(DEV, "==> truncating very big lower level device "
			"to currently maximum possible %llu sectors <==\n",
			(unsigned long long) max_possible_sectors);
		if ((int)nbc->dc.meta_dev_idx >= 0)
			dev_warn(DEV, "==>> using internal or flexible "
				"meta data may help <<==\n");
	}
	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || is_susp(mdev->state));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	retcode = rv;  /* FIXME: Type mismatch. */
	drbd_resume_io(mdev);
	if (rv < SS_SUCCESS)
		goto fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	drbd_md_set_sector_offsets(mdev, nbc);

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	retcode = drbd_md_read(mdev, nbc);
	if (retcode != NO_ERROR)
		goto force_diskless_dec;

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TO_SMALL;
		goto force_diskless_dec;
	}

	if (!drbd_al_read_log(mdev, nbc)) {
		retcode = ERR_IO_MD_DISK;
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (nbc->dc.no_md_flush)
		set_bit(MD_NO_FUA, &mdev->flags);
	else
		clear_bit(MD_NO_FUA, &mdev->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere. */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	nbc = NULL;
	resync_lru = NULL;

	mdev->write_ordering = WO_bdev_flush;
	drbd_bump_write_ordering(mdev, WO_bdev_flush);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
		set_bit(CRASHED_PRIMARY, &mdev->flags);
		cp_discovered = 1;
	}

	drbd_reconsider_max_bio_size(mdev);
	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	if (mdev->state.role != R_PRIMARY &&
	     drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &mdev->flags);

	dd = drbd_determine_dev_size(mdev, 0);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == grew)
		set_bit(RESYNC_AFTER_NEG, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
		     "(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
			"set_n_write from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read,
			"read from attaching", BM_LOCKED_MASK) < 0) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (cp_discovered) {
		drbd_al_apply_to_bm(mdev);
		if (drbd_bitmap_io(mdev, &drbd_bm_write,
			"crashed primary apply AL", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
		drbd_suspend_al(mdev); /* IO is still suspended here... */
	spin_lock_irq(&mdev->tconn->req_lock);
	os = mdev->state;
	ns.i = os.i;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WasUpToDate...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	if ( ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		   To avoid a race in receive_state, free p_uuid while
		   holding req_lock. I.e. atomic with the state change */
		kfree(mdev->p_uuid);
		mdev->p_uuid = NULL;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);

	spin_unlock_irq(&mdev->tconn->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	conn_reconfig_done(mdev->tconn);
	drbd_adm_finish(info, retcode);
	return 0;

 force_diskless_dec:
	put_ldev(mdev);
 force_diskless:
	drbd_force_state(mdev, NS(disk, D_FAILED));
 fail:
	conn_reconfig_done(mdev->tconn);
	if (nbc) {
		if (nbc->backing_bdev)
			blkdev_put(nbc->backing_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		if (nbc->md_bdev)
			blkdev_put(nbc->md_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		kfree(nbc);
	}
	lc_destroy(resync_lru);

 finish:
	drbd_adm_finish(info, retcode);
	return 0;
}
/* Detaching the disk is a process in multiple stages.  First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then we have finally detached. */
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
	retcode = drbd_request_state(mdev, NS(disk, D_DISKLESS));
	wait_event(mdev->misc_wait,
		   mdev->state.disk != D_DISKLESS ||
		   !atomic_read(&mdev->local_cnt));
	drbd_resume_io(mdev);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
{
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	struct drbd_conf *mdev;
	struct net_conf *new_conf = NULL;
	struct crypto_hash *tfm = NULL;
	struct crypto_hash *integrity_w_tfm = NULL;
	struct crypto_hash *integrity_r_tfm = NULL;
	void *int_dig_out = NULL;
	void *int_dig_in = NULL;
	void *int_dig_vv = NULL;
	struct drbd_tconn *oconn;
	struct drbd_tconn *tconn;
	struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
	enum drbd_ret_code retcode;
	int i;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tconn = adm_ctx.tconn;
	conn_reconfig_start(tconn);

	if (tconn->cstate > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, cqueue thread context */
	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	new_conf->timeout	   = DRBD_TIMEOUT_DEF;
	new_conf->try_connect_int  = DRBD_CONNECT_INT_DEF;
	new_conf->ping_int	   = DRBD_PING_INT_DEF;
	new_conf->max_epoch_size   = DRBD_MAX_EPOCH_SIZE_DEF;
	new_conf->max_buffers	   = DRBD_MAX_BUFFERS_DEF;
	new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
	new_conf->sndbuf_size	   = DRBD_SNDBUF_SIZE_DEF;
	new_conf->rcvbuf_size	   = DRBD_RCVBUF_SIZE_DEF;
	new_conf->ko_count	   = DRBD_KO_COUNT_DEF;
	new_conf->after_sb_0p	   = DRBD_AFTER_SB_0P_DEF;
	new_conf->after_sb_1p	   = DRBD_AFTER_SB_1P_DEF;
	new_conf->after_sb_2p	   = DRBD_AFTER_SB_2P_DEF;
	new_conf->want_lose	   = 0;
	new_conf->two_primaries	   = 0;
	new_conf->wire_protocol	   = DRBD_PROT_C;
	new_conf->ping_timeo	   = DRBD_PING_TIMEO_DEF;
	new_conf->rr_conflict	   = DRBD_RR_CONFLICT_DEF;
	new_conf->on_congestion	   = DRBD_ON_CONGESTION_DEF;
	new_conf->cong_extents	   = DRBD_CONG_EXTENTS_DEF;

	err = net_conf_from_attrs(new_conf, info->attrs);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}
	if (new_conf->two_primaries
	    && (new_conf->wire_protocol != DRBD_PROT_C)) {
		retcode = ERR_NOT_PROTO_C;
		goto fail;
	}

	idr_for_each_entry(&tconn->volumes, mdev, i) {
		if (get_ldev(mdev)) {
			enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
			put_ldev(mdev);
			if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
				retcode = ERR_STONITH_AND_PROT_A;
				goto fail;
			}
		}
		if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
			retcode = ERR_DISCARD;
			goto fail;
		}
		if (!mdev->bitmap) {
			if (drbd_bm_init(mdev)) {
				retcode = ERR_NOMEM;
				goto fail;
			}
		}
	}

	if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) {
		retcode = ERR_CONG_NOT_PROTO_A;
		goto fail;
	}

	retcode = NO_ERROR;

	new_my_addr = (struct sockaddr *)&new_conf->my_addr;
	new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;

	/* No need to take drbd_cfg_mutex here.  All reconfiguration is
	 * strictly serialized on genl_lock().  We are protected against
	 * concurrent reconfiguration/addition/deletion */
	list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
		if (oconn == tconn)
			continue;
		if (get_net_conf(oconn)) {
			taken_addr = (struct sockaddr *)&oconn->net_conf->my_addr;
			if (new_conf->my_addr_len == oconn->net_conf->my_addr_len &&
			    !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
				retcode = ERR_LOCAL_ADDR;

			taken_addr = (struct sockaddr *)&oconn->net_conf->peer_addr;
			if (new_conf->peer_addr_len == oconn->net_conf->peer_addr_len &&
			    !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
				retcode = ERR_PEER_ADDR;

			put_net_conf(oconn);
			if (retcode != NO_ERROR)
				goto fail;
		}
	}
	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			new_conf->cram_hmac_alg);
		tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(tfm)) {
			tfm = NULL;
			retcode = ERR_AUTH_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
			retcode = ERR_AUTH_ALG_ND;
			goto fail;
		}
	}

	if (new_conf->integrity_alg[0]) {
		integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_w_tfm)) {
			integrity_w_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
			retcode = ERR_INTEGRITY_ALG_ND;
			goto fail;
		}

		integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(integrity_r_tfm)) {
			integrity_r_tfm = NULL;
			retcode = ERR_INTEGRITY_ALG;
			goto fail;
		}
	}

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	/* allocation not in the IO path, cqueue thread context */
	if (integrity_w_tfm) {
		i = crypto_hash_digestsize(integrity_w_tfm);
		int_dig_out = kmalloc(i, GFP_KERNEL);
		if (!int_dig_out) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_in = kmalloc(i, GFP_KERNEL);
		if (!int_dig_in) {
			retcode = ERR_NOMEM;
			goto fail;
		}
		int_dig_vv = kmalloc(i, GFP_KERNEL);
		if (!int_dig_vv) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}
	conn_flush_workqueue(tconn);
	spin_lock_irq(&tconn->req_lock);
	if (tconn->net_conf != NULL) {
		retcode = ERR_NET_CONFIGURED;
		spin_unlock_irq(&tconn->req_lock);
		goto fail;
	}
	tconn->net_conf = new_conf;

	crypto_free_hash(tconn->cram_hmac_tfm);
	tconn->cram_hmac_tfm = tfm;

	crypto_free_hash(tconn->integrity_w_tfm);
	tconn->integrity_w_tfm = integrity_w_tfm;

	crypto_free_hash(tconn->integrity_r_tfm);
	tconn->integrity_r_tfm = integrity_r_tfm;

	kfree(tconn->int_dig_out);
	kfree(tconn->int_dig_in);
	kfree(tconn->int_dig_vv);
	tconn->int_dig_out = int_dig_out;
	tconn->int_dig_in = int_dig_in;
	tconn->int_dig_vv = int_dig_vv;
	retcode = _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
	spin_unlock_irq(&tconn->req_lock);

	idr_for_each_entry(&tconn->volumes, mdev, i) {
		mdev->send_cnt = 0;
		mdev->recv_cnt = 0;
		kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	}

	conn_reconfig_done(tconn);
	drbd_adm_finish(info, retcode);
	return 0;

fail:
	kfree(int_dig_out);
	kfree(int_dig_in);
	kfree(int_dig_vv);
	crypto_free_hash(tfm);
	crypto_free_hash(integrity_w_tfm);
	crypto_free_hash(integrity_r_tfm);
	kfree(new_conf);

	conn_reconfig_done(tconn);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct disconnect_parms parms;
	struct drbd_tconn *tconn;
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	tconn = adm_ctx.tconn;
	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
		err = disconnect_parms_from_attrs(&parms, info->attrs);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	if (parms.force_disconnect) {
		spin_lock_irq(&tconn->req_lock);
		if (tconn->cstate >= C_WF_CONNECTION)
			_conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
		spin_unlock_irq(&tconn->req_lock);
		goto done;
	}

	retcode = conn_request_state(tconn, NS(conn, C_DISCONNECTING), 0);

	if (retcode == SS_NOTHING_TO_DO)
		goto done;
	else if (retcode == SS_ALREADY_STANDALONE)
		goto done;
	else if (retcode == SS_PRIMARY_NOP) {
		/* Our state checking code wants to see the peer outdated. */
		retcode = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
							pdsk, D_OUTDATED), CS_VERBOSE);
	} else if (retcode == SS_CW_FAILED_BY_PEER) {
		/* The peer probably wants to see us outdated. */
		retcode = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
							disk, D_OUTDATED), 0);
		if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
			conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
			retcode = SS_SUCCESS;
		}
	}

	if (retcode < SS_SUCCESS)
		goto fail;

	if (wait_event_interruptible(tconn->ping_wait,
				     tconn->cstate != C_DISCONNECTING)) {
		/* Do not test for mdev->state.conn == C_STANDALONE, since
		   someone else might connect us in the mean time! */
		retcode = ERR_INTR;
		goto fail;
	}

 done:
	retcode = NO_ERROR;
 fail:
	drbd_adm_finish(info, retcode);
	return 0;
}
void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
	struct resize_parms rs;
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	enum dds_flags ddsf;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	memset(&rs, 0, sizeof(struct resize_parms));
	if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
		err = resize_parms_from_attrs(&rs, info->attrs);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	mdev = adm_ctx.mdev;
	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail;
	}

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		goto fail;
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determine_dev_size(mdev, ddsf);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == dev_size_error) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == grew)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

 fail:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_syncer(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto_hash *verify_tfm = NULL;
	struct crypto_hash *csums_tfm = NULL;
	struct syncer_conf sc;
	cpumask_var_t new_cpu_mask;
	int *rs_plan_s = NULL;
	int fifo_size;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;
	mdev = adm_ctx.mdev;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
		retcode = ERR_NOMEM;
		drbd_msg_put_info("unable to allocate cpumask");
		goto fail;
	}

	if (((struct drbd_genlmsghdr*)info->userhdr)->flags
	    & DRBD_GENL_F_SET_DEFAULTS) {
		memset(&sc, 0, sizeof(struct syncer_conf));
		sc.rate       = DRBD_RATE_DEF;
		sc.after      = DRBD_AFTER_DEF;
		sc.al_extents = DRBD_AL_EXTENTS_DEF;
		sc.on_no_data = DRBD_ON_NO_DATA_DEF;
		sc.c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF;
		sc.c_delay_target = DRBD_C_DELAY_TARGET_DEF;
		sc.c_fill_target = DRBD_C_FILL_TARGET_DEF;
		sc.c_max_rate = DRBD_C_MAX_RATE_DEF;
		sc.c_min_rate = DRBD_C_MIN_RATE_DEF;
	} else
		memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));

	err = syncer_conf_from_attrs(&sc, info->attrs);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	/* re-sync running */
	rsr = (	mdev->state.conn == C_SYNC_SOURCE ||
		mdev->state.conn == C_SYNC_TARGET ||
		mdev->state.conn == C_PAUSED_SYNC_S ||
		mdev->state.conn == C_PAUSED_SYNC_T );

	if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	if (!rsr && sc.csums_alg[0]) {
		csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(csums_tfm)) {
			csums_tfm = NULL;
			retcode = ERR_CSUMS_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
			retcode = ERR_CSUMS_ALG_ND;
			goto fail;
		}
	}

	/* online verify running */
	ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);

	if (ovr) {
		if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
			retcode = ERR_VERIFY_RUNNING;
			goto fail;
		}
	}

	if (!ovr && sc.verify_alg[0]) {
		verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
		if (IS_ERR(verify_tfm)) {
			verify_tfm = NULL;
			retcode = ERR_VERIFY_ALG;
			goto fail;
		}

		if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
			retcode = ERR_VERIFY_ALG_ND;
			goto fail;
		}
	}

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
		err = __bitmap_parse(sc.cpu_mask, 32, 0,
				cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err) {
			dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
			retcode = ERR_CPU_MASK_PARSE;
			goto fail;
		}
	}

	if (!expect(sc.rate >= 1))
		sc.rate = 1;

	/* clip to allowed range */
	if (!expect(sc.al_extents >= DRBD_AL_EXTENTS_MIN))
		sc.al_extents = DRBD_AL_EXTENTS_MIN;
	if (!expect(sc.al_extents <= DRBD_AL_EXTENTS_MAX))
		sc.al_extents = DRBD_AL_EXTENTS_MAX;

	/* most sanity checks done, try to assign the new sync-after
	 * dependency.  need to hold the global lock in there,
	 * to avoid a race in the dependency loop check. */
	retcode = drbd_alter_sa(mdev, sc.after);
	if (retcode != NO_ERROR)
		goto fail;

	fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
		rs_plan_s = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
		if (!rs_plan_s) {
			dev_err(DEV, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	/* ok, assign the rest of it as well.
	 * lock against receive_SyncParam() */
	spin_lock(&mdev->peer_seq_lock);
	mdev->sync_conf = sc;

	if (!rsr) {
		crypto_free_hash(mdev->csums_tfm);
		mdev->csums_tfm = csums_tfm;
		csums_tfm = NULL;
	}

	if (!ovr) {
		crypto_free_hash(mdev->verify_tfm);
		mdev->verify_tfm = verify_tfm;
		verify_tfm = NULL;
	}

	if (fifo_size != mdev->rs_plan_s.size) {
		kfree(mdev->rs_plan_s.values);
		mdev->rs_plan_s.values = rs_plan_s;
		mdev->rs_plan_s.size = fifo_size;
		mdev->rs_planed = 0;
	}

	spin_unlock(&mdev->peer_seq_lock);

	if (get_ldev(mdev)) {
		wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
		drbd_al_shrink(mdev);
		err = drbd_check_al_size(mdev);
		lc_unlock(mdev->act_log);
		wake_up(&mdev->al_wait);
		put_ldev(mdev);
		drbd_md_sync(mdev);

		if (err) {
			retcode = ERR_NOMEM;
			goto fail;
		}
	}

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev, &sc);

	if (!cpumask_equal(mdev->tconn->cpu_mask, new_cpu_mask)) {
		cpumask_copy(mdev->tconn->cpu_mask, new_cpu_mask);
		drbd_calc_cpu_mask(mdev->tconn);
		mdev->tconn->receiver.reset_cpu_mask = 1;
		mdev->tconn->asender.reset_cpu_mask = 1;
		mdev->tconn->worker.reset_cpu_mask = 1;
	}

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
fail:
	free_cpumask_var(new_cpu_mask);
	crypto_free_hash(csums_tfm);
	crypto_free_hash(verify_tfm);

	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));

	retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);

	if (retcode < SS_SUCCESS && retcode != SS_NEED_CONNECTION)
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));

	while (retcode == SS_NEED_CONNECTION) {
		spin_lock_irq(&mdev->tconn->req_lock);
		if (mdev->state.conn < C_CONNECTED)
			retcode = _drbd_set_state(_NS(mdev, disk, D_INCONSISTENT), CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->tconn->req_lock);

		if (retcode != SS_NEED_CONNECTION)
			break;

		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}
static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
	int rv;

	rv = drbd_bmio_set_n_write(mdev);
	drbd_suspend_al(mdev);
	return rv;
}

static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
		union drbd_state mask, union drbd_state val)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_request_state(adm_ctx.mdev, mask, val);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}

int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
}
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
	union drbd_state s;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = adm_ctx.mdev->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
	}
	drbd_suspend_io(mdev);
	retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (retcode == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev->tconn);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
	}
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}
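/* Emit the nested context attribute identifying a volume within a named
 * connection. Resulting attribute layout:
 *
 *	DRBD_NLA_CFG_CONTEXT (nest)
 *	    T_ctx_volume    (u32; omitted if vnr == VOLUME_UNSPECIFIED)
 *	    T_ctx_conn_name (string)
 */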
int nla_put_drbd_cfg_context(struct sk_buff *skb, const char *conn_name, unsigned vnr)
{
	struct nlattr *nla;
	nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		goto nla_put_failure;
	if (vnr != VOLUME_UNSPECIFIED)
		NLA_PUT_U32(skb, T_ctx_volume, vnr);
	NLA_PUT_STRING(skb, T_ctx_conn_name, conn_name);
	nla_nest_end(skb, nla);
	return 0;

nla_put_failure:
	if (nla)
		nla_nest_cancel(skb, nla);
	return -EMSGSIZE;
}
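/* Note: the legacy NLA_PUT_* macros from <net/netlink.h> jump to a local
 * "nla_put_failure" label when the skb runs out of tailroom, which is why
 * this helper and nla_put_status_info() below each provide one. */
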
int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
		const struct sib_info *sib)
{
	struct state_info *si = NULL; /* for sizeof(si->member); */
	struct nlattr *nla;
	int got_ldev;
	int got_net;
	int err = 0;
	int exclude_sensitive;

	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
	 * to. So we better exclude_sensitive information.
	 *
	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
	 * in the context of the requesting user process. Exclude sensitive
	 * information, unless current has CAP_SYS_ADMIN.
	 *
	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
	 * relies on the current implementation of netlink_dump(), which
	 * executes the dump callback successively from netlink_recvmsg(),
	 * always in the context of the receiving process. */
	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);

	got_ldev = get_ldev(mdev);
	got_net = get_net_conf(mdev->tconn);

	/* We need to add connection name and volume number information still.
	 * Minor number is in drbd_genlmsghdr. */
	if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
		goto nla_put_failure;

	if (got_ldev)
		if (disk_conf_to_skb(skb, &mdev->ldev->dc, exclude_sensitive))
			goto nla_put_failure;
	if (got_net)
		if (net_conf_to_skb(skb, mdev->tconn->net_conf, exclude_sensitive))
			goto nla_put_failure;

	if (syncer_conf_to_skb(skb, &mdev->sync_conf, exclude_sensitive))
		goto nla_put_failure;

	nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
	if (!nla)
		goto nla_put_failure;
	NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
	NLA_PUT_U32(skb, T_current_state, mdev->state.i);
	NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
	NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));

	if (got_ldev) {
		NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
		NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
		NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
		NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
		if (C_SYNC_SOURCE <= mdev->state.conn &&
		    C_PAUSED_SYNC_T >= mdev->state.conn) {
			NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
			NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
		}
	}

	if (sib) {
		switch (sib->sib_reason) {
		case SIB_SYNC_PROGRESS:
		case SIB_GET_STATUS_REPLY:
			break;
		case SIB_STATE_CHANGE:
			NLA_PUT_U32(skb, T_prev_state, sib->os.i);
			NLA_PUT_U32(skb, T_new_state, sib->ns.i);
			break;
		case SIB_HELPER_POST:
			NLA_PUT_U32(skb,
				T_helper_exit_code, sib->helper_exit_code);
			/* fall through */
		case SIB_HELPER_PRE:
			NLA_PUT_STRING(skb, T_helper, sib->helper_name);
			break;
		}
	}
	nla_nest_end(skb, nla);

	if (0)
nla_put_failure:
		err = -EMSGSIZE;
	if (got_ldev)
		put_ldev(mdev);
	if (got_net)
		put_net_conf(mdev->tconn);
	return err;
}
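/* The "if (0) nla_put_failure:" construct above lets the error label share
 * the regular epilogue: normal flow skips the err assignment, while the
 * NLA_PUT_* gotos land on it, and both paths fall through to the
 * put_ldev()/put_net_conf() cleanup. */
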
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_conf *mdev;
	struct drbd_genlmsghdr *dh;
	struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
	struct drbd_tconn *tconn = NULL;
	struct drbd_tconn *tmp;
	unsigned volume = cb->args[1];

	/* Open coded, deferred, iteration:
	 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
	 *	idr_for_each_entry(&tconn->volumes, mdev, i) {
	 *	  ...
	 *	}
	 * }
	 * where tconn is cb->args[0];
	 * and i is cb->args[1];
	 *
	 * This may miss entries inserted after this dump started,
	 * or entries deleted before they are reached.
	 *
	 * We need to make sure the mdev won't disappear while
	 * we are looking at it, and revalidate our iterators
	 * on each iteration.
	 */

	/* synchronize with drbd_new_tconn/drbd_free_tconn */
	mutex_lock(&drbd_cfg_mutex);
	/* synchronize with drbd_delete_device */
	rcu_read_lock();
next_tconn:
	/* revalidate iterator position */
	list_for_each_entry(tmp, &drbd_tconns, all_tconn) {
		if (pos == NULL) {
			/* first iteration */
			pos = tmp;
			tconn = pos;
			break;
		}
		if (tmp == pos) {
			tconn = pos;
			break;
		}
	}
	if (tconn) {
		mdev = idr_get_next(&tconn->volumes, &volume);
		if (!mdev) {
			/* No more volumes to dump on this tconn.
			 * Advance tconn iterator. */
			pos = list_entry(tconn->all_tconn.next,
					 struct drbd_tconn, all_tconn);
			/* But, did we dump any volume on this tconn yet? */
			if (volume != 0) {
				tconn = NULL;
				volume = 0;
				goto next_tconn;
			}
		}

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
				cb->nlh->nlmsg_seq, &drbd_genl_family,
				NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			goto out;

		if (!mdev) {
			/* this is a tconn without a single volume */
			dh->minor = -1U;
			dh->ret_code = NO_ERROR;
			if (nla_put_drbd_cfg_context(skb, tconn->name, VOLUME_UNSPECIFIED))
				genlmsg_cancel(skb, dh);
			else
				genlmsg_end(skb, dh);
			goto out;
		}

		D_ASSERT(mdev->vnr == volume);
		D_ASSERT(mdev->tconn == tconn);

		dh->minor = mdev_to_minor(mdev);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, mdev, NULL)) {
			genlmsg_cancel(skb, dh);
			goto out;
		}
		genlmsg_end(skb, dh);
	}

out:
	rcu_read_unlock();
	mutex_unlock(&drbd_cfg_mutex);
	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == tconn) ? volume + 1 : 0;

	/* No more tconns/volumes/minors found results in an empty skb.
	 * Which will terminate the dump. */
	return skb->len;
}
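/* Walk-through of the deferred iteration (hypothetical setup: connection "a"
 * with volumes 0 and 2, connection "b" with none):
 *
 *	args = { NULL, 0 }  ->  dump (a, volume 0);  args = { a, 1 }
 *	args = { a, 1 }     ->  dump (a, volume 2);  args = { a, 3 }
 *	args = { a, 3 }     ->  no volume left on a; advance to b, which is
 *	                        empty: dump "b" itself with minor -1U
 *	next call           ->  nothing to dump, empty skb ends the dump
 */
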
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct timeout_parms tp;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tp.timeout_type =
		adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
		UT_DEFAULT;

	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		/* resume from last known position, if possible */
		struct start_ov_parms parms =
			{ .ov_start_sector = mdev->ov_start_sector };
		int err = start_ov_parms_from_attrs(&parms, info->attrs);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
		/* w_make_ov_request expects position to be aligned */
		mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
	}
	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
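/* With the default 4KiB-per-bit bitmap granularity, BM_SECT_PER_BIT is 8
 * (512-byte) sectors, so the "& ~(BM_SECT_PER_BIT-1)" above rounds a
 * requested start sector of e.g. 1003 down to 1000. */
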
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out_nolock;

	mdev = adm_ctx.mdev;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info->attrs);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			drbd_print_uuids(mdev, "cleared bitmap UUID");
			spin_lock_irq(&mdev->tconn->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->tconn->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(mdev->state_mutex);
out_nolock:
	drbd_adm_finish(info, retcode);
	return 0;
}
static enum drbd_ret_code
drbd_check_conn_name(const char *name)
{
	if (!name || !name[0]) {
		drbd_msg_put_info("connection name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info("invalid connection name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}
int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_check_conn_name(adm_ctx.conn_name);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx.tconn) {
		retcode = ERR_INVALID_REQUEST;
		drbd_msg_put_info("connection exists");
		goto out;
	}

	if (!drbd_new_tconn(adm_ctx.conn_name))
		retcode = ERR_NOMEM;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_genlmsghdr *dh = info->userhdr;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	/* FIXME drop minor_count parameter, limit to MINORMASK */
	if (dh->minor >= minor_count) {
		drbd_msg_put_info("requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}
	/* FIXME we need a define here */
	if (adm_ctx.volume >= 256) {
		drbd_msg_put_info("requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
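/* Note the two namespaces involved above: dh->minor is the global /dev/drbdN
 * device number, while adm_ctx.volume numbers the device within its
 * connection; conn_new_minor() registers the new device under both. */
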
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (mdev->state.disk == D_DISKLESS &&
	    mdev->state.conn == C_STANDALONE &&
	    mdev->state.role == R_SECONDARY) {
		drbd_delete_device(mdev_to_minor(mdev));
		retcode = NO_ERROR;
	} else
		retcode = ERR_MINOR_CONFIGURED;
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		drbd_free_tconn(adm_ctx.tconn);
		retcode = NO_ERROR;
	} else {
		retcode = ERR_CONN_IN_USE;
	}

out:
	drbd_adm_finish(info, retcode);
	return 0;
}
void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
{
	static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyways. */
		goto nla_put_failure;
	d_out->minor = mdev_to_minor(mdev);
	d_out->ret_code = 0;

	if (nla_put_status_info(msg, mdev, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, 0);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	dev_err(DEV, "Error %d while broadcasting event. "
		"Event seq:%u sib_reason:%u\n",
		err, seq, sib->sib_reason);
}
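/* Consumer side sketch (assuming the "events" multicast group declared in
 * drbd_genl.h, from which genl_magic_func generates
 * drbd_genl_multicast_events()): userspace binds a generic netlink socket
 * to that group and receives one DRBD_EVENT message per broadcast, carrying
 * the same nested status payload as a DRBD_ADM_GET_STATUS reply. */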