int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
#include <linux/drbd_genl_api.h>
+#include "drbd_nla.h"
#include <linux/genl_magic_func.h>
/* used by blkdev_get_by_path to claim our meta data device(s) */
#define VOLUME_UNSPECIFIED (-1U)
/* pointer into the request skb,
* limited lifetime! */
- char *conn_name;
+ char *resource_name;
+ struct nlattr *my_addr;
+ struct nlattr *peer_addr;
/* reply buffer */
struct sk_buff *reply_skb;
* If it returns successfully, adm_ctx members are valid.
*/
#define DRBD_ADM_NEED_MINOR 1
-#define DRBD_ADM_NEED_CONN 2
+#define DRBD_ADM_NEED_RESOURCE 2
+#define DRBD_ADM_NEED_CONNECTION 4
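+/* These are bit flags; they may be combined. DRBD_ADM_NEED_CONNECTION
+ * without DRBD_ADM_NEED_RESOURCE means the connection is identified by
+ * its endpoint addresses rather than by resource name, see
+ * drbd_adm_prepare() below. */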
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
unsigned flags)
{
memset(&adm_ctx, 0, sizeof(adm_ctx));
/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
- if (cmd != DRBD_ADM_GET_STATUS
- && security_netlink_recv(skb, CAP_SYS_ADMIN))
+ if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
return -EPERM;
adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
adm_ctx.reply_dh->minor = d_in->minor;
adm_ctx.reply_dh->ret_code = NO_ERROR;
+ adm_ctx.volume = VOLUME_UNSPECIFIED;
if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
struct nlattr *nla;
/* parse and validate only */
/* and assign stuff to the global adm_ctx */
nla = nested_attr_tb[__nla_type(T_ctx_volume)];
- adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
- nla = nested_attr_tb[__nla_type(T_ctx_conn_name)];
if (nla)
- adm_ctx.conn_name = nla_data(nla);
- } else
- adm_ctx.volume = VOLUME_UNSPECIFIED;
+ adm_ctx.volume = nla_get_u32(nla);
+ nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
+ if (nla)
+ adm_ctx.resource_name = nla_data(nla);
+ adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
+ adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
+ if ((adm_ctx.my_addr &&
+ nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) ||
+ (adm_ctx.peer_addr &&
+ nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) {
+ err = -EINVAL;
+ goto fail;
+ }
+ }
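+	/* Only a length check here; note that sizeof() does not dereference
+	 * adm_ctx.tconn, which is not assigned yet. The attributes themselves
+	 * are consumed later, e.g. copied into the tconn by drbd_adm_connect(). */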
adm_ctx.minor = d_in->minor;
adm_ctx.mdev = minor_to_mdev(d_in->minor);
- adm_ctx.tconn = conn_get_by_name(adm_ctx.conn_name);
+ adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);
if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
drbd_msg_put_info("unknown minor");
return ERR_MINOR_INVALID;
}
- if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
- drbd_msg_put_info("unknown connection");
+ if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) {
+ drbd_msg_put_info("unknown resource");
return ERR_INVALID_REQUEST;
}
+ if (flags & DRBD_ADM_NEED_CONNECTION) {
+ if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) {
+ drbd_msg_put_info("no resource name expected");
+ return ERR_INVALID_REQUEST;
+ }
+ if (adm_ctx.mdev) {
+ drbd_msg_put_info("no minor number expected");
+ return ERR_INVALID_REQUEST;
+ }
+ if (adm_ctx.my_addr && adm_ctx.peer_addr)
+ adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
+ nla_len(adm_ctx.my_addr),
+ nla_data(adm_ctx.peer_addr),
+ nla_len(adm_ctx.peer_addr));
+ if (!adm_ctx.tconn) {
+ drbd_msg_put_info("unknown connection");
+ return ERR_INVALID_REQUEST;
+ }
+ }
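+	/* At this point a DRBD_ADM_NEED_CONNECTION command holds a tconn
+	 * reference, found either by resource name above or by the
+	 * my_addr/peer_addr pair. */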
+
/* some more paranoia, if the request was over-determined */
if (adm_ctx.mdev && adm_ctx.tconn &&
adm_ctx.mdev->tconn != adm_ctx.tconn) {
- pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
- adm_ctx.minor, adm_ctx.conn_name, adm_ctx.mdev->tconn->name);
- drbd_msg_put_info("minor exists in different connection");
+ pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
+ adm_ctx.minor, adm_ctx.resource_name,
+ adm_ctx.mdev->tconn->name);
+ drbd_msg_put_info("minor exists in different resource");
return ERR_INVALID_REQUEST;
}
if (adm_ctx.mdev &&
static int drbd_adm_finish(struct genl_info *info, int retcode)
{
- struct nlattr *nla;
- const char *conn_name = NULL;
-
if (adm_ctx.tconn) {
kref_put(&adm_ctx.tconn->kref, &conn_destroy);
adm_ctx.tconn = NULL;
return -ENOMEM;
adm_ctx.reply_dh->ret_code = retcode;
-
- nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
- if (nla) {
- nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
- if (nla)
- conn_name = nla_data(nla);
- }
-
drbd_adm_send_reply(adm_ctx.reply_skb, info);
return 0;
}
static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
{
char *afs;
- struct net_conf *nc;
- rcu_read_lock();
- nc = rcu_dereference(tconn->net_conf);
- if (nc) {
- switch (((struct sockaddr *)nc->peer_addr)->sa_family) {
- case AF_INET6:
- afs = "ipv6";
- snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
- &((struct sockaddr_in6 *)nc->peer_addr)->sin6_addr);
- break;
- case AF_INET:
- afs = "ipv4";
- snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
- &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
- break;
- default:
- afs = "ssocks";
- snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
- &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
- }
- snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
+ /* FIXME: A future version will not allow this case. */
+ if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0)
+ return;
+
+ switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) {
+ case AF_INET6:
+ afs = "ipv6";
+ snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
+ &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr);
+ break;
+ case AF_INET:
+ afs = "ipv4";
+ snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
+ &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
+ break;
+ default:
+ afs = "ssocks";
+ snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
+ &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
}
- rcu_read_unlock();
+ snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}
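+/* Note: setup_khelper_env() fills envp[3] and envp[4] in place; callers
+ * must point these slots at writable buffers of at least 20 and 60
+ * bytes, matching the snprintf() limits above. */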
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
NULL };
char mb[12];
char *argv[] = {usermode_helper, cmd, mb, NULL };
+ struct drbd_tconn *tconn = mdev->tconn;
struct sib_info sib;
int ret;
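+	/* If the worker itself runs the helper, flag it: while CALLBACK_PENDING
+	 * is set, other contexts must not block waiting on the worker, which
+	 * cannot make progress until the helper returns. */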
+ if (current == tconn->worker.task)
+ set_bit(CALLBACK_PENDING, &tconn->flags);
+
snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
- setup_khelper_env(mdev->tconn, envp);
+ setup_khelper_env(tconn, envp);
/* The helper may take some time.
* write out any unsynced meta data changes now */
sib.sib_reason = SIB_HELPER_PRE;
sib.helper_name = cmd;
drbd_bcast_event(mdev, &sib);
- ret = call_usermodehelper(usermode_helper, argv, envp, 1);
+ ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
if (ret)
dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
usermode_helper, cmd, mb,
sib.helper_exit_code = ret;
drbd_bcast_event(mdev, &sib);
+ if (current == tconn->worker.task)
+ clear_bit(CALLBACK_PENDING, &tconn->flags);
+
if (ret < 0) /* Ignore any ERRNOs we got. */
ret = 0;
return ret;
}
-static void conn_md_sync(struct drbd_tconn *tconn)
-{
- struct drbd_conf *mdev;
- int vnr;
-
- rcu_read_lock();
- idr_for_each_entry(&tconn->volumes, mdev, vnr) {
- kref_get(&mdev->kref);
- rcu_read_unlock();
- drbd_md_sync(mdev);
- kref_put(&mdev->kref, &drbd_minor_destroy);
- rcu_read_lock();
- }
- rcu_read_unlock();
-}
-
int conn_khelper(struct drbd_tconn *tconn, char *cmd)
{
char *envp[] = { "HOME=/",
conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
/* TODO: conn_bcast_event() ?? */
- ret = call_usermodehelper(usermode_helper, argv, envp, 1);
+ ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
if (ret)
conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
usermode_helper, cmd, tconn->name,
here, because we might have been able to re-establish the connection in the
meantime. */
spin_lock_irq(&tconn->req_lock);
- if (tconn->cstate < C_WF_REPORT_PARAMS)
+ if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags))
_conn_request_state(tconn, mask, val, CS_VERBOSE);
spin_unlock_irq(&tconn->req_lock);
/* Wait until nothing is on the fly :) */
wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
+ /* FIXME also wait for all pending P_BARRIER_ACK? */
+
if (new_role == R_SECONDARY) {
set_disk_ro(mdev->vdisk, true);
if (get_ldev(mdev)) {
/* if this was forced, we should consider sync */
if (forced)
drbd_send_uuids(mdev);
- drbd_send_state(mdev);
+ drbd_send_current_state(mdev);
}
drbd_md_sync(mdev);
la_size_changed && md_moved ? "size changed and md moved" :
la_size_changed ? "size changed" : "md moved");
/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
- err = drbd_bitmap_io(mdev, &drbd_bm_write,
- "size changed", BM_LOCKED_MASK);
+ err = drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
+ "size changed", BM_LOCKED_MASK);
if (err) {
rv = dev_size_error;
goto out;
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
struct request_queue * const q = mdev->rq_queue;
- int max_hw_sectors = max_bio_size >> 9;
- int max_segments = 0;
+ unsigned int max_hw_sectors = max_bio_size >> 9;
+ unsigned int max_segments = 0;
if (get_ldev_if_state(mdev, D_ATTACHING)) {
struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
- int now, new, local, peer;
+ unsigned int now, new, local, peer;
now = queue_max_hw_sectors(mdev->rq_queue) << 9;
local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
mdev->local_max_bio_size = local;
put_ldev(mdev);
}
+ local = min(local, DRBD_MAX_BIO_SIZE);
/* We may ignore peer limits if the peer is modern enough.
   From 8.3.8 onwards the peer can use multiple
BIOs for a single peer_request */
if (mdev->state.conn >= C_CONNECTED) {
if (mdev->tconn->agreed_pro_version < 94)
- peer = mdev->peer_max_bio_size;
+ peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+ /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
else if (mdev->tconn->agreed_pro_version == 94)
peer = DRBD_MAX_SIZE_H80_PACKET;
- else /* drbd 8.3.8 onwards */
+ else if (mdev->tconn->agreed_pro_version < 100)
+ peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */
+ else
peer = DRBD_MAX_BIO_SIZE;
}
- new = min_t(int, local, peer);
+ new = min(local, peer);
if (mdev->state.role == R_PRIMARY && new < now)
- dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);
+ dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
if (new != now)
dev_info(DEV, "max BIO size = %u\n", new);
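+	/* Worked example: with a local limit of 128 KiB and a peer older than
+	 * protocol 94 (drbd up to 8.3.7), peer is capped at
+	 * DRBD_MAX_SIZE_H80_PACKET (32 KiB), so new = min(local, peer) = 32 KiB. */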
{
bool stop_threads;
spin_lock_irq(&tconn->req_lock);
- stop_threads = conn_all_vols_unconf(tconn);
+ stop_threads = conn_all_vols_unconf(tconn) &&
+ tconn->cstate == C_STANDALONE;
spin_unlock_irq(&tconn->req_lock);
if (stop_threads) {
/* asender is implicitly stopped by receiver
}
}
+ drbd_suspend_io(mdev);
wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
drbd_al_shrink(mdev);
err = drbd_check_al_size(mdev, new_disk_conf);
lc_unlock(mdev->act_log);
wake_up(&mdev->al_wait);
+ drbd_resume_io(mdev);
if (err) {
retcode = ERR_NOMEM;
}
mutex_unlock(&mdev->tconn->conf_update);
+
+ if (new_disk_conf->al_updates)
+ mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
+ else
+ mdev->ldev->md.flags |= MDF_AL_DISABLED;
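+	/* MDF_AL_DISABLED is persisted in the meta data: if a primary crashes
+	 * while AL updates are disabled, attach assumes all blocks are out of
+	 * sync (see the CRASHED_PRIMARY check in drbd_adm_attach() below). */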
+
+ if (new_disk_conf->md_flushes)
+ clear_bit(MD_NO_FUA, &mdev->flags);
+ else
+ set_bit(MD_NO_FUA, &mdev->flags);
+
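+	/* The flush/FUA configuration may have changed; re-evaluate the
+	 * write ordering method. */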
+ drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
+
drbd_md_sync(mdev);
if (mdev->state.conn >= C_CONNECTED)
synchronize_rcu();
kfree(old_disk_conf);
kfree(old_plan);
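+	/* Re-arm the request timer so that changed timeout settings take
+	 * effect within a second. */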
+ mod_timer(&mdev->request_timer, jiffies + HZ);
goto success;
fail_unlock:
union drbd_state ns, os;
enum drbd_state_rv rv;
struct net_conf *nc;
- int cp_discovered = 0;
retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
* to realize a "hot spare" feature (not that I'd recommend that) */
wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
+ /* make sure there is no leftover from previous force-detach attempts */
+ clear_bit(FORCE_DETACH, &mdev->flags);
+ clear_bit(WAS_IO_ERROR, &mdev->flags);
+ clear_bit(WAS_READ_ERROR, &mdev->flags);
+
+ /* and no leftover from previously aborted resync or verify, either */
+ mdev->rs_total = 0;
+ mdev->rs_failed = 0;
+ atomic_set(&mdev->rs_pending_cnt, 0);
+
/* allocation not in the IO path, drbdsetup context */
nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
if (!nbc) {
retcode = ERR_NOMEM;
goto fail;
}
+ spin_lock_init(&nbc->md.uuid_lock);
+
new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
if (!new_disk_conf) {
retcode = ERR_NOMEM;
drbd_suspend_io(mdev);
/* also wait for the last barrier ack. */
+ /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
+ * We need a way to either ignore barrier acks for barriers sent before a device
+ * was attached, or a way to wait for all pending barrier acks to come in.
+ * As barriers are counted per resource,
+ * we'd need to suspend io on all devices of a resource.
+ */
wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
/* and for any other previously queued work */
drbd_flush_workqueue(mdev);
goto force_diskless_dec;
}
- if (!drbd_al_read_log(mdev, nbc)) {
- retcode = ERR_IO_MD_DISK;
- goto force_diskless_dec;
- }
-
/* Reset the "barriers don't work" bits here, then force meta data to
* be written, to ensure we determine if barriers are supported. */
if (new_disk_conf->md_flushes)
new_disk_conf = NULL;
new_plan = NULL;
- mdev->write_ordering = WO_bdev_flush;
- drbd_bump_write_ordering(mdev, WO_bdev_flush);
+ drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
set_bit(CRASHED_PRIMARY, &mdev->flags);
clear_bit(CRASHED_PRIMARY, &mdev->flags);
if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
- !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod)) {
+ !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod))
set_bit(CRASHED_PRIMARY, &mdev->flags);
- cp_discovered = 1;
- }
mdev->send_cnt = 0;
mdev->recv_cnt = 0;
} else if (dd == grew)
set_bit(RESYNC_AFTER_NEG, &mdev->flags);
- if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
+ if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) ||
+ (test_bit(CRASHED_PRIMARY, &mdev->flags) &&
+ drbd_md_test_flag(mdev->ldev, MDF_AL_DISABLED))) {
dev_info(DEV, "Assuming that all blocks are out of sync "
"(aka FullSync)\n");
if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
}
}
- if (cp_discovered) {
- drbd_al_apply_to_bm(mdev);
- if (drbd_bitmap_io(mdev, &drbd_bm_write,
- "crashed primary apply AL", BM_LOCKED_MASK)) {
- retcode = ERR_IO_MD_DISK;
- goto force_diskless_dec;
- }
- }
-
if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
drbd_suspend_al(mdev); /* IO is still suspended here... */
if (ns.disk == D_CONSISTENT &&
(ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
ns.disk = D_UP_TO_DATE;
- rcu_read_unlock();
/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
this point, because drbd_request_state() modifies these
flags. */
+ if (rcu_dereference(mdev->ldev->disk_conf)->al_updates)
+ mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
+ else
+ mdev->ldev->md.flags |= MDF_AL_DISABLED;
+
+ rcu_read_unlock();
+
/* In case we are C_CONNECTED postpone any decision on the new disk
state after the negotiation phase. */
if (mdev->state.conn == C_CONNECTED) {
if (rv < SS_SUCCESS)
goto force_diskless_dec;
+ mod_timer(&mdev->request_timer, jiffies + HZ);
+
if (mdev->state.role == R_PRIMARY)
mdev->ldev->md.uuid[UI_CURRENT] |= (u64)1;
else
force_diskless_dec:
put_ldev(mdev);
force_diskless:
- drbd_force_state(mdev, NS(disk, D_FAILED));
+ drbd_force_state(mdev, NS(disk, D_DISKLESS));
drbd_md_sync(mdev);
fail:
conn_reconfig_done(mdev->tconn);
return 0;
}
-static int adm_detach(struct drbd_conf *mdev)
+static int adm_detach(struct drbd_conf *mdev, int force)
{
enum drbd_state_rv retcode;
int ret;
+
+ if (force) {
+ set_bit(FORCE_DETACH, &mdev->flags);
+ drbd_force_state(mdev, NS(disk, D_FAILED));
+ retcode = SS_SUCCESS;
+ goto out;
+ }
+
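+	/* Without force, wait for in-flight meta data IO to finish and go
+	 * through a regular, failable state change. */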
drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
+ drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
+ drbd_md_put_buffer(mdev);
/* D_FAILED will transition to DISKLESS. */
ret = wait_event_interruptible(mdev->misc_wait,
mdev->state.disk != D_FAILED);
retcode = SS_NOTHING_TO_DO;
if (ret)
retcode = ERR_INTR;
+out:
return retcode;
}
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
enum drbd_ret_code retcode;
+ struct detach_parms parms = { };
+ int err;
retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
if (retcode != NO_ERROR)
goto out;
- retcode = adm_detach(adm_ctx.mdev);
+ if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
+ err = detach_parms_from_attrs(&parms, info);
+ if (err) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto out;
+ }
+ }
+
+ retcode = adm_detach(adm_ctx.mdev, parms.force_detach);
out:
drbd_adm_finish(info, retcode);
return 0;
if (new_conf->two_primaries != old_conf->two_primaries)
return ERR_NEED_APV_100;
- if (!new_conf->integrity_alg != !old_conf->integrity_alg)
- return ERR_NEED_APV_100;
-
if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
return ERR_NEED_APV_100;
}
return ERR_STONITH_AND_PROT_A;
}
if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
- return ERR_DISCARD;
+ return ERR_DISCARD_IMPOSSIBLE;
}
if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
struct crypto_hash *csums_tfm;
struct crypto_hash *cram_hmac_tfm;
struct crypto_hash *integrity_tfm;
- void *int_dig_in;
- void *int_dig_vv;
};
static int
{
char hmac_name[CRYPTO_MAX_ALG_NAME];
enum drbd_ret_code rv;
- int hash_size;
rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
ERR_CSUMS_ALG);
rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
ERR_AUTH_ALG);
}
- if (crypto->integrity_tfm) {
- hash_size = crypto_hash_digestsize(crypto->integrity_tfm);
- crypto->int_dig_in = kmalloc(hash_size, GFP_KERNEL);
- if (!crypto->int_dig_in)
- return ERR_NOMEM;
- crypto->int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
- if (!crypto->int_dig_vv)
- return ERR_NOMEM;
- }
return rv;
}
static void free_crypto(struct crypto *crypto)
{
- kfree(crypto->int_dig_in);
- kfree(crypto->int_dig_vv);
crypto_free_hash(crypto->cram_hmac_tfm);
crypto_free_hash(crypto->integrity_tfm);
crypto_free_hash(crypto->csums_tfm);
int rsr; /* re-sync running */
struct crypto crypto = { };
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
crypto.verify_tfm = NULL;
}
- kfree(tconn->int_dig_in);
- tconn->int_dig_in = crypto.int_dig_in;
- kfree(tconn->int_dig_vv);
- tconn->int_dig_vv = crypto.int_dig_vv;
crypto_free_hash(tconn->integrity_tfm);
tconn->integrity_tfm = crypto.integrity_tfm;
if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
struct drbd_conf *mdev;
struct net_conf *old_conf, *new_conf = NULL;
struct crypto crypto = { };
- struct drbd_tconn *oconn;
struct drbd_tconn *tconn;
- struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
enum drbd_ret_code retcode;
int i;
int err;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
+
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
+ if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
+ drbd_msg_put_info("connection endpoint(s) missing");
+ retcode = ERR_INVALID_REQUEST;
+ goto out;
+ }
+
+ /* No need for _rcu here. All reconfiguration is
+ * strictly serialized on genl_lock(). We are protected against
+ * concurrent reconfiguration/addition/deletion */
+ list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
+ if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len &&
+ !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) {
+ retcode = ERR_LOCAL_ADDR;
+ goto out;
+ }
+
+ if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len &&
+ !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) {
+ retcode = ERR_PEER_ADDR;
+ goto out;
+ }
+ }
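+	/* An endpoint only counts as already in use if both the sockaddr
+	 * length and all of its bytes match an existing connection. */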
tconn = adm_ctx.tconn;
conn_reconfig_start(tconn);
goto fail;
}
- /* allocation not in the IO path, cqueue thread context */
+ /* allocation not in the IO path, drbdsetup / netlink process context */
new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
if (!new_conf) {
retcode = ERR_NOMEM;
set_net_conf_defaults(new_conf);
err = net_conf_from_attrs(new_conf, info);
- if (err) {
+ if (err && err != -ENOMSG) {
retcode = ERR_MANDATORY_TAG;
drbd_msg_put_info(from_attrs_err_to_txt(err));
goto fail;
if (retcode != NO_ERROR)
goto fail;
- retcode = NO_ERROR;
-
- new_my_addr = (struct sockaddr *)&new_conf->my_addr;
- new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
-
- /* No need for _rcu here. All reconfiguration is
- * strictly serialized on genl_lock(). We are protected against
- * concurrent reconfiguration/addition/deletion */
- list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
- struct net_conf *nc;
- if (oconn == tconn)
- continue;
-
- rcu_read_lock();
- nc = rcu_dereference(oconn->net_conf);
- if (nc) {
- taken_addr = (struct sockaddr *)&nc->my_addr;
- if (new_conf->my_addr_len == nc->my_addr_len &&
- !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
- retcode = ERR_LOCAL_ADDR;
-
- taken_addr = (struct sockaddr *)&nc->peer_addr;
- if (new_conf->peer_addr_len == nc->peer_addr_len &&
- !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
- retcode = ERR_PEER_ADDR;
- }
- rcu_read_unlock();
- if (retcode != NO_ERROR)
- goto fail;
- }
-
retcode = alloc_crypto(&crypto, new_conf);
if (retcode != NO_ERROR)
goto fail;
rcu_assign_pointer(tconn->net_conf, new_conf);
conn_free_crypto(tconn);
- tconn->int_dig_in = crypto.int_dig_in;
- tconn->int_dig_vv = crypto.int_dig_vv;
tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
tconn->integrity_tfm = crypto.integrity_tfm;
tconn->csums_tfm = crypto.csums_tfm;
tconn->verify_tfm = crypto.verify_tfm;
+ tconn->my_addr_len = nla_len(adm_ctx.my_addr);
+ memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len);
+ tconn->peer_addr_len = nla_len(adm_ctx.peer_addr);
+ memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len);
+
mutex_unlock(&tconn->conf_update);
rcu_read_lock();
enum drbd_ret_code retcode;
int err;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
if (mdev->state.role != mdev->state.peer)
iass = (mdev->state.role == R_PRIMARY);
else
- iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
+ iass = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
if (iass)
drbd_start_resync(mdev, C_SYNC_SOURCE);
if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
retcode = ERR_NEED_APV_93;
- goto fail;
+ goto fail_ldev;
}
rcu_read_lock();
new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
if (!new_disk_conf) {
retcode = ERR_NOMEM;
- goto fail;
+ goto fail_ldev;
}
}
fail:
drbd_adm_finish(info, retcode);
return 0;
-}
-void drbd_set_res_opts_defaults(struct res_opts *r)
-{
- return set_res_opts_defaults(r);
+ fail_ldev:
+ put_ldev(mdev);
+ goto fail;
}
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
enum drbd_ret_code retcode;
- cpumask_var_t new_cpu_mask;
struct drbd_tconn *tconn;
struct res_opts res_opts;
int err;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto fail;
tconn = adm_ctx.tconn;
- if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
- retcode = ERR_NOMEM;
- drbd_msg_put_info("unable to allocate cpumask");
- goto fail;
- }
-
res_opts = tconn->res_opts;
if (should_set_defaults(info))
set_res_opts_defaults(&res_opts);
goto fail;
}
- /* silently ignore cpu mask on UP kernel */
- if (nr_cpu_ids > 1 && res_opts.cpu_mask[0] != 0) {
- err = __bitmap_parse(res_opts.cpu_mask, 32, 0,
- cpumask_bits(new_cpu_mask), nr_cpu_ids);
- if (err) {
- conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
- retcode = ERR_CPU_MASK_PARSE;
- goto fail;
- }
- }
-
-
- tconn->res_opts = res_opts;
-
- if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
- cpumask_copy(tconn->cpu_mask, new_cpu_mask);
- drbd_calc_cpu_mask(tconn);
- tconn->receiver.reset_cpu_mask = 1;
- tconn->asender.reset_cpu_mask = 1;
- tconn->worker.reset_cpu_mask = 1;
+ err = set_resource_options(tconn, &res_opts);
+ if (err) {
+ retcode = ERR_INVALID_REQUEST;
+ if (err == -ENOMEM)
+ retcode = ERR_NOMEM;
}
fail:
- free_cpumask_var(new_cpu_mask);
-
drbd_adm_finish(info, retcode);
return 0;
}
mdev = adm_ctx.mdev;
/* If there is still bitmap IO pending, probably because of a previous
- * resync just being finished, wait for it before requesting a new resync. */
+ * resync just being finished, wait for it before requesting a new resync.
+ * Also wait for its after_state_ch(). */
+ drbd_suspend_io(mdev);
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+ drbd_flush_workqueue(mdev);
retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
}
+ drbd_resume_io(mdev);
out:
drbd_adm_finish(info, retcode);
return 0;
}
+static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
+ union drbd_state mask, union drbd_state val)
+{
+ enum drbd_ret_code retcode;
+
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+ if (!adm_ctx.reply_skb)
+ return retcode;
+ if (retcode != NO_ERROR)
+ goto out;
+
+ retcode = drbd_request_state(adm_ctx.mdev, mask, val);
+out:
+ drbd_adm_finish(info, retcode);
+ return 0;
+}
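+
+/* NS(field, val) builds the mask/value pair for changing a single
+ * field of union drbd_state; drbd_adm_outdate(), for example, passes
+ * NS(disk, D_OUTDATED) through this helper. */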
+
static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
int rv;
return rv;
}
-static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
- union drbd_state mask, union drbd_state val)
+int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
- enum drbd_ret_code retcode;
+ int retcode; /* drbd_ret_code, drbd_state_rv */
+ struct drbd_conf *mdev;
retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
if (retcode != NO_ERROR)
goto out;
- retcode = drbd_request_state(adm_ctx.mdev, mask, val);
+ mdev = adm_ctx.mdev;
+
+ /* If there is still bitmap IO pending, probably because of a previous
+ * resync just being finished, wait for it before requesting a new resync.
+ * Also wait for its after_state_ch(). */
+ drbd_suspend_io(mdev);
+ wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+ drbd_flush_workqueue(mdev);
+
+ retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
+ if (retcode < SS_SUCCESS) {
+ if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
+ /* The peer will get a resync upon connect anyway.
+ * Just make that into a full resync. */
+ retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
+ if (retcode >= SS_SUCCESS) {
+ if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
+ "set_n_write from invalidate_peer",
+ BM_LOCKED_SET_ALLOWED))
+ retcode = ERR_IO_MD_DISK;
+ }
+ } else
+ retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
+ }
+ drbd_resume_io(mdev);
+
out:
drbd_adm_finish(info, retcode);
return 0;
}
-int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
-{
- return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
-}
-
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
enum drbd_ret_code retcode;
return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}
-int nla_put_drbd_cfg_context(struct sk_buff *skb, const char *conn_name, unsigned vnr)
+int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr)
{
struct nlattr *nla;
nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
if (!nla)
goto nla_put_failure;
- if (vnr != VOLUME_UNSPECIFIED)
- NLA_PUT_U32(skb, T_ctx_volume, vnr);
- NLA_PUT_STRING(skb, T_ctx_conn_name, conn_name);
+ if (vnr != VOLUME_UNSPECIFIED &&
+ nla_put_u32(skb, T_ctx_volume, vnr))
+ goto nla_put_failure;
+ if (nla_put_string(skb, T_ctx_resource_name, tconn->name))
+ goto nla_put_failure;
+ if (tconn->my_addr_len &&
+ nla_put(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr))
+ goto nla_put_failure;
+ if (tconn->peer_addr_len &&
+ nla_put(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr))
+ goto nla_put_failure;
nla_nest_end(skb, nla);
return 0;
/* We need to add resource name and volume number information still.
* Minor number is in drbd_genlmsghdr. */
- if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
+ if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr))
goto nla_put_failure;
if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
if (!nla)
goto nla_put_failure;
- NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
- NLA_PUT_U32(skb, T_current_state, mdev->state.i);
- NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
- NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));
+ if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
+ nla_put_u32(skb, T_current_state, mdev->state.i) ||
+ nla_put_u64(skb, T_ed_uuid, mdev->ed_uuid) ||
+ nla_put_u64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev)) ||
+ nla_put_u64(skb, T_send_cnt, mdev->send_cnt) ||
+ nla_put_u64(skb, T_recv_cnt, mdev->recv_cnt) ||
+ nla_put_u64(skb, T_read_cnt, mdev->read_cnt) ||
+ nla_put_u64(skb, T_writ_cnt, mdev->writ_cnt) ||
+ nla_put_u64(skb, T_al_writ_cnt, mdev->al_writ_cnt) ||
+ nla_put_u64(skb, T_bm_writ_cnt, mdev->bm_writ_cnt) ||
+ nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&mdev->ap_bio_cnt)) ||
+ nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&mdev->ap_pending_cnt)) ||
+ nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&mdev->rs_pending_cnt)))
+ goto nla_put_failure;
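+	/* Each nla_put_*() returns non-zero when the skb runs out of tail
+	 * room; the || chain short-circuits to nla_put_failure on the first
+	 * failed attribute. */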
if (got_ldev) {
- NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
- NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
- NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
- NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
+ int err;
+
+ spin_lock_irq(&mdev->ldev->md.uuid_lock);
+ err = nla_put(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
+ spin_unlock_irq(&mdev->ldev->md.uuid_lock);
+
+ if (err)
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, T_disk_flags, mdev->ldev->md.flags) ||
+ nla_put_u64(skb, T_bits_total, drbd_bm_bits(mdev)) ||
+ nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(mdev)))
+ goto nla_put_failure;
if (C_SYNC_SOURCE <= mdev->state.conn &&
C_PAUSED_SYNC_T >= mdev->state.conn) {
- NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
- NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
+ if (nla_put_u64(skb, T_bits_rs_total, mdev->rs_total) ||
+ nla_put_u64(skb, T_bits_rs_failed, mdev->rs_failed))
+ goto nla_put_failure;
}
}
case SIB_GET_STATUS_REPLY:
break;
case SIB_STATE_CHANGE:
- NLA_PUT_U32(skb, T_prev_state, sib->os.i);
- NLA_PUT_U32(skb, T_new_state, sib->ns.i);
+ if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
+ nla_put_u32(skb, T_new_state, sib->ns.i))
+ goto nla_put_failure;
break;
case SIB_HELPER_POST:
- NLA_PUT_U32(skb,
- T_helper_exit_code, sib->helper_exit_code);
+ if (nla_put_u32(skb, T_helper_exit_code,
+ sib->helper_exit_code))
+ goto nla_put_failure;
/* fall through */
case SIB_HELPER_PRE:
- NLA_PUT_STRING(skb, T_helper, sib->helper_name);
+ if (nla_put_string(skb, T_helper, sib->helper_name))
+ goto nla_put_failure;
break;
}
}
}
}
- dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
+ dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, &drbd_genl_family,
NLM_F_MULTI, DRBD_ADM_GET_STATUS);
if (!dh)
goto out;
if (!mdev) {
- /* this is a tconn without a single volume */
+ /* This is a tconn without a single volume.
+ * Surprisingly enough, it may have a network
+ * configuration. */
+ struct net_conf *nc;
dh->minor = -1U;
dh->ret_code = NO_ERROR;
- if (nla_put_drbd_cfg_context(skb, tconn->name, VOLUME_UNSPECIFIED))
- genlmsg_cancel(skb, dh);
- else
- genlmsg_end(skb, dh);
- goto out;
+ if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED))
+ goto cancel;
+ nc = rcu_dereference(tconn->net_conf);
+ if (nc && net_conf_to_skb(skb, nc, 1) != 0)
+ goto cancel;
+ goto done;
}
D_ASSERT(mdev->vnr == volume);
dh->ret_code = NO_ERROR;
if (nla_put_status_info(skb, mdev, NULL)) {
+cancel:
genlmsg_cancel(skb, dh);
goto out;
}
+done:
genlmsg_end(skb, dh);
}
{
const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
struct nlattr *nla;
- const char *conn_name;
+ const char *resource_name;
struct drbd_tconn *tconn;
+ int maxtype;
/* Is this a followup call? */
if (cb->args[0]) {
/* No explicit context given. Dump all. */
if (!nla)
goto dump;
- nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
+ maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
+ nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
+ if (IS_ERR(nla))
+ return PTR_ERR(nla);
/* context given, but no name present? */
if (!nla)
return -EINVAL;
- conn_name = nla_data(nla);
- tconn = conn_get_by_name(conn_name);
+ resource_name = nla_data(nla);
+ tconn = conn_get_by_name(resource_name);
if (!tconn)
return -ENODEV;
{
struct drbd_conf *mdev;
enum drbd_ret_code retcode;
+ struct start_ov_parms parms;
retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
if (!adm_ctx.reply_skb)
goto out;
mdev = adm_ctx.mdev;
+
+ /* resume from last known position, if possible */
+ parms.ov_start_sector = mdev->ov_start_sector;
+ parms.ov_stop_sector = ULLONG_MAX;
if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
- /* resume from last known position, if possible */
- struct start_ov_parms parms =
- { .ov_start_sector = mdev->ov_start_sector };
int err = start_ov_parms_from_attrs(&parms, info);
if (err) {
retcode = ERR_MANDATORY_TAG;
drbd_msg_put_info(from_attrs_err_to_txt(err));
goto out;
}
- /* w_make_ov_request expects position to be aligned */
- mdev->ov_start_sector = parms.ov_start_sector & ~BM_SECT_PER_BIT;
}
+ /* w_make_ov_request expects position to be aligned */
+ mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
+ mdev->ov_stop_sector = parms.ov_stop_sector;
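+	/* An ov_stop_sector of ULLONG_MAX (the default set above) makes the
+	 * verify run to the end of the device. */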
+
/* If there is still bitmap IO pending, e.g. previous resync or verify
* just being finished, wait for it before requesting a new resync. */
+ drbd_suspend_io(mdev);
wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
retcode = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
+ drbd_resume_io(mdev);
out:
drbd_adm_finish(info, retcode);
return 0;
}
static enum drbd_ret_code
-drbd_check_conn_name(const char *name)
+drbd_check_resource_name(const char *name)
{
if (!name || !name[0]) {
- drbd_msg_put_info("connection name missing");
+ drbd_msg_put_info("resource name missing");
return ERR_MANDATORY_TAG;
}
/* if we want to use these in sysfs/configfs/debugfs some day,
* we must not allow slashes */
if (strchr(name, '/')) {
- drbd_msg_put_info("invalid connection name");
+ drbd_msg_put_info("invalid resource name");
return ERR_INVALID_REQUEST;
}
return NO_ERROR;
int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
{
enum drbd_ret_code retcode;
+ struct res_opts res_opts;
+ int err;
retcode = drbd_adm_prepare(skb, info, 0);
if (!adm_ctx.reply_skb)
if (retcode != NO_ERROR)
goto out;
- retcode = drbd_check_conn_name(adm_ctx.conn_name);
+ set_res_opts_defaults(&res_opts);
+ err = res_opts_from_attrs(&res_opts, info);
+ if (err && err != -ENOMSG) {
+ retcode = ERR_MANDATORY_TAG;
+ drbd_msg_put_info(from_attrs_err_to_txt(err));
+ goto out;
+ }
+
+ retcode = drbd_check_resource_name(adm_ctx.resource_name);
if (retcode != NO_ERROR)
goto out;
goto out;
}
- if (!conn_create(adm_ctx.conn_name))
+ if (!conn_create(adm_ctx.resource_name, &res_opts))
retcode = ERR_NOMEM;
out:
drbd_adm_finish(info, retcode);
struct drbd_genlmsghdr *dh = info->userhdr;
enum drbd_ret_code retcode;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
goto out;
- /* FIXME drop minor_count parameter, limit to MINORMASK */
- if (dh->minor >= minor_count) {
+ if (dh->minor > MINORMASK) {
drbd_msg_put_info("requested minor out of range");
retcode = ERR_INVALID_REQUEST;
goto out;
* we may want to delete a minor from a live replication group.
*/
mdev->state.role == R_SECONDARY) {
+ _drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS),
+ CS_VERBOSE + CS_WAIT_COMPLETE);
idr_remove(&mdev->tconn->volumes, mdev->vnr);
idr_remove(&minors, mdev_to_minor(mdev));
del_gendisk(mdev->vdisk);
/* detach */
idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
- retcode = adm_detach(mdev);
- if (retcode < SS_SUCCESS) {
+ retcode = adm_detach(mdev, 0);
+ if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
drbd_msg_put_info("failed to detach");
goto out;
}
{
enum drbd_ret_code retcode;
- retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+ retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
if (!adm_ctx.reply_skb)
return retcode;
if (retcode != NO_ERROR)
unsigned seq;
int err = -ENOMEM;
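+	/* Rate-limit sync progress broadcasts to at most one per second;
+	 * all other events are broadcast unconditionally. */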
+ if (sib->sib_reason == SIB_SYNC_PROGRESS) {
+ if (time_after(jiffies, mdev->rs_last_bcast + HZ))
+ mdev->rs_last_bcast = jiffies;
+ else
+ return;
+ }
+
seq = atomic_inc_return(&drbd_genl_seq);
msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
if (!msg)