Merge branch 'fixes' of git://git.linaro.org/people/rmk/linux-arm
[firefly-linux-kernel-4.4.55.git] / drivers / block / drbd / drbd_nl.c
index f54d512ffce517f2046d03bb064f4943c77aff7a..2af26fc9528083d23cbde63fbe42128a26757599 100644 (file)
 int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);
 
-int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info);
-int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);
 
 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
@@ -65,13 +68,14 @@ int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
-int drbd_adm_syncer(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
 /* .dumpit */
 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
 
 #include <linux/drbd_genl_api.h>
+#include "drbd_nla.h"
 #include <linux/genl_magic_func.h>
 
 /* used blkdev_get_by_path, to claim our meta data device(s) */
@@ -89,7 +93,9 @@ static struct drbd_config_context {
 #define VOLUME_UNSPECIFIED             (-1U)
        /* pointer into the request skb,
         * limited lifetime! */
-       char *conn_name;
+       char *resource_name;
+       struct nlattr *my_addr;
+       struct nlattr *peer_addr;
 
        /* reply buffer */
        struct sk_buff *reply_skb;
@@ -137,7 +143,8 @@ int drbd_msg_put_info(const char *info)
  * If it returns successfully, adm_ctx members are valid.
  */
 #define DRBD_ADM_NEED_MINOR    1
-#define DRBD_ADM_NEED_CONN     2
+#define DRBD_ADM_NEED_RESOURCE 2
+#define DRBD_ADM_NEED_CONNECTION 4
 static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
                unsigned flags)
 {
@@ -148,28 +155,32 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
        memset(&adm_ctx, 0, sizeof(adm_ctx));
 
        /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
-       if (cmd != DRBD_ADM_GET_STATUS
-       && security_netlink_recv(skb, CAP_SYS_ADMIN))
+       if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
               return -EPERM;
 
        adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
-       if (!adm_ctx.reply_skb)
+       if (!adm_ctx.reply_skb) {
+               err = -ENOMEM;
                goto fail;
+       }
 
        adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
                                        info, &drbd_genl_family, 0, cmd);
        /* put of a few bytes into a fresh skb of >= 4k will always succeed.
         * but anyways */
-       if (!adm_ctx.reply_dh)
+       if (!adm_ctx.reply_dh) {
+               err = -ENOMEM;
                goto fail;
+       }
 
        adm_ctx.reply_dh->minor = d_in->minor;
        adm_ctx.reply_dh->ret_code = NO_ERROR;
 
+       adm_ctx.volume = VOLUME_UNSPECIFIED;
        if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
                struct nlattr *nla;
                /* parse and validate only */
-               err = drbd_cfg_context_from_attrs(NULL, info->attrs);
+               err = drbd_cfg_context_from_attrs(NULL, info);
                if (err)
                        goto fail;
 
@@ -183,109 +194,143 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
 
                /* and assign stuff to the global adm_ctx */
                nla = nested_attr_tb[__nla_type(T_ctx_volume)];
-               adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
-               nla = nested_attr_tb[__nla_type(T_ctx_conn_name)];
                if (nla)
-                       adm_ctx.conn_name = nla_data(nla);
-       } else
-               adm_ctx.volume = VOLUME_UNSPECIFIED;
+                       adm_ctx.volume = nla_get_u32(nla);
+               nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
+               if (nla)
+                       adm_ctx.resource_name = nla_data(nla);
+               adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
+               adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
+               if ((adm_ctx.my_addr &&
+                    nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) ||
+                   (adm_ctx.peer_addr &&
+                    nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) {
+                       err = -EINVAL;
+                       goto fail;
+               }
+       }
 
        adm_ctx.minor = d_in->minor;
        adm_ctx.mdev = minor_to_mdev(d_in->minor);
-       adm_ctx.tconn = conn_by_name(adm_ctx.conn_name);
+       adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);
 
        if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
                drbd_msg_put_info("unknown minor");
                return ERR_MINOR_INVALID;
        }
-       if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
-               drbd_msg_put_info("unknown connection");
+       if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) {
+               drbd_msg_put_info("unknown resource");
                return ERR_INVALID_REQUEST;
        }
 
+       if (flags & DRBD_ADM_NEED_CONNECTION) {
+               if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) {
+                       drbd_msg_put_info("no resource name expected");
+                       return ERR_INVALID_REQUEST;
+               }
+               if (adm_ctx.mdev) {
+                       drbd_msg_put_info("no minor number expected");
+                       return ERR_INVALID_REQUEST;
+               }
+               if (adm_ctx.my_addr && adm_ctx.peer_addr)
+                       adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
+                                                         nla_len(adm_ctx.my_addr),
+                                                         nla_data(adm_ctx.peer_addr),
+                                                         nla_len(adm_ctx.peer_addr));
+               if (!adm_ctx.tconn) {
+                       drbd_msg_put_info("unknown connection");
+                       return ERR_INVALID_REQUEST;
+               }
+       }
+
        /* some more paranoia, if the request was over-determined */
+       if (adm_ctx.mdev && adm_ctx.tconn &&
+           adm_ctx.mdev->tconn != adm_ctx.tconn) {
+               pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
+                               adm_ctx.minor, adm_ctx.resource_name,
+                               adm_ctx.mdev->tconn->name);
+               drbd_msg_put_info("minor exists in different resource");
+               return ERR_INVALID_REQUEST;
+       }
        if (adm_ctx.mdev &&
            adm_ctx.volume != VOLUME_UNSPECIFIED &&
            adm_ctx.volume != adm_ctx.mdev->vnr) {
                pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
                                adm_ctx.minor, adm_ctx.volume,
                                adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
-               drbd_msg_put_info("over-determined configuration context mismatch");
-               return ERR_INVALID_REQUEST;
-       }
-       if (adm_ctx.mdev && adm_ctx.tconn &&
-           adm_ctx.mdev->tconn != adm_ctx.tconn) {
-               pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
-                               adm_ctx.minor, adm_ctx.conn_name, adm_ctx.mdev->tconn->name);
-               drbd_msg_put_info("over-determined configuration context mismatch");
+               drbd_msg_put_info("minor exists as different volume");
                return ERR_INVALID_REQUEST;
        }
+
        return NO_ERROR;
 
 fail:
        nlmsg_free(adm_ctx.reply_skb);
        adm_ctx.reply_skb = NULL;
-       return -ENOMEM;
+       return err;
 }
 
 static int drbd_adm_finish(struct genl_info *info, int retcode)
 {
-       struct nlattr *nla;
-       const char *conn_name = NULL;
+       if (adm_ctx.tconn) {
+               kref_put(&adm_ctx.tconn->kref, &conn_destroy);
+               adm_ctx.tconn = NULL;
+       }
 
        if (!adm_ctx.reply_skb)
                return -ENOMEM;
 
        adm_ctx.reply_dh->ret_code = retcode;
-
-       nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
-       if (nla) {
-               nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
-               if (nla)
-                       conn_name = nla_data(nla);
-       }
-
        drbd_adm_send_reply(adm_ctx.reply_skb, info);
        return 0;
 }
 
+static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
+{
+       char *afs;
+
+       /* FIXME: A future version will not allow this case. */
+       if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0)
+               return;
+
+       switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) {
+       case AF_INET6:
+               afs = "ipv6";
+               snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
+                        &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr);
+               break;
+       case AF_INET:
+               afs = "ipv4";
+               snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
+                        &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
+               break;
+       default:
+               afs = "ssocks";
+               snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
+                        &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
+       }
+       snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
+}
+
 int drbd_khelper(struct drbd_conf *mdev, char *cmd)
 {
        char *envp[] = { "HOME=/",
                        "TERM=linux",
                        "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
-                       NULL, /* Will be set to address family */
-                       NULL, /* Will be set to address */
+                        (char[20]) { }, /* address family */
+                        (char[60]) { }, /* address */
                        NULL };
-       char mb[12], af[20], ad[60], *afs;
+       char mb[12];
        char *argv[] = {usermode_helper, cmd, mb, NULL };
+       struct drbd_tconn *tconn = mdev->tconn;
        struct sib_info sib;
        int ret;
 
-       snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
+       if (current == tconn->worker.task)
+               set_bit(CALLBACK_PENDING, &tconn->flags);
 
-       if (get_net_conf(mdev->tconn)) {
-               switch (((struct sockaddr *)mdev->tconn->net_conf->peer_addr)->sa_family) {
-               case AF_INET6:
-                       afs = "ipv6";
-                       snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI6",
-                                &((struct sockaddr_in6 *)mdev->tconn->net_conf->peer_addr)->sin6_addr);
-                       break;
-               case AF_INET:
-                       afs = "ipv4";
-                       snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
-                                &((struct sockaddr_in *)mdev->tconn->net_conf->peer_addr)->sin_addr);
-                       break;
-               default:
-                       afs = "ssocks";
-                       snprintf(ad, 60, "DRBD_PEER_ADDRESS=%pI4",
-                                &((struct sockaddr_in *)mdev->tconn->net_conf->peer_addr)->sin_addr);
-               }
-               snprintf(af, 20, "DRBD_PEER_AF=%s", afs);
-               envp[3]=af;
-               envp[4]=ad;
-               put_net_conf(mdev->tconn);
-       }
+       snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
+       setup_khelper_env(tconn, envp);
 
        /* The helper may take some time.
         * write out any unsynced meta data changes now */
@@ -295,7 +340,7 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
        sib.sib_reason = SIB_HELPER_PRE;
        sib.helper_name = cmd;
        drbd_bcast_event(mdev, &sib);
-       ret = call_usermodehelper(usermode_helper, argv, envp, 1);
+       ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
        if (ret)
                dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
                                usermode_helper, cmd, mb,
@@ -308,122 +353,172 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
        sib.helper_exit_code = ret;
        drbd_bcast_event(mdev, &sib);
 
+       if (current == tconn->worker.task)
+               clear_bit(CALLBACK_PENDING, &tconn->flags);
+
+       if (ret < 0) /* Ignore any ERRNOs we got. */
+               ret = 0;
+
+       return ret;
+}
+
+int conn_khelper(struct drbd_tconn *tconn, char *cmd)
+{
+       char *envp[] = { "HOME=/",
+                       "TERM=linux",
+                       "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
+                        (char[20]) { }, /* address family */
+                        (char[60]) { }, /* address */
+                       NULL };
+       char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
+       int ret;
+
+       setup_khelper_env(tconn, envp);
+       conn_md_sync(tconn);
+
+       conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
+       /* TODO: conn_bcast_event() ?? */
+
+       ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
+       if (ret)
+               conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
+                         usermode_helper, cmd, tconn->name,
+                         (ret >> 8) & 0xff, ret);
+       else
+               conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
+                         usermode_helper, cmd, tconn->name,
+                         (ret >> 8) & 0xff, ret);
+       /* TODO: conn_bcast_event() ?? */
+
        if (ret < 0) /* Ignore any ERRNOs we got. */
                ret = 0;
 
        return ret;
 }
 
-enum drbd_disk_state drbd_try_outdate_peer(struct drbd_conf *mdev)
+static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
+{
+       enum drbd_fencing_p fp = FP_NOT_AVAIL;
+       struct drbd_conf *mdev;
+       int vnr;
+
+       rcu_read_lock();
+       idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+               if (get_ldev_if_state(mdev, D_CONSISTENT)) {
+                       fp = max_t(enum drbd_fencing_p, fp,
+                                  rcu_dereference(mdev->ldev->disk_conf)->fencing);
+                       put_ldev(mdev);
+               }
+       }
+       rcu_read_unlock();
+
+       return fp;
+}
+
/*
 * Try to fence (outdate) the peer of a disconnected connection by
 * invoking the "fence-peer" helper and translating its exit code into a
 * state change request.  Returns true when the peer's disk can be
 * considered no better than Outdated afterwards (i.e. it is safe to
 * proceed), false otherwise.
 */
bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
	/* mask/val start all-zero; each helper exit code below only sets the
	 * bits it wants changed, so "goto out" with an empty mask is a no-op
	 * state request. */
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	/* Fencing only makes sense while we are not (re)connected. */
	if (tconn->cstate >= C_WF_REPORT_PARAMS) {
		conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
		return false;
	}

	fp = highest_fencing_policy(tconn);
	switch (fp) {
	case FP_NOT_AVAIL:
		/* no volume is even Consistent locally - skip the helper,
		 * fall through to the (empty-mask) state request below */
		conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
		goto out;
	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(tconn, "fence-peer");

	/* helper exit code lives in bits 8..15 of the wait status */
	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		conn_warn(tconn, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case 7:
		if (fp != FP_STONITH)
			conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	conn_info(tconn, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

 out:

	/* Not using
	   conn_request_state(tconn, mask, val, CS_VERBOSE);
	   here, because we might were able to re-establish the connection in the
	   meantime. */
	spin_lock_irq(&tconn->req_lock);
	if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags))
		_conn_request_state(tconn, mask, val, CS_VERBOSE);
	spin_unlock_irq(&tconn->req_lock);

	return conn_highest_pdsk(tconn) <= D_OUTDATED;
}
 
 static int _try_outdate_peer_async(void *data)
 {
-       struct drbd_conf *mdev = (struct drbd_conf *)data;
-       enum drbd_disk_state nps;
-       union drbd_state ns;
+       struct drbd_tconn *tconn = (struct drbd_tconn *)data;
 
-       nps = drbd_try_outdate_peer(mdev);
-
-       /* Not using
-          drbd_request_state(mdev, NS(pdsk, nps));
-          here, because we might were able to re-establish the connection
-          in the meantime. This can only partially be solved in the state's
-          engine is_valid_state() and is_valid_state_transition()
-          functions.
-
-          nps can be D_INCONSISTENT, D_OUTDATED or D_UNKNOWN.
-          pdsk == D_INCONSISTENT while conn >= C_CONNECTED is valid,
-          therefore we have to have the pre state change check here.
-       */
-       spin_lock_irq(&mdev->tconn->req_lock);
-       ns = mdev->state;
-       if (ns.conn < C_WF_REPORT_PARAMS) {
-               ns.pdsk = nps;
-               _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
-       }
-       spin_unlock_irq(&mdev->tconn->req_lock);
+       conn_try_outdate_peer(tconn);
 
+       kref_put(&tconn->kref, &conn_destroy);
        return 0;
 }
 
-void drbd_try_outdate_peer_async(struct drbd_conf *mdev)
+void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
 {
        struct task_struct *opa;
 
-       opa = kthread_run(_try_outdate_peer_async, mdev, "drbd%d_a_helper", mdev_to_minor(mdev));
-       if (IS_ERR(opa))
-               dev_err(DEV, "out of mem, failed to invoke fence-peer helper\n");
+       kref_get(&tconn->kref);
+       opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
+       if (IS_ERR(opa)) {
+               conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
+               kref_put(&tconn->kref, &conn_destroy);
+       }
 }
 
 enum drbd_state_rv
@@ -431,10 +526,10 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
 {
        const int max_tries = 4;
        enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
+       struct net_conf *nc;
        int try = 0;
        int forced = 0;
        union drbd_state mask, val;
-       enum drbd_disk_state nps;
 
        if (new_role == R_PRIMARY)
                request_ping(mdev->tconn); /* Detect a dead peer ASAP */
@@ -467,38 +562,34 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
                if (rv == SS_NO_UP_TO_DATE_DISK &&
                    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
                        D_ASSERT(mdev->state.pdsk == D_UNKNOWN);
-                       nps = drbd_try_outdate_peer(mdev);
 
-                       if (nps == D_OUTDATED || nps == D_INCONSISTENT) {
+                       if (conn_try_outdate_peer(mdev->tconn)) {
                                val.disk = D_UP_TO_DATE;
                                mask.disk = D_MASK;
                        }
-
-                       val.pdsk = nps;
-                       mask.pdsk = D_MASK;
-
                        continue;
                }
 
                if (rv == SS_NOTHING_TO_DO)
                        goto out;
                if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
-                       nps = drbd_try_outdate_peer(mdev);
-
-                       if (force && nps > D_OUTDATED) {
+                       if (!conn_try_outdate_peer(mdev->tconn) && force) {
                                dev_warn(DEV, "Forced into split brain situation!\n");
-                               nps = D_OUTDATED;
-                       }
-
-                       mask.pdsk = D_MASK;
-                       val.pdsk  = nps;
+                               mask.pdsk = D_MASK;
+                               val.pdsk  = D_OUTDATED;
 
+                       }
                        continue;
                }
                if (rv == SS_TWO_PRIMARIES) {
                        /* Maybe the peer is detected as dead very soon...
                           retry at most once more in this case. */
-                       schedule_timeout_interruptible((mdev->tconn->net_conf->ping_timeo+1)*HZ/10);
+                       int timeo;
+                       rcu_read_lock();
+                       nc = rcu_dereference(mdev->tconn->net_conf);
+                       timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
+                       rcu_read_unlock();
+                       schedule_timeout_interruptible(timeo);
                        if (try < max_tries)
                                try = max_tries - 1;
                        continue;
@@ -521,6 +612,8 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
        /* Wait until nothing is on the fly :) */
        wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
 
+       /* FIXME also wait for all pending P_BARRIER_ACK? */
+
        if (new_role == R_SECONDARY) {
                set_disk_ro(mdev->vdisk, true);
                if (get_ldev(mdev)) {
@@ -528,10 +621,12 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
                        put_ldev(mdev);
                }
        } else {
-               if (get_net_conf(mdev->tconn)) {
-                       mdev->tconn->net_conf->want_lose = 0;
-                       put_net_conf(mdev->tconn);
-               }
+               mutex_lock(&mdev->tconn->conf_update);
+               nc = mdev->tconn->net_conf;
+               if (nc)
+                       nc->discard_my_data = 0; /* without copy; single bit op is atomic */
+               mutex_unlock(&mdev->tconn->conf_update);
+
                set_disk_ro(mdev->vdisk, false);
                if (get_ldev(mdev)) {
                        if (((mdev->state.conn < C_CONNECTED ||
@@ -551,7 +646,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
                /* if this was forced, we should consider sync */
                if (forced)
                        drbd_send_uuids(mdev);
-               drbd_send_state(mdev);
+               drbd_send_current_state(mdev);
        }
 
        drbd_md_sync(mdev);
@@ -566,6 +661,7 @@ static const char *from_attrs_err_to_txt(int err)
 {
        return  err == -ENOMSG ? "required attribute missing" :
                err == -EOPNOTSUPP ? "unknown mandatory attribute" :
+               err == -EEXIST ? "can not change invariant setting" :
                "invalid attribute value";
 }
 
@@ -583,7 +679,7 @@ int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
 
        memset(&parms, 0, sizeof(parms));
        if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
-               err = set_role_parms_from_attrs(&parms, info->attrs);
+               err = set_role_parms_from_attrs(&parms, info);
                if (err) {
                        retcode = ERR_MANDATORY_TAG;
                        drbd_msg_put_info(from_attrs_err_to_txt(err));
@@ -606,7 +702,12 @@ static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
                                       struct drbd_backing_dev *bdev)
 {
        sector_t md_size_sect = 0;
-       switch (bdev->dc.meta_dev_idx) {
+       int meta_dev_idx;
+
+       rcu_read_lock();
+       meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
+
+       switch (meta_dev_idx) {
        default:
                /* v07 style fixed size indexed meta data */
                bdev->md.md_size_sect = MD_RESERVED_SECT;
@@ -641,6 +742,7 @@ static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
                bdev->md.bm_offset   = -md_size_sect + MD_AL_OFFSET;
                break;
        }
+       rcu_read_unlock();
 }
 
 /* input size is expected to be in KB */
@@ -682,7 +784,7 @@ char *ppsize(char *buf, unsigned long long size)
 void drbd_suspend_io(struct drbd_conf *mdev)
 {
        set_bit(SUSPEND_IO, &mdev->flags);
-       if (is_susp(mdev->state))
+       if (drbd_suspended(mdev))
                return;
        wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
 }
@@ -703,7 +805,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
 enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
 {
        sector_t prev_first_sect, prev_size; /* previous meta location */
-       sector_t la_size;
+       sector_t la_size, u_size;
        sector_t size;
        char ppb[10];
 
@@ -731,7 +833,10 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
        /* TODO: should only be some assert here, not (re)init... */
        drbd_md_set_sector_offsets(mdev, mdev->ldev);
 
-       size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);
+       rcu_read_lock();
+       u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+       rcu_read_unlock();
+       size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);
 
        if (drbd_get_capacity(mdev->this_bdev) != size ||
            drbd_bm_capacity(mdev) != size) {
@@ -772,8 +877,8 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
                         la_size_changed && md_moved ? "size changed and md moved" :
                         la_size_changed ? "size changed" : "md moved");
                /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
-               err = drbd_bitmap_io(mdev, &drbd_bm_write,
-                               "size changed", BM_LOCKED_MASK);
+               err = drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
+                                    "size changed", BM_LOCKED_MASK);
                if (err) {
                        rv = dev_size_error;
                        goto out;
@@ -794,12 +899,12 @@ out:
 }
 
 sector_t
-drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
+drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+                 sector_t u_size, int assume_peer_has_space)
 {
        sector_t p_size = mdev->p_size;   /* partner's disk size. */
        sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
        sector_t m_size; /* my size */
-       sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
        sector_t size = 0;
 
        m_size = drbd_get_max_capacity(bdev);
@@ -848,24 +953,21 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int ass
  * failed, and 0 on success. You should call drbd_md_sync() after you called
  * this function.
  */
-static int drbd_check_al_size(struct drbd_conf *mdev)
+static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
 {
        struct lru_cache *n, *t;
        struct lc_element *e;
        unsigned int in_use;
        int i;
 
-       if (!expect(mdev->sync_conf.al_extents >= DRBD_AL_EXTENTS_MIN))
-               mdev->sync_conf.al_extents = DRBD_AL_EXTENTS_MIN;
-
        if (mdev->act_log &&
-           mdev->act_log->nr_elements == mdev->sync_conf.al_extents)
+           mdev->act_log->nr_elements == dc->al_extents)
                return 0;
 
        in_use = 0;
        t = mdev->act_log;
        n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
-               mdev->sync_conf.al_extents, sizeof(struct lc_element), 0);
+               dc->al_extents, sizeof(struct lc_element), 0);
 
        if (n == NULL) {
                dev_err(DEV, "Cannot allocate act_log lru!\n");
@@ -899,14 +1001,16 @@ static int drbd_check_al_size(struct drbd_conf *mdev)
 static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
 {
        struct request_queue * const q = mdev->rq_queue;
-       int max_hw_sectors = max_bio_size >> 9;
-       int max_segments = 0;
+       unsigned int max_hw_sectors = max_bio_size >> 9;
+       unsigned int max_segments = 0;
 
        if (get_ldev_if_state(mdev, D_ATTACHING)) {
                struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
 
                max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
-               max_segments = mdev->ldev->dc.max_bio_bvecs;
+               rcu_read_lock();
+               max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
+               rcu_read_unlock();
                put_ldev(mdev);
        }
 
@@ -933,7 +1037,7 @@ static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_
 
 void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
 {
-       int now, new, local, peer;
+       unsigned int now, new, local, peer;
 
        now = queue_max_hw_sectors(mdev->rq_queue) << 9;
        local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
@@ -944,23 +1048,27 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
                mdev->local_max_bio_size = local;
                put_ldev(mdev);
        }
+       local = min(local, DRBD_MAX_BIO_SIZE);
 
        /* We may ignore peer limits if the peer is modern enough.
           Because new from 8.3.8 onwards the peer can use multiple
           BIOs for a single peer_request */
        if (mdev->state.conn >= C_CONNECTED) {
                if (mdev->tconn->agreed_pro_version < 94)
-                       peer = mdev->peer_max_bio_size;
+                       peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+                       /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
                else if (mdev->tconn->agreed_pro_version == 94)
                        peer = DRBD_MAX_SIZE_H80_PACKET;
-               else /* drbd 8.3.8 onwards */
+               else if (mdev->tconn->agreed_pro_version < 100)
+                       peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
+               else
                        peer = DRBD_MAX_BIO_SIZE;
        }
 
-       new = min_t(int, local, peer);
+       new = min(local, peer);
 
        if (mdev->state.role == R_PRIMARY && new < now)
-               dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);
+               dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
 
        if (new != now)
                dev_info(DEV, "max BIO size = %u\n", new);
@@ -968,34 +1076,27 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
        drbd_setup_queue_param(mdev, new);
 }
 
-/* serialize deconfig (worker exiting, doing cleanup)
- * and reconfig (drbdsetup disk, drbdsetup net)
- *
- * Wait for a potentially exiting worker, then restart it,
- * or start a new one.  Flush any pending work, there may still be an
- * after_state_change queued.
- */
+/* Starts the worker thread */
 static void conn_reconfig_start(struct drbd_tconn *tconn)
 {
-       wait_event(tconn->ping_wait, !test_and_set_bit(CONFIG_PENDING, &tconn->flags));
-       wait_event(tconn->ping_wait, !test_bit(OBJECT_DYING, &tconn->flags));
        drbd_thread_start(&tconn->worker);
        conn_flush_workqueue(tconn);
 }
 
-/* if still unconfigured, stops worker again.
- * if configured now, clears CONFIG_PENDING.
- * wakes potential waiters */
+/* if still unconfigured, stops worker again. */
 static void conn_reconfig_done(struct drbd_tconn *tconn)
 {
+       bool stop_threads;
        spin_lock_irq(&tconn->req_lock);
-       if (conn_all_vols_unconf(tconn)) {
-               set_bit(OBJECT_DYING, &tconn->flags);
-               drbd_thread_stop_nowait(&tconn->worker);
-       } else
-               clear_bit(CONFIG_PENDING, &tconn->flags);
+       stop_threads = conn_all_vols_unconf(tconn) &&
+               tconn->cstate == C_STANDALONE;
        spin_unlock_irq(&tconn->req_lock);
-       wake_up(&tconn->ping_wait);
+       if (stop_threads) {
+               /* asender is implicitly stopped by receiver
+                * in conn_disconnect() */
+               drbd_thread_stop(&tconn->receiver);
+               drbd_thread_stop(&tconn->worker);
+       }
 }
 
 /* Make sure IO is suspended before calling this function(). */
@@ -1019,6 +1120,146 @@ static void drbd_suspend_al(struct drbd_conf *mdev)
                dev_info(DEV, "Suspended AL updates\n");
 }
 
+
+static bool should_set_defaults(struct genl_info *info)
+{
+       unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
+       return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
+}
+
+static void enforce_disk_conf_limits(struct disk_conf *dc)
+{
+       if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
+               dc->al_extents = DRBD_AL_EXTENTS_MIN;
+       if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
+               dc->al_extents = DRBD_AL_EXTENTS_MAX;
+
+       if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
+               dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
+}
+
+int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
+{
+       enum drbd_ret_code retcode;
+       struct drbd_conf *mdev;
+       struct disk_conf *new_disk_conf, *old_disk_conf;
+       struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
+       int err, fifo_size;
+
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out;
+
+       mdev = adm_ctx.mdev;
+
+       /* we also need a disk
+        * to change the options on */
+       if (!get_ldev(mdev)) {
+               retcode = ERR_NO_DISK;
+               goto out;
+       }
+
+       new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
+       if (!new_disk_conf) {
+               retcode = ERR_NOMEM;
+               goto fail;
+       }
+
+       mutex_lock(&mdev->tconn->conf_update);
+       old_disk_conf = mdev->ldev->disk_conf;
+       *new_disk_conf = *old_disk_conf;
+       if (should_set_defaults(info))
+               set_disk_conf_defaults(new_disk_conf);
+
+       err = disk_conf_from_attrs_for_change(new_disk_conf, info);
+       if (err && err != -ENOMSG) {
+               retcode = ERR_MANDATORY_TAG;
+               drbd_msg_put_info(from_attrs_err_to_txt(err));
+       }
+
+       if (!expect(new_disk_conf->resync_rate >= 1))
+               new_disk_conf->resync_rate = 1;
+
+       enforce_disk_conf_limits(new_disk_conf);
+
+       fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
+       if (fifo_size != mdev->rs_plan_s->size) {
+               new_plan = fifo_alloc(fifo_size);
+               if (!new_plan) {
+                       dev_err(DEV, "kmalloc of fifo_buffer failed\n");
+                       retcode = ERR_NOMEM;
+                       goto fail_unlock;
+               }
+       }
+
+       drbd_suspend_io(mdev);
+       wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
+       drbd_al_shrink(mdev);
+       err = drbd_check_al_size(mdev, new_disk_conf);
+       lc_unlock(mdev->act_log);
+       wake_up(&mdev->al_wait);
+       drbd_resume_io(mdev);
+
+       if (err) {
+               retcode = ERR_NOMEM;
+               goto fail_unlock;
+       }
+
+       write_lock_irq(&global_state_lock);
+       retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
+       if (retcode == NO_ERROR) {
+               rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
+               drbd_resync_after_changed(mdev);
+       }
+       write_unlock_irq(&global_state_lock);
+
+       if (retcode != NO_ERROR)
+               goto fail_unlock;
+
+       if (new_plan) {
+               old_plan = mdev->rs_plan_s;
+               rcu_assign_pointer(mdev->rs_plan_s, new_plan);
+       }
+
+       mutex_unlock(&mdev->tconn->conf_update);
+
+       if (new_disk_conf->al_updates)
+               mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
+       else
+               mdev->ldev->md.flags |= MDF_AL_DISABLED;
+
+       if (new_disk_conf->md_flushes)
+               clear_bit(MD_NO_FUA, &mdev->flags);
+       else
+               set_bit(MD_NO_FUA, &mdev->flags);
+
+       drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
+
+       drbd_md_sync(mdev);
+
+       if (mdev->state.conn >= C_CONNECTED)
+               drbd_send_sync_param(mdev);
+
+       synchronize_rcu();
+       kfree(old_disk_conf);
+       kfree(old_plan);
+       mod_timer(&mdev->request_timer, jiffies + HZ);
+       goto success;
+
+fail_unlock:
+       mutex_unlock(&mdev->tconn->conf_update);
+ fail:
+       kfree(new_disk_conf);
+       kfree(new_plan);
+success:
+       put_ldev(mdev);
+ out:
+       drbd_adm_finish(info, retcode);
+       return 0;
+}
+
 int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
 {
        struct drbd_conf *mdev;
@@ -1028,17 +1269,19 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        sector_t max_possible_sectors;
        sector_t min_md_device_sectors;
        struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
+       struct disk_conf *new_disk_conf = NULL;
        struct block_device *bdev;
        struct lru_cache *resync_lru = NULL;
+       struct fifo_buffer *new_plan = NULL;
        union drbd_state ns, os;
        enum drbd_state_rv rv;
-       int cp_discovered = 0;
+       struct net_conf *nc;
 
        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
-               goto fail;
+               goto finish;
 
        mdev = adm_ctx.mdev;
        conn_reconfig_start(mdev->tconn);
@@ -1054,43 +1297,67 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
         * to realize a "hot spare" feature (not that I'd recommend that) */
        wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
 
+       /* make sure there is no leftover from previous force-detach attempts */
+       clear_bit(FORCE_DETACH, &mdev->flags);
+       clear_bit(WAS_IO_ERROR, &mdev->flags);
+       clear_bit(WAS_READ_ERROR, &mdev->flags);
+
+       /* and no leftover from previously aborted resync or verify, either */
+       mdev->rs_total = 0;
+       mdev->rs_failed = 0;
+       atomic_set(&mdev->rs_pending_cnt, 0);
+
        /* allocation not in the IO path, drbdsetup context */
        nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
        if (!nbc) {
                retcode = ERR_NOMEM;
                goto fail;
        }
+       spin_lock_init(&nbc->md.uuid_lock);
 
-       nbc->dc.disk_size     = DRBD_DISK_SIZE_SECT_DEF;
-       nbc->dc.on_io_error   = DRBD_ON_IO_ERROR_DEF;
-       nbc->dc.fencing       = DRBD_FENCING_DEF;
-       nbc->dc.max_bio_bvecs = DRBD_MAX_BIO_BVECS_DEF;
+       new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
+       if (!new_disk_conf) {
+               retcode = ERR_NOMEM;
+               goto fail;
+       }
+       nbc->disk_conf = new_disk_conf;
 
-       err = disk_conf_from_attrs(&nbc->dc, info->attrs);
+       set_disk_conf_defaults(new_disk_conf);
+       err = disk_conf_from_attrs(new_disk_conf, info);
        if (err) {
                retcode = ERR_MANDATORY_TAG;
                drbd_msg_put_info(from_attrs_err_to_txt(err));
                goto fail;
        }
 
-       if ((int)nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
+       enforce_disk_conf_limits(new_disk_conf);
+
+       new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
+       if (!new_plan) {
+               retcode = ERR_NOMEM;
+               goto fail;
+       }
+
+       if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
                retcode = ERR_MD_IDX_INVALID;
                goto fail;
        }
 
-       if (get_net_conf(mdev->tconn)) {
-               int prot = mdev->tconn->net_conf->wire_protocol;
-               put_net_conf(mdev->tconn);
-               if (nbc->dc.fencing == FP_STONITH && prot == DRBD_PROT_A) {
+       rcu_read_lock();
+       nc = rcu_dereference(mdev->tconn->net_conf);
+       if (nc) {
+               if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
+                       rcu_read_unlock();
                        retcode = ERR_STONITH_AND_PROT_A;
                        goto fail;
                }
        }
+       rcu_read_unlock();
 
-       bdev = blkdev_get_by_path(nbc->dc.backing_dev,
+       bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
                                  FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
        if (IS_ERR(bdev)) {
-               dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
+               dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
                        PTR_ERR(bdev));
                retcode = ERR_OPEN_DISK;
                goto fail;
@@ -1105,12 +1372,12 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
         * should check it for you already; but if you don't, or
         * someone fooled it, we need to double check here)
         */
-       bdev = blkdev_get_by_path(nbc->dc.meta_dev,
+       bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
                                  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
-                                 ((int)nbc->dc.meta_dev_idx < 0) ?
+                                 (new_disk_conf->meta_dev_idx < 0) ?
                                  (void *)mdev : (void *)drbd_m_holder);
        if (IS_ERR(bdev)) {
-               dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
+               dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
                        PTR_ERR(bdev));
                retcode = ERR_OPEN_MD_DISK;
                goto fail;
@@ -1118,8 +1385,8 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        nbc->md_bdev = bdev;
 
        if ((nbc->backing_bdev == nbc->md_bdev) !=
-           (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
-            nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
+           (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
+            new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
                retcode = ERR_MD_IDX_INVALID;
                goto fail;
        }
@@ -1135,25 +1402,25 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
        drbd_md_set_sector_offsets(mdev, nbc);
 
-       if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
+       if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
                dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
                        (unsigned long long) drbd_get_max_capacity(nbc),
-                       (unsigned long long) nbc->dc.disk_size);
-               retcode = ERR_DISK_TO_SMALL;
+                       (unsigned long long) new_disk_conf->disk_size);
+               retcode = ERR_DISK_TOO_SMALL;
                goto fail;
        }
 
-       if ((int)nbc->dc.meta_dev_idx < 0) {
+       if (new_disk_conf->meta_dev_idx < 0) {
                max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
                /* at least one MB, otherwise it does not make sense */
                min_md_device_sectors = (2<<10);
        } else {
                max_possible_sectors = DRBD_MAX_SECTORS;
-               min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
+               min_md_device_sectors = MD_RESERVED_SECT * (new_disk_conf->meta_dev_idx + 1);
        }
 
        if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
-               retcode = ERR_MD_DISK_TO_SMALL;
+               retcode = ERR_MD_DISK_TOO_SMALL;
                dev_warn(DEV, "refusing attach: md-device too small, "
                     "at least %llu sectors needed for this meta-disk type\n",
                     (unsigned long long) min_md_device_sectors);
@@ -1164,7 +1431,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
         * (we may currently be R_PRIMARY with no local disk...) */
        if (drbd_get_max_capacity(nbc) <
            drbd_get_capacity(mdev->this_bdev)) {
-               retcode = ERR_DISK_TO_SMALL;
+               retcode = ERR_DISK_TOO_SMALL;
                goto fail;
        }
 
@@ -1174,14 +1441,20 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
                dev_warn(DEV, "==> truncating very big lower level device "
                        "to currently maximum possible %llu sectors <==\n",
                        (unsigned long long) max_possible_sectors);
-               if ((int)nbc->dc.meta_dev_idx >= 0)
+               if (new_disk_conf->meta_dev_idx >= 0)
                        dev_warn(DEV, "==>> using internal or flexible "
                                      "meta data may help <<==\n");
        }
 
        drbd_suspend_io(mdev);
        /* also wait for the last barrier ack. */
-       wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || is_susp(mdev->state));
+       /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
+        * We need a way to either ignore barrier acks for barriers sent before a device
+        * was attached, or a way to wait for all pending barrier acks to come in.
+        * As barriers are counted per resource,
+        * we'd need to suspend io on all devices of a resource.
+        */
+       wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
        /* and for any other previously queued work */
        drbd_flush_workqueue(mdev);
 
@@ -1217,30 +1490,25 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        }
 
        /* Since we are diskless, fix the activity log first... */
-       if (drbd_check_al_size(mdev)) {
+       if (drbd_check_al_size(mdev, new_disk_conf)) {
                retcode = ERR_NOMEM;
                goto force_diskless_dec;
        }
 
        /* Prevent shrinking of consistent devices ! */
        if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
-           drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
+           drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
                dev_warn(DEV, "refusing to truncate a consistent device\n");
-               retcode = ERR_DISK_TO_SMALL;
-               goto force_diskless_dec;
-       }
-
-       if (!drbd_al_read_log(mdev, nbc)) {
-               retcode = ERR_IO_MD_DISK;
+               retcode = ERR_DISK_TOO_SMALL;
                goto force_diskless_dec;
        }
 
        /* Reset the "barriers don't work" bits here, then force meta data to
         * be written, to ensure we determine if barriers are supported. */
-       if (nbc->dc.no_md_flush)
-               set_bit(MD_NO_FUA, &mdev->flags);
-       else
+       if (new_disk_conf->md_flushes)
                clear_bit(MD_NO_FUA, &mdev->flags);
+       else
+               set_bit(MD_NO_FUA, &mdev->flags);
 
        /* Point of no return reached.
         * Devices and memory are no longer released by error cleanup below.
@@ -1249,11 +1517,13 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        D_ASSERT(mdev->ldev == NULL);
        mdev->ldev = nbc;
        mdev->resync = resync_lru;
+       mdev->rs_plan_s = new_plan;
        nbc = NULL;
        resync_lru = NULL;
+       new_disk_conf = NULL;
+       new_plan = NULL;
 
-       mdev->write_ordering = WO_bdev_flush;
-       drbd_bump_write_ordering(mdev, WO_bdev_flush);
+       drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
 
        if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
                set_bit(CRASHED_PRIMARY, &mdev->flags);
@@ -1261,10 +1531,8 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
                clear_bit(CRASHED_PRIMARY, &mdev->flags);
 
        if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
-           !(mdev->state.role == R_PRIMARY && mdev->state.susp_nod)) {
+           !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod))
                set_bit(CRASHED_PRIMARY, &mdev->flags);
-               cp_discovered = 1;
-       }
 
        mdev->send_cnt = 0;
        mdev->recv_cnt = 0;
@@ -1300,7 +1568,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        } else if (dd == grew)
                set_bit(RESYNC_AFTER_NEG, &mdev->flags);
 
-       if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
+       if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) ||
+           (test_bit(CRASHED_PRIMARY, &mdev->flags) &&
+            drbd_md_test_flag(mdev->ldev, MDF_AL_DISABLED))) {
                dev_info(DEV, "Assuming that all blocks are out of sync "
                     "(aka FullSync)\n");
                if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
@@ -1310,16 +1580,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
                }
        } else {
                if (drbd_bitmap_io(mdev, &drbd_bm_read,
-                       "read from attaching", BM_LOCKED_MASK) < 0) {
-                       retcode = ERR_IO_MD_DISK;
-                       goto force_diskless_dec;
-               }
-       }
-
-       if (cp_discovered) {
-               drbd_al_apply_to_bm(mdev);
-               if (drbd_bitmap_io(mdev, &drbd_bm_write,
-                       "crashed primary apply AL", BM_LOCKED_MASK)) {
+                       "read from attaching", BM_LOCKED_MASK)) {
                        retcode = ERR_IO_MD_DISK;
                        goto force_diskless_dec;
                }
@@ -1329,8 +1590,8 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
                drbd_suspend_al(mdev); /* IO is still suspended here... */
 
        spin_lock_irq(&mdev->tconn->req_lock);
-       os = mdev->state;
-       ns.i = os.i;
+       os = drbd_read_state(mdev);
+       ns = os;
        /* If MDF_CONSISTENT is not set go into inconsistent state,
           otherwise investigate MDF_WasUpToDate...
           If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
@@ -1348,8 +1609,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
                ns.pdsk = D_OUTDATED;
 
-       if ( ns.disk == D_CONSISTENT &&
-           (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
+       rcu_read_lock();
+       if (ns.disk == D_CONSISTENT &&
+           (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
                ns.disk = D_UP_TO_DATE;
 
        /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
@@ -1357,6 +1619,13 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
           this point, because drbd_request_state() modifies these
           flags. */
 
+       if (rcu_dereference(mdev->ldev->disk_conf)->al_updates)
+               mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
+       else
+               mdev->ldev->md.flags |= MDF_AL_DISABLED;
+
+       rcu_read_unlock();
+
        /* In case we are C_CONNECTED postpone any decision on the new disk
           state after the negotiation phase. */
        if (mdev->state.conn == C_CONNECTED) {
@@ -1372,12 +1641,13 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        }
 
        rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
-       ns = mdev->state;
        spin_unlock_irq(&mdev->tconn->req_lock);
 
        if (rv < SS_SUCCESS)
                goto force_diskless_dec;
 
+       mod_timer(&mdev->request_timer, jiffies + HZ);
+
        if (mdev->state.role == R_PRIMARY)
                mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
        else
@@ -1395,10 +1665,10 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
  force_diskless_dec:
        put_ldev(mdev);
  force_diskless:
-       drbd_force_state(mdev, NS(disk, D_FAILED));
+       drbd_force_state(mdev, NS(disk, D_DISKLESS));
        drbd_md_sync(mdev);
-       conn_reconfig_done(mdev->tconn);
  fail:
+       conn_reconfig_done(mdev->tconn);
        if (nbc) {
                if (nbc->backing_bdev)
                        blkdev_put(nbc->backing_bdev,
@@ -1408,12 +1678,43 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
                kfree(nbc);
        }
+       kfree(new_disk_conf);
        lc_destroy(resync_lru);
+       kfree(new_plan);
 
+ finish:
        drbd_adm_finish(info, retcode);
        return 0;
 }
 
+static int adm_detach(struct drbd_conf *mdev, int force)
+{
+       enum drbd_state_rv retcode;
+       int ret;
+
+       if (force) {
+               set_bit(FORCE_DETACH, &mdev->flags);
+               drbd_force_state(mdev, NS(disk, D_FAILED));
+               retcode = SS_SUCCESS;
+               goto out;
+       }
+
+       drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
+       drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
+       retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
+       drbd_md_put_buffer(mdev);
+       /* D_FAILED will transition to DISKLESS. */
+       ret = wait_event_interruptible(mdev->misc_wait,
+                       mdev->state.disk != D_FAILED);
+       drbd_resume_io(mdev);
+       if ((int)retcode == (int)SS_IS_DISKLESS)
+               retcode = SS_NOTHING_TO_DO;
+       if (ret)
+               retcode = ERR_INTR;
+out:
+       return retcode;
+}
+
 /* Detaching the disk is a process in multiple stages.  First we need to lock
  * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
  * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
@@ -1421,8 +1722,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
  * Only then we have finally detached. */
 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
 {
-       struct drbd_conf *mdev;
        enum drbd_ret_code retcode;
+       struct detach_parms parms = { };
+       int err;
 
        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
@@ -1430,242 +1732,410 @@ int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto out;
 
-       mdev = adm_ctx.mdev;
-       drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
-       retcode = drbd_request_state(mdev, NS(disk, D_DISKLESS));
-       wait_event(mdev->misc_wait,
-                       mdev->state.disk != D_DISKLESS ||
-                       !atomic_read(&mdev->local_cnt));
-       drbd_resume_io(mdev);
+       /* detach parms are optional; the zeroed default means graceful detach */
+       if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
+               err = detach_parms_from_attrs(&parms, info);
+               if (err) {
+                       retcode = ERR_MANDATORY_TAG;
+                       drbd_msg_put_info(from_attrs_err_to_txt(err));
+                       goto out;
+               }
+       }
+
+       retcode = adm_detach(adm_ctx.mdev, parms.force_detach);
 out:
        drbd_adm_finish(info, retcode);
        return 0;
 }
 
-int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
+/* Report whether any volume of @tconn currently takes part in a resync
+ * (running or paused, either direction).  The idr walk is RCU protected. */
+static bool conn_resync_running(struct drbd_tconn *tconn)
+{
+       struct drbd_conf *mdev;
+       bool found = false;
+       int vnr;
+
+       rcu_read_lock();
+       idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+               switch (mdev->state.conn) {
+               case C_SYNC_SOURCE:
+               case C_SYNC_TARGET:
+               case C_PAUSED_SYNC_S:
+               case C_PAUSED_SYNC_T:
+                       found = true;
+                       break;
+               default:
+                       continue;
+               }
+               break;
+       }
+       rcu_read_unlock();
+
+       return found;
+}
+
+/* Report whether any volume of @tconn is in an online-verify state.
+ * The idr walk is RCU protected. */
+static bool conn_ov_running(struct drbd_tconn *tconn)
+{
+       struct drbd_conf *mdev;
+       bool found = false;
+       int vnr;
+
+       rcu_read_lock();
+       idr_for_each_entry(&tconn->volumes, mdev, vnr) {
+               enum drbd_conns cs = mdev->state.conn;
+
+               if (cs == C_VERIFY_S || cs == C_VERIFY_T) {
+                       found = true;
+                       break;
+               }
+       }
+       rcu_read_unlock();
+
+       return found;
+}
+
+/* Core validation of @new_conf against the connection state and @old_conf
+ * (@old_conf may be NULL on first configuration).
+ * Caller must hold rcu_read_lock() -- needed for the disk_conf dereference
+ * below.  Returns NO_ERROR or the first applicable error code. */
+static enum drbd_ret_code
+_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
+{
+       struct drbd_conf *mdev;
+       int i;
+
+       /* while talking to a pre-APV-100 peer, these options may not change */
+       if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
+               if (new_conf->wire_protocol != old_conf->wire_protocol)
+                       return ERR_NEED_APV_100;
+
+               if (new_conf->two_primaries != old_conf->two_primaries)
+                       return ERR_NEED_APV_100;
+
+               if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
+                       return ERR_NEED_APV_100;
+       }
+
+       /* both sides primary requires the two-primaries option */
+       if (!new_conf->two_primaries &&
+           conn_highest_role(tconn) == R_PRIMARY &&
+           conn_highest_peer(tconn) == R_PRIMARY)
+               return ERR_NEED_ALLOW_TWO_PRI;
+
+       if (new_conf->two_primaries &&
+           (new_conf->wire_protocol != DRBD_PROT_C))
+               return ERR_NOT_PROTO_C;
+
+       idr_for_each_entry(&tconn->volumes, mdev, i) {
+               if (get_ldev(mdev)) {
+                       enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
+                       put_ldev(mdev);
+                       if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
+                               return ERR_STONITH_AND_PROT_A;
+               }
+               if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
+                       return ERR_DISCARD_IMPOSSIBLE;
+       }
+
+       if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
+               return ERR_CONG_NOT_PROTO_A;
+
+       return NO_ERROR;
+}
+
+/* Validate @new_conf for @tconn and make sure every volume has a bitmap
+ * allocated.  Returns NO_ERROR or an error code from _check_net_options()
+ * resp. ERR_NOMEM. */
+static enum drbd_ret_code
+check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
+{
+       enum drbd_ret_code rv;
+       struct drbd_conf *mdev;
        int i;
+
+       rcu_read_lock();
+       rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
+       rcu_read_unlock();
+
+       /* tconn->volumes protected by genl_lock() here */
+       idr_for_each_entry(&tconn->volumes, mdev, i) {
+               if (!mdev->bitmap) {
+                       if (drbd_bm_init(mdev))
+                               return ERR_NOMEM;
+               }
+       }
+
+       return rv;
+}
+
+/* Hash transforms allocated from a net_conf, staged before being
+ * installed into the tconn. */
+struct crypto {
+       struct crypto_hash *verify_tfm;     /* online-verify digest */
+       struct crypto_hash *csums_tfm;      /* checksum-based resync digest */
+       struct crypto_hash *cram_hmac_tfm;  /* "hmac(<alg>)" for CRAM authentication */
+       struct crypto_hash *integrity_tfm;  /* data integrity digest */
+};
+
+/* Allocate the hash transform named @tfm_name into *@tfm.
+ * An empty name means "not configured" and succeeds without touching *@tfm.
+ * Returns NO_ERROR on success, @err_alg if the allocation fails. */
+static int
+alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
+{
+       struct crypto_hash *t;
+
+       if (tfm_name[0] == '\0')
+               return NO_ERROR;
+
+       t = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
+       if (IS_ERR(t)) {
+               *tfm = NULL;
+               return err_alg;
+       }
+
+       *tfm = t;
+       return NO_ERROR;
+}
+
+/* Allocate all transforms configured in @new_conf into @crypto.
+ * On failure, members allocated so far stay set; callers clean up with
+ * free_crypto().  Returns NO_ERROR or the per-algorithm error code. */
+static enum drbd_ret_code
+alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
+{
+       char hmac_name[CRYPTO_MAX_ALG_NAME];
+       enum drbd_ret_code rv;
+
+       rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
+                      ERR_CSUMS_ALG);
+       if (rv != NO_ERROR)
+               return rv;
+       rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
+                      ERR_VERIFY_ALG);
+       if (rv != NO_ERROR)
+               return rv;
+       rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
+                      ERR_INTEGRITY_ALG);
+       if (rv != NO_ERROR)
+               return rv;
+       if (new_conf->cram_hmac_alg[0] != 0) {
+               /* the configured algorithm is wrapped as "hmac(<alg>)" */
+               snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
+                        new_conf->cram_hmac_alg);
+
+               rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
+                              ERR_AUTH_ALG);
+       }
+
+       return rv;
+}
+
+/* Release all transforms held in @crypto.  Members that were never
+ * allocated are NULL; this relies on crypto_free_hash() accepting NULL. */
+static void free_crypto(struct crypto *crypto)
+{
+       crypto_free_hash(crypto->cram_hmac_tfm);
+       crypto_free_hash(crypto->integrity_tfm);
+       crypto_free_hash(crypto->csums_tfm);
+       crypto_free_hash(crypto->verify_tfm);
+}
+
+/* Netlink handler: change the net options of an already configured
+ * connection. */
+int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
+{
+       enum drbd_ret_code retcode;
+       struct drbd_tconn *tconn;
+       struct net_conf *old_conf, *new_conf = NULL;
        int err;
+       int ovr; /* online verify running */
+       int rsr; /* re-sync running */
+       struct crypto crypto = { };
 
-       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;
 
        tconn = adm_ctx.tconn;
-       conn_reconfig_start(tconn);
-
-       if (tconn->cstate > C_STANDALONE) {
-               retcode = ERR_NET_CONFIGURED;
-               goto fail;
-       }
 
-       /* allocation not in the IO path, cqueue thread context */
        new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
        if (!new_conf) {
                retcode = ERR_NOMEM;
+               goto out;
+       }
+
+       conn_reconfig_start(tconn);
+
+       mutex_lock(&tconn->data.mutex);
+       mutex_lock(&tconn->conf_update);
+       old_conf = tconn->net_conf;
+
+       if (!old_conf) {
+               drbd_msg_put_info("net conf missing, try connect");
+               retcode = ERR_INVALID_REQUEST;
                goto fail;
        }
 
-       new_conf->timeout          = DRBD_TIMEOUT_DEF;
-       new_conf->try_connect_int  = DRBD_CONNECT_INT_DEF;
-       new_conf->ping_int         = DRBD_PING_INT_DEF;
-       new_conf->max_epoch_size   = DRBD_MAX_EPOCH_SIZE_DEF;
-       new_conf->max_buffers      = DRBD_MAX_BUFFERS_DEF;
-       new_conf->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
-       new_conf->sndbuf_size      = DRBD_SNDBUF_SIZE_DEF;
-       new_conf->rcvbuf_size      = DRBD_RCVBUF_SIZE_DEF;
-       new_conf->ko_count         = DRBD_KO_COUNT_DEF;
-       new_conf->after_sb_0p      = DRBD_AFTER_SB_0P_DEF;
-       new_conf->after_sb_1p      = DRBD_AFTER_SB_1P_DEF;
-       new_conf->after_sb_2p      = DRBD_AFTER_SB_2P_DEF;
-       new_conf->want_lose        = 0;
-       new_conf->two_primaries    = 0;
-       new_conf->wire_protocol    = DRBD_PROT_C;
-       new_conf->ping_timeo       = DRBD_PING_TIMEO_DEF;
-       new_conf->rr_conflict      = DRBD_RR_CONFLICT_DEF;
-       new_conf->on_congestion    = DRBD_ON_CONGESTION_DEF;
-       new_conf->cong_extents     = DRBD_CONG_EXTENTS_DEF;
-
-       err = net_conf_from_attrs(new_conf, info->attrs);
-       if (err) {
+       *new_conf = *old_conf;
+       if (should_set_defaults(info))
+               set_net_conf_defaults(new_conf);
+
+       err = net_conf_from_attrs_for_change(new_conf, info);
+       if (err && err != -ENOMSG) {
                retcode = ERR_MANDATORY_TAG;
                drbd_msg_put_info(from_attrs_err_to_txt(err));
                goto fail;
        }
 
-       if (new_conf->two_primaries
-           && (new_conf->wire_protocol != DRBD_PROT_C)) {
-               retcode = ERR_NOT_PROTO_C;
+       retcode = check_net_options(tconn, new_conf);
+       if (retcode != NO_ERROR)
+               goto fail;
+
+       /* re-sync running */
+       rsr = conn_resync_running(tconn);
+       if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
+               retcode = ERR_CSUMS_RESYNC_RUNNING;
                goto fail;
        }
 
-       idr_for_each_entry(&tconn->volumes, mdev, i) {
-               if (get_ldev(mdev)) {
-                       enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
-                       put_ldev(mdev);
-                       if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH) {
-                               retcode = ERR_STONITH_AND_PROT_A;
-                               goto fail;
-                       }
-               }
-               if (mdev->state.role == R_PRIMARY && new_conf->want_lose) {
-                       retcode = ERR_DISCARD;
-                       goto fail;
-               }
-               if (!mdev->bitmap) {
-                       if(drbd_bm_init(mdev)) {
-                               retcode = ERR_NOMEM;
-                               goto fail;
-                       }
-               }
+       /* online verify running */
+       ovr = conn_ov_running(tconn);
+       if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
+               retcode = ERR_VERIFY_RUNNING;
+               goto fail;
        }
 
-       if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A) {
-               retcode = ERR_CONG_NOT_PROTO_A;
+       retcode = alloc_crypto(&crypto, new_conf);
+       if (retcode != NO_ERROR)
                goto fail;
+
+       /* publish new_conf; readers of old_conf drain before the kfree below */
+       rcu_assign_pointer(tconn->net_conf, new_conf);
+
+       /* keep tfms that an in-flight resync / online verify is still using */
+       if (!rsr) {
+               crypto_free_hash(tconn->csums_tfm);
+               tconn->csums_tfm = crypto.csums_tfm;
+               crypto.csums_tfm = NULL;
+       }
+       if (!ovr) {
+               crypto_free_hash(tconn->verify_tfm);
+               tconn->verify_tfm = crypto.verify_tfm;
+               crypto.verify_tfm = NULL;
        }
 
-       retcode = NO_ERROR;
+       crypto_free_hash(tconn->integrity_tfm);
+       tconn->integrity_tfm = crypto.integrity_tfm;
+       if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
+               /* Do this without trying to take tconn->data.mutex again.  */
+               __drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);
 
-       new_my_addr = (struct sockaddr *)&new_conf->my_addr;
-       new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
-       list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
-               if (oconn == tconn)
-                       continue;
-               if (get_net_conf(oconn)) {
-                       taken_addr = (struct sockaddr *)&oconn->net_conf->my_addr;
-                       if (new_conf->my_addr_len == oconn->net_conf->my_addr_len &&
-                           !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
-                               retcode = ERR_LOCAL_ADDR;
-
-                       taken_addr = (struct sockaddr *)&oconn->net_conf->peer_addr;
-                       if (new_conf->peer_addr_len == oconn->net_conf->peer_addr_len &&
-                           !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
-                               retcode = ERR_PEER_ADDR;
-
-                       put_net_conf(oconn);
-                       if (retcode != NO_ERROR)
-                               goto fail;
-               }
+       crypto_free_hash(tconn->cram_hmac_tfm);
+       tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
+
+       mutex_unlock(&tconn->conf_update);
+       mutex_unlock(&tconn->data.mutex);
+       synchronize_rcu();
+       kfree(old_conf);
+
+       if (tconn->cstate >= C_WF_REPORT_PARAMS)
+               drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));
+
+       goto done;
+
+ fail:
+       mutex_unlock(&tconn->conf_update);
+       mutex_unlock(&tconn->data.mutex);
+       free_crypto(&crypto);
+       kfree(new_conf);
+ done:
+       conn_reconfig_done(tconn);
+ out:
+       drbd_adm_finish(info, retcode);
+       return 0;
+}
+
+/* Netlink handler: configure the network endpoints of a tconn and kick
+ * off the connection attempt (C_UNCONNECTED). */
+int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
+{
+       struct drbd_conf *mdev;
+       struct net_conf *old_conf, *new_conf = NULL;
+       struct crypto crypto = { };
+       struct drbd_tconn *tconn;
+       enum drbd_ret_code retcode;
+       int i;
+       int err;
+
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
+
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out;
+       if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
+               drbd_msg_put_info("connection endpoint(s) missing");
+               retcode = ERR_INVALID_REQUEST;
+               goto out;
        }
 
-       if (new_conf->cram_hmac_alg[0] != 0) {
-               snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
-                       new_conf->cram_hmac_alg);
-               tfm = crypto_alloc_hash(hmac_name, 0, CRYPTO_ALG_ASYNC);
-               if (IS_ERR(tfm)) {
-                       tfm = NULL;
-                       retcode = ERR_AUTH_ALG;
-                       goto fail;
+       /* No need for _rcu here. All reconfiguration is
+        * strictly serialized on genl_lock(). We are protected against
+        * concurrent reconfiguration/addition/deletion */
+       list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
+               if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len &&
+                   !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) {
+                       retcode = ERR_LOCAL_ADDR;
+                       goto out;
                }
 
-               if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
-                       retcode = ERR_AUTH_ALG_ND;
-                       goto fail;
+               if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len &&
+                   !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) {
+                       retcode = ERR_PEER_ADDR;
+                       goto out;
                }
        }
 
-       if (new_conf->integrity_alg[0]) {
-               integrity_w_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
-               if (IS_ERR(integrity_w_tfm)) {
-                       integrity_w_tfm = NULL;
-                       retcode=ERR_INTEGRITY_ALG;
-                       goto fail;
-               }
+       tconn = adm_ctx.tconn;
+       conn_reconfig_start(tconn);
 
-               if (!drbd_crypto_is_hash(crypto_hash_tfm(integrity_w_tfm))) {
-                       retcode=ERR_INTEGRITY_ALG_ND;
-                       goto fail;
-               }
+       if (tconn->cstate > C_STANDALONE) {
+               retcode = ERR_NET_CONFIGURED;
+               goto fail;
+       }
 
-               integrity_r_tfm = crypto_alloc_hash(new_conf->integrity_alg, 0, CRYPTO_ALG_ASYNC);
-               if (IS_ERR(integrity_r_tfm)) {
-                       integrity_r_tfm = NULL;
-                       retcode=ERR_INTEGRITY_ALG;
-                       goto fail;
-               }
+       /* allocation not in the IO path, drbdsetup / netlink process context */
+       new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
+       if (!new_conf) {
+               retcode = ERR_NOMEM;
+               goto fail;
        }
 
-       ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
+       set_net_conf_defaults(new_conf);
 
-       /* allocation not in the IO path, cqueue thread context */
-       if (integrity_w_tfm) {
-               i = crypto_hash_digestsize(integrity_w_tfm);
-               int_dig_out = kmalloc(i, GFP_KERNEL);
-               if (!int_dig_out) {
-                       retcode = ERR_NOMEM;
-                       goto fail;
-               }
-               int_dig_in = kmalloc(i, GFP_KERNEL);
-               if (!int_dig_in) {
-                       retcode = ERR_NOMEM;
-                       goto fail;
-               }
-               int_dig_vv = kmalloc(i, GFP_KERNEL);
-               if (!int_dig_vv) {
-                       retcode = ERR_NOMEM;
-                       goto fail;
-               }
+       err = net_conf_from_attrs(new_conf, info);
+       if (err && err != -ENOMSG) {
+               retcode = ERR_MANDATORY_TAG;
+               drbd_msg_put_info(from_attrs_err_to_txt(err));
+               goto fail;
        }
 
+       retcode = check_net_options(tconn, new_conf);
+       if (retcode != NO_ERROR)
+               goto fail;
+
+       retcode = alloc_crypto(&crypto, new_conf);
+       if (retcode != NO_ERROR)
+               goto fail;
+
+       ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
+
        conn_flush_workqueue(tconn);
-       spin_lock_irq(&tconn->req_lock);
-       if (tconn->net_conf != NULL) {
+
+       mutex_lock(&tconn->conf_update);
+       old_conf = tconn->net_conf;
+       if (old_conf) {
                retcode = ERR_NET_CONFIGURED;
-               spin_unlock_irq(&tconn->req_lock);
+               mutex_unlock(&tconn->conf_update);
                goto fail;
        }
-       tconn->net_conf = new_conf;
+       rcu_assign_pointer(tconn->net_conf, new_conf);
 
-       crypto_free_hash(tconn->cram_hmac_tfm);
-       tconn->cram_hmac_tfm = tfm;
-
-       crypto_free_hash(tconn->integrity_w_tfm);
-       tconn->integrity_w_tfm = integrity_w_tfm;
+       conn_free_crypto(tconn);
+       tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
+       tconn->integrity_tfm = crypto.integrity_tfm;
+       tconn->csums_tfm = crypto.csums_tfm;
+       tconn->verify_tfm = crypto.verify_tfm;
 
-       crypto_free_hash(tconn->integrity_r_tfm);
-       tconn->integrity_r_tfm = integrity_r_tfm;
+       tconn->my_addr_len = nla_len(adm_ctx.my_addr);
+       memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len);
+       tconn->peer_addr_len = nla_len(adm_ctx.peer_addr);
+       memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len);
 
-       kfree(tconn->int_dig_out);
-       kfree(tconn->int_dig_in);
-       kfree(tconn->int_dig_vv);
-       tconn->int_dig_out=int_dig_out;
-       tconn->int_dig_in=int_dig_in;
-       tconn->int_dig_vv=int_dig_vv;
-       retcode = _conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
-       spin_unlock_irq(&tconn->req_lock);
+       mutex_unlock(&tconn->conf_update);
 
+       /* fresh connection: reset the per-volume transfer counters */
+       rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, i) {
                mdev->send_cnt = 0;
                mdev->recv_cnt = 0;
-               kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
        }
+       rcu_read_unlock();
+
+       retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);
+
        conn_reconfig_done(tconn);
        drbd_adm_finish(info, retcode);
        return 0;
 
 fail:
-       kfree(int_dig_out);
-       kfree(int_dig_in);
-       kfree(int_dig_vv);
-       crypto_free_hash(tfm);
-       crypto_free_hash(integrity_w_tfm);
-       crypto_free_hash(integrity_r_tfm);
+       free_crypto(&crypto);
        kfree(new_conf);
 
        conn_reconfig_done(tconn);
@@ -1674,14 +2144,69 @@ out:
        return 0;
 }
 
+/* Try to bring @tconn down to C_STANDALONE.
+ * @force requests the transition with CS_HARD, skipping negotiation.
+ * Returns an enum drbd_state_rv; on success the receiver thread has been
+ * stopped and a final C_STANDALONE transition has been requested. */
+static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
+{
+       enum drbd_state_rv rv;
+
+       rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
+                       force ? CS_HARD : 0);
+
+       switch (rv) {
+       case SS_NOTHING_TO_DO:
+               break;
+       case SS_ALREADY_STANDALONE:
+               return SS_SUCCESS;
+       case SS_PRIMARY_NOP:
+               /* Our state checking code wants to see the peer outdated. */
+               rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
+                                               pdsk, D_OUTDATED), CS_VERBOSE);
+               break;
+       case SS_CW_FAILED_BY_PEER:
+               /* The peer probably wants to see us outdated. */
+               rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
+                                                       disk, D_OUTDATED), 0);
+               if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
+                       rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
+                                       CS_HARD);
+               }
+               break;
+       default:
+               /* no special handling necessary */
+               break;
+       }
+
+       if (rv >= SS_SUCCESS) {
+               enum drbd_state_rv rv2;
+               /* No one else can reconfigure the network while I am here.
+                * The state handling only uses drbd_thread_stop_nowait(),
+                * we want to really wait here until the receiver is no more.
+                */
+               drbd_thread_stop(&tconn->receiver);
+
+               /* Race breaker.  This additional state change request may be
+                * necessary, if this was a forced disconnect during a receiver
+                * restart.  We may have "killed" the receiver thread just
+                * after drbdd_init() returned.  Typically, we should be
+                * C_STANDALONE already, now, and this becomes a no-op.
+                */
+               rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
+                               CS_VERBOSE | CS_HARD);
+               if (rv2 < SS_SUCCESS)
+                       conn_err(tconn,
+                               "unexpected rv2=%d in conn_try_disconnect()\n",
+                               rv2);
+       }
+       return rv;
+}
+
 int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
 {
        struct disconnect_parms parms;
        struct drbd_tconn *tconn;
+       enum drbd_state_rv rv;
        enum drbd_ret_code retcode;
        int err;
 
-       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
@@ -1690,7 +2215,7 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
        tconn = adm_ctx.tconn;
        memset(&parms, 0, sizeof(parms));
        if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
-               err = disconnect_parms_from_attrs(&parms, info->attrs);
+               err = disconnect_parms_from_attrs(&parms, info);
                if (err) {
                        retcode = ERR_MANDATORY_TAG;
                        drbd_msg_put_info(from_attrs_err_to_txt(err));
@@ -1698,47 +2223,11 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
                }
        }
 
-       if (parms.force_disconnect) {
-               spin_lock_irq(&tconn->req_lock);
-               if (tconn->cstate >= C_WF_CONNECTION)
-                       _conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
-               spin_unlock_irq(&tconn->req_lock);
-               goto done;
-       }
-
-       retcode = conn_request_state(tconn, NS(conn, C_DISCONNECTING), 0);
-
-       if (retcode == SS_NOTHING_TO_DO)
-               goto done;
-       else if (retcode == SS_ALREADY_STANDALONE)
-               goto done;
-       else if (retcode == SS_PRIMARY_NOP) {
-               /* Our state checking code wants to see the peer outdated. */
-               retcode = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
-                                                       pdsk, D_OUTDATED), CS_VERBOSE);
-       } else if (retcode == SS_CW_FAILED_BY_PEER) {
-               /* The peer probably wants to see us outdated. */
-               retcode = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
-                                                       disk, D_OUTDATED), 0);
-               if (retcode == SS_IS_DISKLESS || retcode == SS_LOWER_THAN_OUTDATED) {
-                       conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
-                       retcode = SS_SUCCESS;
-               }
-       }
-
-       if (retcode < SS_SUCCESS)
-               goto fail;
-
-       if (wait_event_interruptible(tconn->ping_wait,
-                                    tconn->cstate != C_DISCONNECTING)) {
-               /* Do not test for mdev->state.conn == C_STANDALONE, since
-                  someone else might connect us in the mean time! */
-               retcode = ERR_INTR;
-               goto fail;
-       }
-
- done:
-       retcode = NO_ERROR;
+       /* the multi-step disconnect state dance lives in conn_try_disconnect() */
+       rv = conn_try_disconnect(tconn, parms.force_disconnect);
+       if (rv < SS_SUCCESS)
+               retcode = rv;  /* FIXME: Type mismatch. */
+       else
+               retcode = NO_ERROR;
 fail:
        drbd_adm_finish(info, retcode);
        return 0;
@@ -1752,7 +2241,7 @@ void resync_after_online_grow(struct drbd_conf *mdev)
        if (mdev->state.role != mdev->state.peer)
                iass = (mdev->state.role == R_PRIMARY);
        else
-               iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
+               iass = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
 
        if (iass)
                drbd_start_resync(mdev, C_SYNC_SOURCE);
@@ -1762,11 +2251,13 @@ void resync_after_online_grow(struct drbd_conf *mdev)
 
 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
 {
+       struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
        struct resize_parms rs;
        struct drbd_conf *mdev;
        enum drbd_ret_code retcode;
        enum determine_dev_size dd;
        enum dds_flags ddsf;
+       sector_t u_size;
        int err;
 
        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
@@ -1776,259 +2267,121 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
                goto fail;
 
        memset(&rs, 0, sizeof(struct resize_parms));
-       if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
-               err = resize_parms_from_attrs(&rs, info->attrs);
-               if (err) {
-                       retcode = ERR_MANDATORY_TAG;
-                       drbd_msg_put_info(from_attrs_err_to_txt(err));
-                       goto fail;
-               }
-       }
-
-       mdev = adm_ctx.mdev;
-       if (mdev->state.conn > C_CONNECTED) {
-               retcode = ERR_RESIZE_RESYNC;
-               goto fail;
-       }
-
-       if (mdev->state.role == R_SECONDARY &&
-           mdev->state.peer == R_SECONDARY) {
-               retcode = ERR_NO_PRIMARY;
-               goto fail;
-       }
-
-       if (!get_ldev(mdev)) {
-               retcode = ERR_NO_DISK;
-               goto fail;
-       }
-
-       if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
-               retcode = ERR_NEED_APV_93;
-               goto fail;
-       }
-
-       if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
-               mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
-
-       mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
-       ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
-       dd = drbd_determine_dev_size(mdev, ddsf);
-       drbd_md_sync(mdev);
-       put_ldev(mdev);
-       if (dd == dev_size_error) {
-               retcode = ERR_NOMEM_BITMAP;
-               goto fail;
-       }
-
-       if (mdev->state.conn == C_CONNECTED) {
-               if (dd == grew)
-                       set_bit(RESIZE_PENDING, &mdev->flags);
-
-               drbd_send_uuids(mdev);
-               drbd_send_sizes(mdev, 1, ddsf);
-       }
-
- fail:
-       drbd_adm_finish(info, retcode);
-       return 0;
-}
-
-int drbd_adm_syncer(struct sk_buff *skb, struct genl_info *info)
-{
-       struct drbd_conf *mdev;
-       enum drbd_ret_code retcode;
-       int err;
-       int ovr; /* online verify running */
-       int rsr; /* re-sync running */
-       struct crypto_hash *verify_tfm = NULL;
-       struct crypto_hash *csums_tfm = NULL;
-       struct syncer_conf sc;
-       cpumask_var_t new_cpu_mask;
-       int *rs_plan_s = NULL;
-       int fifo_size;
-
-       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
-       if (!adm_ctx.reply_skb)
-               return retcode;
-       if (retcode != NO_ERROR)
-               goto fail;
-       mdev = adm_ctx.mdev;
-
-       if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
-               retcode = ERR_NOMEM;
-               drbd_msg_put_info("unable to allocate cpumask");
-               goto fail;
-       }
-
-       if (((struct drbd_genlmsghdr*)info->userhdr)->flags
-                       & DRBD_GENL_F_SET_DEFAULTS) {
-               memset(&sc, 0, sizeof(struct syncer_conf));
-               sc.rate       = DRBD_RATE_DEF;
-               sc.after      = DRBD_AFTER_DEF;
-               sc.al_extents = DRBD_AL_EXTENTS_DEF;
-               sc.on_no_data  = DRBD_ON_NO_DATA_DEF;
-               sc.c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF;
-               sc.c_delay_target = DRBD_C_DELAY_TARGET_DEF;
-               sc.c_fill_target = DRBD_C_FILL_TARGET_DEF;
-               sc.c_max_rate = DRBD_C_MAX_RATE_DEF;
-               sc.c_min_rate = DRBD_C_MIN_RATE_DEF;
-       } else
-               memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf));
-
-       err = syncer_conf_from_attrs(&sc, info->attrs);
-       if (err) {
-               retcode = ERR_MANDATORY_TAG;
-               drbd_msg_put_info(from_attrs_err_to_txt(err));
-               goto fail;
-       }
-
-       /* re-sync running */
-       rsr = ( mdev->state.conn == C_SYNC_SOURCE ||
-               mdev->state.conn == C_SYNC_TARGET ||
-               mdev->state.conn == C_PAUSED_SYNC_S ||
-               mdev->state.conn == C_PAUSED_SYNC_T );
-
-       if (rsr && strcmp(sc.csums_alg, mdev->sync_conf.csums_alg)) {
-               retcode = ERR_CSUMS_RESYNC_RUNNING;
-               goto fail;
-       }
-
-       if (!rsr && sc.csums_alg[0]) {
-               csums_tfm = crypto_alloc_hash(sc.csums_alg, 0, CRYPTO_ALG_ASYNC);
-               if (IS_ERR(csums_tfm)) {
-                       csums_tfm = NULL;
-                       retcode = ERR_CSUMS_ALG;
-                       goto fail;
-               }
-
-               if (!drbd_crypto_is_hash(crypto_hash_tfm(csums_tfm))) {
-                       retcode = ERR_CSUMS_ALG_ND;
-                       goto fail;
-               }
-       }
-
-       /* online verify running */
-       ovr = (mdev->state.conn == C_VERIFY_S || mdev->state.conn == C_VERIFY_T);
-
-       if (ovr) {
-               if (strcmp(sc.verify_alg, mdev->sync_conf.verify_alg)) {
-                       retcode = ERR_VERIFY_RUNNING;
-                       goto fail;
-               }
-       }
-
-       if (!ovr && sc.verify_alg[0]) {
-               verify_tfm = crypto_alloc_hash(sc.verify_alg, 0, CRYPTO_ALG_ASYNC);
-               if (IS_ERR(verify_tfm)) {
-                       verify_tfm = NULL;
-                       retcode = ERR_VERIFY_ALG;
-                       goto fail;
-               }
-
-               if (!drbd_crypto_is_hash(crypto_hash_tfm(verify_tfm))) {
-                       retcode = ERR_VERIFY_ALG_ND;
-                       goto fail;
-               }
-       }
-
-       /* silently ignore cpu mask on UP kernel */
-       if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
-               err = __bitmap_parse(sc.cpu_mask, 32, 0,
-                               cpumask_bits(new_cpu_mask), nr_cpu_ids);
+       if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
+               err = resize_parms_from_attrs(&rs, info);
                if (err) {
-                       dev_warn(DEV, "__bitmap_parse() failed with %d\n", err);
-                       retcode = ERR_CPU_MASK_PARSE;
+                       retcode = ERR_MANDATORY_TAG;
+                       drbd_msg_put_info(from_attrs_err_to_txt(err));
                        goto fail;
                }
        }
 
-       if (!expect(sc.rate >= 1))
-               sc.rate = 1;
+       mdev = adm_ctx.mdev;
+       if (mdev->state.conn > C_CONNECTED) {
+               retcode = ERR_RESIZE_RESYNC;
+               goto fail;
+       }
 
-       /* clip to allowed range */
-       if (!expect(sc.al_extents >= DRBD_AL_EXTENTS_MIN))
-               sc.al_extents = DRBD_AL_EXTENTS_MIN;
-       if (!expect(sc.al_extents <= DRBD_AL_EXTENTS_MAX))
-               sc.al_extents = DRBD_AL_EXTENTS_MAX;
+       if (mdev->state.role == R_SECONDARY &&
+           mdev->state.peer == R_SECONDARY) {
+               retcode = ERR_NO_PRIMARY;
+               goto fail;
+       }
 
-       /* most sanity checks done, try to assign the new sync-after
-        * dependency.  need to hold the global lock in there,
-        * to avoid a race in the dependency loop check. */
-       retcode = drbd_alter_sa(mdev, sc.after);
-       if (retcode != NO_ERROR)
+       if (!get_ldev(mdev)) {
+               retcode = ERR_NO_DISK;
                goto fail;
+       }
 
-       fifo_size = (sc.c_plan_ahead * 10 * SLEEP_TIME) / HZ;
-       if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
-               rs_plan_s   = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
-               if (!rs_plan_s) {
-                       dev_err(DEV, "kmalloc of fifo_buffer failed");
+       if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
+               retcode = ERR_NEED_APV_93;
+               goto fail_ldev;
+       }
+
+       rcu_read_lock();
+       u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+       rcu_read_unlock();
+       if (u_size != (sector_t)rs.resize_size) {
+               new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
+               if (!new_disk_conf) {
                        retcode = ERR_NOMEM;
-                       goto fail;
+                       goto fail_ldev;
                }
        }
 
-       /* ok, assign the rest of it as well.
-        * lock against receive_SyncParam() */
-       spin_lock(&mdev->peer_seq_lock);
-       mdev->sync_conf = sc;
+       if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
+               mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
 
-       if (!rsr) {
-               crypto_free_hash(mdev->csums_tfm);
-               mdev->csums_tfm = csums_tfm;
-               csums_tfm = NULL;
+       if (new_disk_conf) {
+               mutex_lock(&mdev->tconn->conf_update);
+               old_disk_conf = mdev->ldev->disk_conf;
+               *new_disk_conf = *old_disk_conf;
+               new_disk_conf->disk_size = (sector_t)rs.resize_size;
+               rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
+               mutex_unlock(&mdev->tconn->conf_update);
+               synchronize_rcu();
+               kfree(old_disk_conf);
        }
 
-       if (!ovr) {
-               crypto_free_hash(mdev->verify_tfm);
-               mdev->verify_tfm = verify_tfm;
-               verify_tfm = NULL;
+       ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
+       dd = drbd_determine_dev_size(mdev, ddsf);
+       drbd_md_sync(mdev);
+       put_ldev(mdev);
+       if (dd == dev_size_error) {
+               retcode = ERR_NOMEM_BITMAP;
+               goto fail;
        }
 
-       if (fifo_size != mdev->rs_plan_s.size) {
-               kfree(mdev->rs_plan_s.values);
-               mdev->rs_plan_s.values = rs_plan_s;
-               mdev->rs_plan_s.size   = fifo_size;
-               mdev->rs_planed = 0;
-               rs_plan_s = NULL;
+       if (mdev->state.conn == C_CONNECTED) {
+               if (dd == grew)
+                       set_bit(RESIZE_PENDING, &mdev->flags);
+
+               drbd_send_uuids(mdev);
+               drbd_send_sizes(mdev, 1, ddsf);
        }
 
-       spin_unlock(&mdev->peer_seq_lock);
+ fail:
+       drbd_adm_finish(info, retcode);
+       return 0;
 
-       if (get_ldev(mdev)) {
-               wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
-               drbd_al_shrink(mdev);
-               err = drbd_check_al_size(mdev);
-               lc_unlock(mdev->act_log);
-               wake_up(&mdev->al_wait);
+ fail_ldev:
+       put_ldev(mdev);
+       goto fail;
+}
 
-               put_ldev(mdev);
-               drbd_md_sync(mdev);
+int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
+{
+       enum drbd_ret_code retcode;
+       struct drbd_tconn *tconn;
+       struct res_opts res_opts;
+       int err;
 
-               if (err) {
-                       retcode = ERR_NOMEM;
-                       goto fail;
-               }
-       }
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto fail;
+       tconn = adm_ctx.tconn;
 
-       if (mdev->state.conn >= C_CONNECTED)
-               drbd_send_sync_param(mdev, &sc);
+       res_opts = tconn->res_opts;
+       if (should_set_defaults(info))
+               set_res_opts_defaults(&res_opts);
+
+       err = res_opts_from_attrs(&res_opts, info);
+       if (err && err != -ENOMSG) {
+               retcode = ERR_MANDATORY_TAG;
+               drbd_msg_put_info(from_attrs_err_to_txt(err));
+               goto fail;
+       }
 
-       if (!cpumask_equal(mdev->tconn->cpu_mask, new_cpu_mask)) {
-               cpumask_copy(mdev->tconn->cpu_mask, new_cpu_mask);
-               drbd_calc_cpu_mask(mdev->tconn);
-               mdev->tconn->receiver.reset_cpu_mask = 1;
-               mdev->tconn->asender.reset_cpu_mask = 1;
-               mdev->tconn->worker.reset_cpu_mask = 1;
+       err = set_resource_options(tconn, &res_opts);
+       if (err) {
+               retcode = ERR_INVALID_REQUEST;
+               if (err == -ENOMEM)
+                       retcode = ERR_NOMEM;
        }
 
-       kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
 fail:
-       kfree(rs_plan_s);
-       free_cpumask_var(new_cpu_mask);
-       crypto_free_hash(csums_tfm);
-       crypto_free_hash(verify_tfm);
-
        drbd_adm_finish(info, retcode);
        return 0;
 }
@@ -2047,8 +2400,11 @@ int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
        mdev = adm_ctx.mdev;
 
        /* If there is still bitmap IO pending, probably because of a previous
-        * resync just being finished, wait for it before requesting a new resync. */
+        * resync just being finished, wait for it before requesting a new resync.
+        * Also wait for its after_state_ch(). */
+       drbd_suspend_io(mdev);
        wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+       drbd_flush_workqueue(mdev);
 
        retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
 
@@ -2066,7 +2422,25 @@ int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
 
                retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
        }
+       drbd_resume_io(mdev);
+
+out:
+       drbd_adm_finish(info, retcode);
+       return 0;
+}
+
+static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
+               union drbd_state mask, union drbd_state val)
+{
+       enum drbd_ret_code retcode;
+
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out;
 
+       retcode = drbd_request_state(adm_ctx.mdev, mask, val);
 out:
        drbd_adm_finish(info, retcode);
        return 0;
@@ -2081,10 +2455,10 @@ static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
        return rv;
 }
 
-static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
-               union drbd_state mask, union drbd_state val)
+int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
 {
-       enum drbd_ret_code retcode;
+       int retcode; /* drbd_ret_code, drbd_state_rv */
+       struct drbd_conf *mdev;
 
        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
@@ -2092,17 +2466,37 @@ static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *
        if (retcode != NO_ERROR)
                goto out;
 
-       retcode = drbd_request_state(adm_ctx.mdev, mask, val);
+       mdev = adm_ctx.mdev;
+
+       /* If there is still bitmap IO pending, probably because of a previous
+        * resync just being finished, wait for it before requesting a new resync.
+        * Also wait for its after_state_ch(). */
+       drbd_suspend_io(mdev);
+       wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+       drbd_flush_workqueue(mdev);
+
+       retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
+       if (retcode < SS_SUCCESS) {
+               if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
+                       /* The peer will get a resync upon connect anyways.
+                        * Just make that into a full resync. */
+                       retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
+                       if (retcode >= SS_SUCCESS) {
+                               if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
+                                                  "set_n_write from invalidate_peer",
+                                                  BM_LOCKED_SET_ALLOWED))
+                                       retcode = ERR_IO_MD_DISK;
+                       }
+               } else
+                       retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
+       }
+       drbd_resume_io(mdev);
+
 out:
        drbd_adm_finish(info, retcode);
        return 0;
 }
 
-int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
-{
-       return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
-}
-
 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
 {
        enum drbd_ret_code retcode;
@@ -2122,7 +2516,7 @@ out:
 
 int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
 {
-       union drbd_state s;
+       union drbd_dev_state s;
        enum drbd_ret_code retcode;
 
        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
@@ -2187,13 +2581,39 @@ int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
        return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
 }
 
+int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr)
+{
+       struct nlattr *nla;
+       nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
+       if (!nla)
+               goto nla_put_failure;
+       if (vnr != VOLUME_UNSPECIFIED &&
+           nla_put_u32(skb, T_ctx_volume, vnr))
+               goto nla_put_failure;
+       if (nla_put_string(skb, T_ctx_resource_name, tconn->name))
+               goto nla_put_failure;
+       if (tconn->my_addr_len &&
+           nla_put(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr))
+               goto nla_put_failure;
+       if (tconn->peer_addr_len &&
+           nla_put(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr))
+               goto nla_put_failure;
+       nla_nest_end(skb, nla);
+       return 0;
+
+nla_put_failure:
+       if (nla)
+               nla_nest_cancel(skb, nla);
+       return -EMSGSIZE;
+}
+
 int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
                const struct sib_info *sib)
 {
        struct state_info *si = NULL; /* for sizeof(si->member); */
+       struct net_conf *nc;
        struct nlattr *nla;
        int got_ldev;
-       int got_net;
        int err = 0;
        int exclude_sensitive;
 
@@ -2211,44 +2631,64 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
        exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);
 
        got_ldev = get_ldev(mdev);
-       got_net = get_net_conf(mdev->tconn);
 
        /* We need to add connection name and volume number information still.
         * Minor number is in drbd_genlmsghdr. */
-       nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
-       if (!nla)
+       if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr))
+               goto nla_put_failure;
+
+       if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
                goto nla_put_failure;
-       NLA_PUT_U32(skb, T_ctx_volume, mdev->vnr);
-       NLA_PUT_STRING(skb, T_ctx_conn_name, mdev->tconn->name);
-       nla_nest_end(skb, nla);
 
+       rcu_read_lock();
        if (got_ldev)
-               if (disk_conf_to_skb(skb, &mdev->ldev->dc, exclude_sensitive))
-                       goto nla_put_failure;
-       if (got_net)
-               if (net_conf_to_skb(skb, mdev->tconn->net_conf, exclude_sensitive))
+               if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
                        goto nla_put_failure;
 
-       if (syncer_conf_to_skb(skb, &mdev->sync_conf, exclude_sensitive))
-                       goto nla_put_failure;
+       nc = rcu_dereference(mdev->tconn->net_conf);
+       if (nc)
+               err = net_conf_to_skb(skb, nc, exclude_sensitive);
+       rcu_read_unlock();
+       if (err)
+               goto nla_put_failure;
 
        nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
        if (!nla)
                goto nla_put_failure;
-       NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
-       NLA_PUT_U32(skb, T_current_state, mdev->state.i);
-       NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
-       NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));
+       if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
+           nla_put_u32(skb, T_current_state, mdev->state.i) ||
+           nla_put_u64(skb, T_ed_uuid, mdev->ed_uuid) ||
+           nla_put_u64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev)) ||
+           nla_put_u64(skb, T_send_cnt, mdev->send_cnt) ||
+           nla_put_u64(skb, T_recv_cnt, mdev->recv_cnt) ||
+           nla_put_u64(skb, T_read_cnt, mdev->read_cnt) ||
+           nla_put_u64(skb, T_writ_cnt, mdev->writ_cnt) ||
+           nla_put_u64(skb, T_al_writ_cnt, mdev->al_writ_cnt) ||
+           nla_put_u64(skb, T_bm_writ_cnt, mdev->bm_writ_cnt) ||
+           nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&mdev->ap_bio_cnt)) ||
+           nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&mdev->ap_pending_cnt)) ||
+           nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&mdev->rs_pending_cnt)))
+               goto nla_put_failure;
 
        if (got_ldev) {
-               NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
-               NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
-               NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
-               NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
+               int err;
+
+               spin_lock_irq(&mdev->ldev->md.uuid_lock);
+               err = nla_put(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
+               spin_unlock_irq(&mdev->ldev->md.uuid_lock);
+
+               if (err)
+                       goto nla_put_failure;
+
+               if (nla_put_u32(skb, T_disk_flags, mdev->ldev->md.flags) ||
+                   nla_put_u64(skb, T_bits_total, drbd_bm_bits(mdev)) ||
+                   nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(mdev)))
+                       goto nla_put_failure;
                if (C_SYNC_SOURCE <= mdev->state.conn &&
                    C_PAUSED_SYNC_T >= mdev->state.conn) {
-                       NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
-                       NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
+                       if (nla_put_u64(skb, T_bits_rs_total, mdev->rs_total) ||
+                           nla_put_u64(skb, T_bits_rs_failed, mdev->rs_failed))
+                               goto nla_put_failure;
                }
        }
 
@@ -2258,15 +2698,18 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
                case SIB_GET_STATUS_REPLY:
                        break;
                case SIB_STATE_CHANGE:
-                       NLA_PUT_U32(skb, T_prev_state, sib->os.i);
-                       NLA_PUT_U32(skb, T_new_state, sib->ns.i);
+                       if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
+                           nla_put_u32(skb, T_new_state, sib->ns.i))
+                               goto nla_put_failure;
                        break;
                case SIB_HELPER_POST:
-                       NLA_PUT_U32(skb,
-                               T_helper_exit_code, sib->helper_exit_code);
+                       if (nla_put_u32(skb, T_helper_exit_code,
+                                       sib->helper_exit_code))
+                               goto nla_put_failure;
                        /* fall through */
                case SIB_HELPER_PRE:
-                       NLA_PUT_STRING(skb, T_helper, sib->helper_name);
+                       if (nla_put_string(skb, T_helper, sib->helper_name))
+                               goto nla_put_failure;
                        break;
                }
        }
@@ -2277,8 +2720,6 @@ nla_put_failure:
                err = -EMSGSIZE;
        if (got_ldev)
                put_ldev(mdev);
-       if (got_net)
-               put_net_conf(mdev->tconn);
        return err;
 }
 
@@ -2303,48 +2744,179 @@ out:
        return 0;
 }
 
-int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
+int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
 {
        struct drbd_conf *mdev;
        struct drbd_genlmsghdr *dh;
-       int minor = cb->args[0];
-
-       /* Open coded deferred single idr_for_each_entry iteration.
+       struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0];
+       struct drbd_tconn *tconn = NULL;
+       struct drbd_tconn *tmp;
+       unsigned volume = cb->args[1];
+
+       /* Open coded, deferred, iteration:
+        * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
+        *      idr_for_each_entry(&tconn->volumes, mdev, i) {
+        *        ...
+        *      }
+        * }
+        * where tconn is cb->args[0];
+        * and i is cb->args[1];
+        *
+        * cb->args[2] indicates if we shall loop over all resources,
+        * or just dump all volumes of a single resource.
+        *
         * This may miss entries inserted after this dump started,
         * or entries deleted before they are reached.
-        * But we need to make sure the mdev won't disappear while
-        * we are looking at it. */
+        *
+        * We need to make sure the mdev won't disappear while
+        * we are looking at it, and revalidate our iterators
+        * on each iteration.
+        */
 
+       /* synchronize with conn_create()/conn_destroy() */
        rcu_read_lock();
-       mdev = idr_get_next(&minors, &minor);
-       if (mdev) {
-               dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
+       /* revalidate iterator position */
+       list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
+               if (pos == NULL) {
+                       /* first iteration */
+                       pos = tmp;
+                       tconn = pos;
+                       break;
+               }
+               if (tmp == pos) {
+                       tconn = pos;
+                       break;
+               }
+       }
+       if (tconn) {
+next_tconn:
+               mdev = idr_get_next(&tconn->volumes, &volume);
+               if (!mdev) {
+                       /* No more volumes to dump on this tconn.
+                        * Advance tconn iterator. */
+                       pos = list_entry_rcu(tconn->all_tconn.next,
+                                            struct drbd_tconn, all_tconn);
+                       /* Did we dump any volume on this tconn yet? */
+                       if (volume != 0) {
+                               /* If we reached the end of the list,
+                                * or only a single resource dump was requested,
+                                * we are done. */
+                               if (&pos->all_tconn == &drbd_tconns || cb->args[2])
+                                       goto out;
+                               volume = 0;
+                               tconn = pos;
+                               goto next_tconn;
+                       }
+               }
+
+               dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
                                cb->nlh->nlmsg_seq, &drbd_genl_family,
                                NLM_F_MULTI, DRBD_ADM_GET_STATUS);
                if (!dh)
-                       goto errout;
+                       goto out;
+
+               if (!mdev) {
+                       /* This is a tconn without a single volume.
+                        * Surprisingly enough, it may have a network
+                        * configuration. */
+                       struct net_conf *nc;
+                       dh->minor = -1U;
+                       dh->ret_code = NO_ERROR;
+                       if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED))
+                               goto cancel;
+                       nc = rcu_dereference(tconn->net_conf);
+                       if (nc && net_conf_to_skb(skb, nc, 1) != 0)
+                               goto cancel;
+                       goto done;
+               }
 
-               D_ASSERT(mdev->minor == minor);
+               D_ASSERT(mdev->vnr == volume);
+               D_ASSERT(mdev->tconn == tconn);
 
-               dh->minor = minor;
+               dh->minor = mdev_to_minor(mdev);
                dh->ret_code = NO_ERROR;
 
                if (nla_put_status_info(skb, mdev, NULL)) {
+cancel:
                        genlmsg_cancel(skb, dh);
-                       goto errout;
+                       goto out;
                }
+done:
                genlmsg_end(skb, dh);
         }
 
-errout:
+out:
        rcu_read_unlock();
-       /* where to start idr_get_next with the next iteration */
-        cb->args[0] = minor+1;
+       /* where to start the next iteration */
+        cb->args[0] = (long)pos;
+        cb->args[1] = (pos == tconn) ? volume + 1 : 0;
 
-       /* No more minors found: empty skb. Which will terminate the dump. */
+       /* No more tconns/volumes/minors found results in an empty skb.
+        * Which will terminate the dump. */
         return skb->len;
 }
 
+/*
+ * Request status of all resources, or of all volumes within a single resource.
+ *
+ * This is a dump, as the answer may not fit in a single reply skb otherwise.
+ * Which means we cannot use the family->attrbuf or other such members, because
+ * dump is NOT protected by the genl_lock().  During dump, we only have access
+ * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
+ *
+ * Once things are setup properly, we call into get_one_status().
+ */
+int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
+       struct nlattr *nla;
+       const char *resource_name;
+       struct drbd_tconn *tconn;
+       int maxtype;
+
+       /* Is this a followup call? */
+       if (cb->args[0]) {
+               /* ... of a single resource dump,
+                * and the resource iterator has been advanced already? */
+               if (cb->args[2] && cb->args[2] != cb->args[0])
+                       return 0; /* DONE. */
+               goto dump;
+       }
+
+       /* First call (from netlink_dump_start).  We need to figure out
+        * which resource(s) the user wants us to dump. */
+       nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
+                       nlmsg_attrlen(cb->nlh, hdrlen),
+                       DRBD_NLA_CFG_CONTEXT);
+
+       /* No explicit context given.  Dump all. */
+       if (!nla)
+               goto dump;
+       maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
+       nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
+       if (IS_ERR(nla))
+               return PTR_ERR(nla);
+       /* context given, but no name present? */
+       if (!nla)
+               return -EINVAL;
+       resource_name = nla_data(nla);
+       tconn = conn_get_by_name(resource_name);
+
+       if (!tconn)
+               return -ENODEV;
+
+       kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */
+
+       /* prime iterators, and set "filter" mode mark:
+        * only dump this tconn. */
+       cb->args[0] = (long)tconn;
+       /* cb->args[1] = 0; passed in this way. */
+       cb->args[2] = (long)tconn;
+
+dump:
+       return get_one_status(skb, cb);
+}
+
 int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
 {
        enum drbd_ret_code retcode;
@@ -2376,6 +2948,7 @@ int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
 {
        struct drbd_conf *mdev;
        enum drbd_ret_code retcode;
+       struct start_ov_parms parms;
 
        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
@@ -2384,23 +2957,28 @@ int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
                goto out;
 
        mdev = adm_ctx.mdev;
+
+       /* resume from last known position, if possible */
+       parms.ov_start_sector = mdev->ov_start_sector;
+       parms.ov_stop_sector = ULLONG_MAX;
        if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
-               /* resume from last known position, if possible */
-               struct start_ov_parms parms =
-                       { .ov_start_sector = mdev->ov_start_sector };
-               int err = start_ov_parms_from_attrs(&parms, info->attrs);
+               int err = start_ov_parms_from_attrs(&parms, info);
                if (err) {
                        retcode = ERR_MANDATORY_TAG;
                        drbd_msg_put_info(from_attrs_err_to_txt(err));
                        goto out;
                }
-               /* w_make_ov_request expects position to be aligned */
-               mdev->ov_start_sector = parms.ov_start_sector & ~BM_SECT_PER_BIT;
        }
+       /* w_make_ov_request expects position to be aligned */
+       mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
+       mdev->ov_stop_sector = parms.ov_stop_sector;
+
        /* If there is still bitmap IO pending, e.g. previous resync or verify
         * just being finished, wait for it before requesting a new resync. */
+       drbd_suspend_io(mdev);
        wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
        retcode = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
+       drbd_resume_io(mdev);
 out:
        drbd_adm_finish(info, retcode);
        return 0;
@@ -2424,7 +3002,7 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
        mdev = adm_ctx.mdev;
        memset(&args, 0, sizeof(args));
        if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
-               err = new_c_uuid_parms_from_attrs(&args, info->attrs);
+               err = new_c_uuid_parms_from_attrs(&args, info);
                if (err) {
                        retcode = ERR_MANDATORY_TAG;
                        drbd_msg_put_info(from_attrs_err_to_txt(err));
@@ -2481,24 +3059,26 @@ out_nolock:
 }
 
 static enum drbd_ret_code
-drbd_check_conn_name(const char *name)
+drbd_check_resource_name(const char *name)
 {
        if (!name || !name[0]) {
-               drbd_msg_put_info("connection name missing");
+               drbd_msg_put_info("resource name missing");
                return ERR_MANDATORY_TAG;
        }
        /* if we want to use these in sysfs/configfs/debugfs some day,
         * we must not allow slashes */
        if (strchr(name, '/')) {
-               drbd_msg_put_info("invalid connection name");
+               drbd_msg_put_info("invalid resource name");
                return ERR_INVALID_REQUEST;
        }
        return NO_ERROR;
 }
 
-int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info)
+int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
 {
        enum drbd_ret_code retcode;
+       struct res_opts res_opts;
+       int err;
 
        retcode = drbd_adm_prepare(skb, info, 0);
        if (!adm_ctx.reply_skb)
@@ -2506,17 +3086,28 @@ int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto out;
 
-       retcode = drbd_check_conn_name(adm_ctx.conn_name);
+       set_res_opts_defaults(&res_opts);
+       err = res_opts_from_attrs(&res_opts, info);
+       if (err && err != -ENOMSG) {
+               retcode = ERR_MANDATORY_TAG;
+               drbd_msg_put_info(from_attrs_err_to_txt(err));
+               goto out;
+       }
+
+       retcode = drbd_check_resource_name(adm_ctx.resource_name);
        if (retcode != NO_ERROR)
                goto out;
 
        if (adm_ctx.tconn) {
-               retcode = ERR_INVALID_REQUEST;
-               drbd_msg_put_info("connection exists");
+               if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
+                       retcode = ERR_INVALID_REQUEST;
+                       drbd_msg_put_info("resource exists");
+               }
+               /* else: still NO_ERROR */
                goto out;
        }
 
-       if (!drbd_new_tconn(adm_ctx.conn_name))
+       if (!conn_create(adm_ctx.resource_name, &res_opts))
                retcode = ERR_NOMEM;
 out:
        drbd_adm_finish(info, retcode);
@@ -2528,34 +3119,59 @@ int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
        struct drbd_genlmsghdr *dh = info->userhdr;
        enum drbd_ret_code retcode;
 
-       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;
 
-       /* FIXME drop minor_count parameter, limit to MINORMASK */
-       if (dh->minor >= minor_count) {
+       if (dh->minor > MINORMASK) {
                drbd_msg_put_info("requested minor out of range");
                retcode = ERR_INVALID_REQUEST;
                goto out;
        }
-       /* FIXME we need a define here */
-       if (adm_ctx.volume >= 256) {
+       if (adm_ctx.volume > DRBD_VOLUME_MAX) {
                drbd_msg_put_info("requested volume id out of range");
                retcode = ERR_INVALID_REQUEST;
                goto out;
        }
 
+       /* drbd_adm_prepare made sure already
+        * that mdev->tconn and mdev->vnr match the request. */
+       if (adm_ctx.mdev) {
+               if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
+                       retcode = ERR_MINOR_EXISTS;
+               /* else: still NO_ERROR */
+               goto out;
+       }
+
        retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
 out:
        drbd_adm_finish(info, retcode);
        return 0;
 }
 
+static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
+{
+       if (mdev->state.disk == D_DISKLESS &&
+           /* no need to be mdev->state.conn == C_STANDALONE &&
+            * we may want to delete a minor from a live replication group.
+            */
+           mdev->state.role == R_SECONDARY) {
+               _drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS),
+                                   CS_VERBOSE + CS_WAIT_COMPLETE);
+               idr_remove(&mdev->tconn->volumes, mdev->vnr);
+               idr_remove(&minors, mdev_to_minor(mdev));
+               del_gendisk(mdev->vdisk);
+               synchronize_rcu();
+               kref_put(&mdev->kref, &drbd_minor_destroy);
+               return NO_ERROR;
+       } else
+               return ERR_MINOR_CONFIGURED;
+}
+
 int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
 {
-       struct drbd_conf *mdev;
        enum drbd_ret_code retcode;
 
        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
@@ -2564,36 +3180,110 @@ int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto out;
 
-       mdev = adm_ctx.mdev;
-       if (mdev->state.disk == D_DISKLESS &&
-           mdev->state.conn == C_STANDALONE &&
-           mdev->state.role == R_SECONDARY) {
-               drbd_delete_device(mdev_to_minor(mdev));
+       retcode = adm_delete_minor(adm_ctx.mdev);
+out:
+       drbd_adm_finish(info, retcode);
+       return 0;
+}
+
+int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
+{
+       int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
+       struct drbd_conf *mdev;
+       unsigned i;
+
+       retcode = drbd_adm_prepare(skb, info, 0);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out;
+
+       if (!adm_ctx.tconn) {
+               retcode = ERR_RES_NOT_KNOWN;
+               goto out;
+       }
+
+       /* demote */
+       idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
+               retcode = drbd_set_role(mdev, R_SECONDARY, 0);
+               if (retcode < SS_SUCCESS) {
+                       drbd_msg_put_info("failed to demote");
+                       goto out;
+               }
+       }
+
+       retcode = conn_try_disconnect(adm_ctx.tconn, 0);
+       if (retcode < SS_SUCCESS) {
+               drbd_msg_put_info("failed to disconnect");
+               goto out;
+       }
+
+       /* detach */
+       idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
+               retcode = adm_detach(mdev, 0);
+               if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
+                       drbd_msg_put_info("failed to detach");
+                       goto out;
+               }
+       }
+
+       /* If we reach this, all volumes (of this tconn) are Secondary,
+        * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
+        * actually stopped, state handling only does drbd_thread_stop_nowait(). */
+       drbd_thread_stop(&adm_ctx.tconn->worker);
+
+       /* Now, nothing can fail anymore */
+
+       /* delete volumes */
+       idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
+               retcode = adm_delete_minor(mdev);
+               if (retcode != NO_ERROR) {
+                       /* "can not happen" */
+                       drbd_msg_put_info("failed to delete volume");
+                       goto out;
+               }
+       }
+
+       /* delete connection */
+       if (conn_lowest_minor(adm_ctx.tconn) < 0) {
+               list_del_rcu(&adm_ctx.tconn->all_tconn);
+               synchronize_rcu();
+               kref_put(&adm_ctx.tconn->kref, &conn_destroy);
+
                retcode = NO_ERROR;
-       } else
-               retcode = ERR_MINOR_CONFIGURED;
+       } else {
+               /* "can not happen" */
+               retcode = ERR_RES_IN_USE;
+               drbd_msg_put_info("failed to delete connection");
+       }
+       goto out;
 out:
        drbd_adm_finish(info, retcode);
        return 0;
 }
 
-int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info)
+int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
 {
        enum drbd_ret_code retcode;
 
-       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;
 
        if (conn_lowest_minor(adm_ctx.tconn) < 0) {
-               drbd_free_tconn(adm_ctx.tconn);
+               list_del_rcu(&adm_ctx.tconn->all_tconn);
+               synchronize_rcu();
+               kref_put(&adm_ctx.tconn->kref, &conn_destroy);
+
                retcode = NO_ERROR;
        } else {
-               retcode = ERR_CONN_IN_USE;
+               retcode = ERR_RES_IN_USE;
        }
 
+       if (retcode == NO_ERROR)
+               drbd_thread_stop(&adm_ctx.tconn->worker);
 out:
        drbd_adm_finish(info, retcode);
        return 0;
@@ -2607,6 +3297,13 @@ void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
        unsigned seq;
        int err = -ENOMEM;
 
+       if (sib->sib_reason == SIB_SYNC_PROGRESS) {
+               if (time_after(jiffies, mdev->rs_last_bcast + HZ))
+                       mdev->rs_last_bcast = jiffies;
+               else
+                       return;
+       }
+
        seq = atomic_inc_return(&drbd_genl_seq);
        msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
        if (!msg)
@@ -2617,7 +3314,7 @@ void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
        if (!d_out) /* cannot happen, but anyways. */
                goto nla_put_failure;
        d_out->minor = mdev_to_minor(mdev);
-       d_out->ret_code = 0;
+       d_out->ret_code = NO_ERROR;
 
        if (nla_put_status_info(msg, mdev, sib))
                goto nla_put_failure;