drbd: Fix drbdsetup wait-connect, wait-sync etc... commands
[linux-block.git] / drivers / block / drbd / drbd_nl.c
index ac41aca72cb8b4f85e2cf2601af3afe904ee34bf..2af26fc9528083d23cbde63fbe42128a26757599 100644 (file)
@@ -47,8 +47,8 @@
 int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);
 
-int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info);
-int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
+int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);
 
 int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
@@ -75,6 +75,7 @@ int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
 int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
 
 #include <linux/drbd_genl_api.h>
+#include "drbd_nla.h"
 #include <linux/genl_magic_func.h>
 
 /* used blkdev_get_by_path, to claim our meta data device(s) */
@@ -92,7 +93,9 @@ static struct drbd_config_context {
 #define VOLUME_UNSPECIFIED             (-1U)
        /* pointer into the request skb,
         * limited lifetime! */
-       char *conn_name;
+       char *resource_name;
+       struct nlattr *my_addr;
+       struct nlattr *peer_addr;
 
        /* reply buffer */
        struct sk_buff *reply_skb;
@@ -140,7 +143,8 @@ int drbd_msg_put_info(const char *info)
  * If it returns successfully, adm_ctx members are valid.
  */
 #define DRBD_ADM_NEED_MINOR    1
-#define DRBD_ADM_NEED_CONN     2
+#define DRBD_ADM_NEED_RESOURCE 2
+#define DRBD_ADM_NEED_CONNECTION 4
 static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
                unsigned flags)
 {
@@ -151,24 +155,28 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
        memset(&adm_ctx, 0, sizeof(adm_ctx));
 
        /* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
-       if (cmd != DRBD_ADM_GET_STATUS
-       && security_netlink_recv(skb, CAP_SYS_ADMIN))
+       if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
               return -EPERM;
 
        adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
-       if (!adm_ctx.reply_skb)
+       if (!adm_ctx.reply_skb) {
+               err = -ENOMEM;
                goto fail;
+       }
 
        adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
                                        info, &drbd_genl_family, 0, cmd);
        /* put of a few bytes into a fresh skb of >= 4k will always succeed.
         * but anyways */
-       if (!adm_ctx.reply_dh)
+       if (!adm_ctx.reply_dh) {
+               err = -ENOMEM;
                goto fail;
+       }
 
        adm_ctx.reply_dh->minor = d_in->minor;
        adm_ctx.reply_dh->ret_code = NO_ERROR;
 
+       adm_ctx.volume = VOLUME_UNSPECIFIED;
        if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
                struct nlattr *nla;
                /* parse and validate only */
@@ -186,32 +194,62 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
 
                /* and assign stuff to the global adm_ctx */
                nla = nested_attr_tb[__nla_type(T_ctx_volume)];
-               adm_ctx.volume = nla ? nla_get_u32(nla) : VOLUME_UNSPECIFIED;
-               nla = nested_attr_tb[__nla_type(T_ctx_conn_name)];
                if (nla)
-                       adm_ctx.conn_name = nla_data(nla);
-       } else
-               adm_ctx.volume = VOLUME_UNSPECIFIED;
+                       adm_ctx.volume = nla_get_u32(nla);
+               nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
+               if (nla)
+                       adm_ctx.resource_name = nla_data(nla);
+               adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
+               adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
+               if ((adm_ctx.my_addr &&
+                    nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) ||
+                   (adm_ctx.peer_addr &&
+                    nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) {
+                       err = -EINVAL;
+                       goto fail;
+               }
+       }
 
        adm_ctx.minor = d_in->minor;
        adm_ctx.mdev = minor_to_mdev(d_in->minor);
-       adm_ctx.tconn = conn_get_by_name(adm_ctx.conn_name);
+       adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);
 
        if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
                drbd_msg_put_info("unknown minor");
                return ERR_MINOR_INVALID;
        }
-       if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_CONN)) {
-               drbd_msg_put_info("unknown connection");
+       if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) {
+               drbd_msg_put_info("unknown resource");
                return ERR_INVALID_REQUEST;
        }
 
+       if (flags & DRBD_ADM_NEED_CONNECTION) {
+               if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) {
+                       drbd_msg_put_info("no resource name expected");
+                       return ERR_INVALID_REQUEST;
+               }
+               if (adm_ctx.mdev) {
+                       drbd_msg_put_info("no minor number expected");
+                       return ERR_INVALID_REQUEST;
+               }
+               if (adm_ctx.my_addr && adm_ctx.peer_addr)
+                       adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
+                                                         nla_len(adm_ctx.my_addr),
+                                                         nla_data(adm_ctx.peer_addr),
+                                                         nla_len(adm_ctx.peer_addr));
+               if (!adm_ctx.tconn) {
+                       drbd_msg_put_info("unknown connection");
+                       return ERR_INVALID_REQUEST;
+               }
+       }
+
        /* some more paranoia, if the request was over-determined */
        if (adm_ctx.mdev && adm_ctx.tconn &&
            adm_ctx.mdev->tconn != adm_ctx.tconn) {
-               pr_warning("request: minor=%u, conn=%s; but that minor belongs to connection %s\n",
-                               adm_ctx.minor, adm_ctx.conn_name, adm_ctx.mdev->tconn->name);
-               drbd_msg_put_info("minor exists in different connection");
+               pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
+                               adm_ctx.minor, adm_ctx.resource_name,
+                               adm_ctx.mdev->tconn->name);
+               drbd_msg_put_info("minor exists in different resource");
                return ERR_INVALID_REQUEST;
        }
        if (adm_ctx.mdev &&
@@ -229,14 +267,11 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
 fail:
        nlmsg_free(adm_ctx.reply_skb);
        adm_ctx.reply_skb = NULL;
-       return -ENOMEM;
+       return err;
 }
 
 static int drbd_adm_finish(struct genl_info *info, int retcode)
 {
-       struct nlattr *nla;
-       const char *conn_name = NULL;
-
        if (adm_ctx.tconn) {
                kref_put(&adm_ctx.tconn->kref, &conn_destroy);
                adm_ctx.tconn = NULL;
@@ -246,14 +281,6 @@ static int drbd_adm_finish(struct genl_info *info, int retcode)
                return -ENOMEM;
 
        adm_ctx.reply_dh->ret_code = retcode;
-
-       nla = info->attrs[DRBD_NLA_CFG_CONTEXT];
-       if (nla) {
-               nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
-               if (nla)
-                       conn_name = nla_data(nla);
-       }
-
        drbd_adm_send_reply(adm_ctx.reply_skb, info);
        return 0;
 }
@@ -261,30 +288,28 @@ static int drbd_adm_finish(struct genl_info *info, int retcode)
 static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
 {
        char *afs;
-       struct net_conf *nc;
 
-       rcu_read_lock();
-       nc = rcu_dereference(tconn->net_conf);
-       if (nc) {
-               switch (((struct sockaddr *)nc->peer_addr)->sa_family) {
-               case AF_INET6:
-                       afs = "ipv6";
-                       snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
-                                &((struct sockaddr_in6 *)nc->peer_addr)->sin6_addr);
-                       break;
-               case AF_INET:
-                       afs = "ipv4";
-                       snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
-                                &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
-                       break;
-               default:
-                       afs = "ssocks";
-                       snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
-                                &((struct sockaddr_in *)nc->peer_addr)->sin_addr);
-               }
-               snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
+       /* FIXME: A future version will not allow this case. */
+       if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0)
+               return;
+
+       switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) {
+       case AF_INET6:
+               afs = "ipv6";
+               snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
+                        &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr);
+               break;
+       case AF_INET:
+               afs = "ipv4";
+               snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
+                        &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
+               break;
+       default:
+               afs = "ssocks";
+               snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
+                        &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
        }
-       rcu_read_unlock();
+       snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
 }
 
 int drbd_khelper(struct drbd_conf *mdev, char *cmd)
@@ -297,11 +322,15 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
                        NULL };
        char mb[12];
        char *argv[] = {usermode_helper, cmd, mb, NULL };
+       struct drbd_tconn *tconn = mdev->tconn;
        struct sib_info sib;
        int ret;
 
+       if (current == tconn->worker.task)
+               set_bit(CALLBACK_PENDING, &tconn->flags);
+
        snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
-       setup_khelper_env(mdev->tconn, envp);
+       setup_khelper_env(tconn, envp);
 
        /* The helper may take some time.
         * write out any unsynced meta data changes now */
@@ -311,7 +340,7 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
        sib.sib_reason = SIB_HELPER_PRE;
        sib.helper_name = cmd;
        drbd_bcast_event(mdev, &sib);
-       ret = call_usermodehelper(usermode_helper, argv, envp, 1);
+       ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
        if (ret)
                dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
                                usermode_helper, cmd, mb,
@@ -324,23 +353,15 @@ int drbd_khelper(struct drbd_conf *mdev, char *cmd)
        sib.helper_exit_code = ret;
        drbd_bcast_event(mdev, &sib);
 
+       if (current == tconn->worker.task)
+               clear_bit(CALLBACK_PENDING, &tconn->flags);
+
        if (ret < 0) /* Ignore any ERRNOs we got. */
                ret = 0;
 
        return ret;
 }
 
-static void conn_md_sync(struct drbd_tconn *tconn)
-{
-       struct drbd_conf *mdev;
-       int vnr;
-
-       down_read(&drbd_cfg_rwsem);
-       idr_for_each_entry(&tconn->volumes, mdev, vnr)
-               drbd_md_sync(mdev);
-       up_read(&drbd_cfg_rwsem);
-}
-
 int conn_khelper(struct drbd_tconn *tconn, char *cmd)
 {
        char *envp[] = { "HOME=/",
@@ -358,7 +379,7 @@ int conn_khelper(struct drbd_tconn *tconn, char *cmd)
        conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
        /* TODO: conn_bcast_event() ?? */
 
-       ret = call_usermodehelper(usermode_helper, argv, envp, 1);
+       ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
        if (ret)
                conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
                          usermode_helper, cmd, tconn->name,
@@ -384,7 +405,8 @@ static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
        rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, vnr) {
                if (get_ldev_if_state(mdev, D_CONSISTENT)) {
-                       fp = max_t(enum drbd_fencing_p, fp, mdev->ldev->dc.fencing);
+                       fp = max_t(enum drbd_fencing_p, fp,
+                                  rcu_dereference(mdev->ldev->disk_conf)->fencing);
                        put_ldev(mdev);
                }
        }
@@ -470,7 +492,7 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn)
           here, because we might were able to re-establish the connection in the
           meantime. */
        spin_lock_irq(&tconn->req_lock);
-       if (tconn->cstate < C_WF_REPORT_PARAMS)
+       if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags))
                _conn_request_state(tconn, mask, val, CS_VERBOSE);
        spin_unlock_irq(&tconn->req_lock);
 
@@ -590,6 +612,8 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
        /* Wait until nothing is on the fly :) */
        wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);
 
+       /* FIXME also wait for all pending P_BARRIER_ACK? */
+
        if (new_role == R_SECONDARY) {
                set_disk_ro(mdev->vdisk, true);
                if (get_ldev(mdev)) {
@@ -597,11 +621,11 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
                        put_ldev(mdev);
                }
        } else {
-               mutex_lock(&mdev->tconn->net_conf_update);
+               mutex_lock(&mdev->tconn->conf_update);
                nc = mdev->tconn->net_conf;
                if (nc)
-                       nc->want_lose = 0; /* without copy; single bit op is atomic */
-               mutex_unlock(&mdev->tconn->net_conf_update);
+                       nc->discard_my_data = 0; /* without copy; single bit op is atomic */
+               mutex_unlock(&mdev->tconn->conf_update);
 
                set_disk_ro(mdev->vdisk, false);
                if (get_ldev(mdev)) {
@@ -622,7 +646,7 @@ drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
                /* if this was forced, we should consider sync */
                if (forced)
                        drbd_send_uuids(mdev);
-               drbd_send_state(mdev);
+               drbd_send_current_state(mdev);
        }
 
        drbd_md_sync(mdev);
@@ -678,7 +702,12 @@ static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
                                       struct drbd_backing_dev *bdev)
 {
        sector_t md_size_sect = 0;
-       switch (bdev->dc.meta_dev_idx) {
+       int meta_dev_idx;
+
+       rcu_read_lock();
+       meta_dev_idx = rcu_dereference(bdev->disk_conf)->meta_dev_idx;
+
+       switch (meta_dev_idx) {
        default:
                /* v07 style fixed size indexed meta data */
                bdev->md.md_size_sect = MD_RESERVED_SECT;
@@ -713,6 +742,7 @@ static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
                bdev->md.bm_offset   = -md_size_sect + MD_AL_OFFSET;
                break;
        }
+       rcu_read_unlock();
 }
 
 /* input size is expected to be in KB */
@@ -775,7 +805,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
 enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags) __must_hold(local)
 {
        sector_t prev_first_sect, prev_size; /* previous meta location */
-       sector_t la_size;
+       sector_t la_size, u_size;
        sector_t size;
        char ppb[10];
 
@@ -803,7 +833,10 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
        /* TODO: should only be some assert here, not (re)init... */
        drbd_md_set_sector_offsets(mdev, mdev->ldev);
 
-       size = drbd_new_dev_size(mdev, mdev->ldev, flags & DDSF_FORCED);
+       rcu_read_lock();
+       u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+       rcu_read_unlock();
+       size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);
 
        if (drbd_get_capacity(mdev->this_bdev) != size ||
            drbd_bm_capacity(mdev) != size) {
@@ -844,8 +877,8 @@ enum determine_dev_size drbd_determine_dev_size(struct drbd_conf *mdev, enum dds
                         la_size_changed && md_moved ? "size changed and md moved" :
                         la_size_changed ? "size changed" : "md moved");
                /* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
-               err = drbd_bitmap_io(mdev, &drbd_bm_write,
-                               "size changed", BM_LOCKED_MASK);
+               err = drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
+                                    "size changed", BM_LOCKED_MASK);
                if (err) {
                        rv = dev_size_error;
                        goto out;
@@ -866,12 +899,12 @@ out:
 }
 
 sector_t
-drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
+drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
+                 sector_t u_size, int assume_peer_has_space)
 {
        sector_t p_size = mdev->p_size;   /* partner's disk size. */
        sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
        sector_t m_size; /* my size */
-       sector_t u_size = bdev->dc.disk_size; /* size requested by user. */
        sector_t size = 0;
 
        m_size = drbd_get_max_capacity(bdev);
@@ -927,9 +960,6 @@ static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
        unsigned int in_use;
        int i;
 
-       if (!expect(dc->al_extents >= DRBD_AL_EXTENTS_MIN))
-               dc->al_extents = DRBD_AL_EXTENTS_MIN;
-
        if (mdev->act_log &&
            mdev->act_log->nr_elements == dc->al_extents)
                return 0;
@@ -971,14 +1001,16 @@ static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
 static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
 {
        struct request_queue * const q = mdev->rq_queue;
-       int max_hw_sectors = max_bio_size >> 9;
-       int max_segments = 0;
+       unsigned int max_hw_sectors = max_bio_size >> 9;
+       unsigned int max_segments = 0;
 
        if (get_ldev_if_state(mdev, D_ATTACHING)) {
                struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
 
                max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
-               max_segments = mdev->ldev->dc.max_bio_bvecs;
+               rcu_read_lock();
+               max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
+               rcu_read_unlock();
                put_ldev(mdev);
        }
 
@@ -1005,7 +1037,7 @@ static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_
 
 void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
 {
-       int now, new, local, peer;
+       unsigned int now, new, local, peer;
 
        now = queue_max_hw_sectors(mdev->rq_queue) << 9;
        local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
@@ -1016,23 +1048,27 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
                mdev->local_max_bio_size = local;
                put_ldev(mdev);
        }
+       local = min(local, DRBD_MAX_BIO_SIZE);
 
        /* We may ignore peer limits if the peer is modern enough.
           Because new from 8.3.8 onwards the peer can use multiple
           BIOs for a single peer_request */
        if (mdev->state.conn >= C_CONNECTED) {
                if (mdev->tconn->agreed_pro_version < 94)
-                       peer = mdev->peer_max_bio_size;
+                       peer = min( mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+                       /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
                else if (mdev->tconn->agreed_pro_version == 94)
                        peer = DRBD_MAX_SIZE_H80_PACKET;
-               else /* drbd 8.3.8 onwards */
+               else if (mdev->tconn->agreed_pro_version < 100)
+                       peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
+               else
                        peer = DRBD_MAX_BIO_SIZE;
        }
 
-       new = min_t(int, local, peer);
+       new = min(local, peer);
 
        if (mdev->state.role == R_PRIMARY && new < now)
-               dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);
+               dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now);
 
        if (new != now)
                dev_info(DEV, "max BIO size = %u\n", new);
@@ -1050,10 +1086,17 @@ static void conn_reconfig_start(struct drbd_tconn *tconn)
 /* if still unconfigured, stops worker again. */
 static void conn_reconfig_done(struct drbd_tconn *tconn)
 {
+       bool stop_threads;
        spin_lock_irq(&tconn->req_lock);
-       if (conn_all_vols_unconf(tconn))
-               drbd_thread_stop_nowait(&tconn->worker);
+       stop_threads = conn_all_vols_unconf(tconn) &&
+               tconn->cstate == C_STANDALONE;
        spin_unlock_irq(&tconn->req_lock);
+       if (stop_threads) {
+               /* asender is implicitly stopped by receiver
+                * in conn_disconnect() */
+               drbd_thread_stop(&tconn->receiver);
+               drbd_thread_stop(&tconn->worker);
+       }
 }
 
 /* Make sure IO is suspended before calling this function(). */
@@ -1084,84 +1127,24 @@ static bool should_set_defaults(struct genl_info *info)
        return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
 }
 
-/* Maybe we should we generate these functions
- * from the drbd_genl.h magic as well?
- * That way we would not "accidentally forget" to add defaults here. */
-
-#define RESET_ARRAY_FIELD(field) do { \
-       memset(field, 0, sizeof(field)); \
-       field ## _len = 0; \
-} while (0)
-void drbd_set_res_opts_default(struct res_opts *r)
-{
-       RESET_ARRAY_FIELD(r->cpu_mask);
-       r->on_no_data  = DRBD_ON_NO_DATA_DEF;
-}
-
-static void drbd_set_net_conf_defaults(struct net_conf *nc)
-{
-       /* Do NOT (re)set those fields marked as GENLA_F_INVARIANT
-        * in drbd_genl.h, they can only be change with disconnect/reconnect */
-       RESET_ARRAY_FIELD(nc->shared_secret);
-
-       RESET_ARRAY_FIELD(nc->cram_hmac_alg);
-       RESET_ARRAY_FIELD(nc->integrity_alg);
-       RESET_ARRAY_FIELD(nc->verify_alg);
-       RESET_ARRAY_FIELD(nc->csums_alg);
-#undef RESET_ARRAY_FIELD
-
-       nc->wire_protocol = DRBD_PROTOCOL_DEF;
-       nc->try_connect_int = DRBD_CONNECT_INT_DEF;
-       nc->timeout = DRBD_TIMEOUT_DEF;
-       nc->ping_int = DRBD_PING_INT_DEF;
-       nc->ping_timeo = DRBD_PING_TIMEO_DEF;
-       nc->sndbuf_size = DRBD_SNDBUF_SIZE_DEF;
-       nc->rcvbuf_size = DRBD_RCVBUF_SIZE_DEF;
-       nc->ko_count = DRBD_KO_COUNT_DEF;
-       nc->max_buffers = DRBD_MAX_BUFFERS_DEF;
-       nc->max_epoch_size = DRBD_MAX_EPOCH_SIZE_DEF;
-       nc->unplug_watermark = DRBD_UNPLUG_WATERMARK_DEF;
-       nc->after_sb_0p = DRBD_AFTER_SB_0P_DEF;
-       nc->after_sb_1p = DRBD_AFTER_SB_1P_DEF;
-       nc->after_sb_2p = DRBD_AFTER_SB_2P_DEF;
-       nc->rr_conflict = DRBD_RR_CONFLICT_DEF;
-       nc->on_congestion = DRBD_ON_CONGESTION_DEF;
-       nc->cong_fill = DRBD_CONG_FILL_DEF;
-       nc->cong_extents = DRBD_CONG_EXTENTS_DEF;
-       nc->two_primaries = 0;
-       nc->no_cork = 0;
-       nc->always_asbp = 0;
-       nc->use_rle = 0;
-}
-
-static void drbd_set_disk_conf_defaults(struct disk_conf *dc)
-{
-       /* Do NOT (re)set those fields marked as GENLA_F_INVARIANT
-        * in drbd_genl.h, they can only be change with detach/reattach */
-       dc->on_io_error = DRBD_ON_IO_ERROR_DEF;
-       dc->fencing = DRBD_FENCING_DEF;
-       dc->resync_rate = DRBD_RATE_DEF;
-       dc->resync_after = DRBD_AFTER_DEF;
-       dc->al_extents = DRBD_AL_EXTENTS_DEF;
-       dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_DEF;
-       dc->c_delay_target = DRBD_C_DELAY_TARGET_DEF;
-       dc->c_fill_target = DRBD_C_FILL_TARGET_DEF;
-       dc->c_max_rate = DRBD_C_MAX_RATE_DEF;
-       dc->c_min_rate = DRBD_C_MIN_RATE_DEF;
-       dc->no_disk_barrier = 0;
-       dc->no_disk_flush = 0;
-       dc->no_disk_drain = 0;
-       dc->no_md_flush = 0;
-}
+static void enforce_disk_conf_limits(struct disk_conf *dc)
+{
+       if (dc->al_extents < DRBD_AL_EXTENTS_MIN)
+               dc->al_extents = DRBD_AL_EXTENTS_MIN;
+       if (dc->al_extents > DRBD_AL_EXTENTS_MAX)
+               dc->al_extents = DRBD_AL_EXTENTS_MAX;
 
+       if (dc->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
+               dc->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;
+}
 
 int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
 {
        enum drbd_ret_code retcode;
        struct drbd_conf *mdev;
-       struct disk_conf *ndc; /* new disk conf */
+       struct disk_conf *new_disk_conf, *old_disk_conf;
+       struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
        int err, fifo_size;
-       int *rs_plan_s = NULL;
 
        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
@@ -1178,89 +1161,100 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
                goto out;
        }
 
-/* FIXME freeze IO, cluster wide.
- *
- * We should make sure no-one uses
- * some half-updated struct when we
- * assign it later. */
-
-       ndc = kmalloc(sizeof(*ndc), GFP_KERNEL);
-       if (!ndc) {
+       new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
+       if (!new_disk_conf) {
                retcode = ERR_NOMEM;
                goto fail;
        }
 
-       memcpy(ndc, &mdev->ldev->dc, sizeof(*ndc));
+       mutex_lock(&mdev->tconn->conf_update);
+       old_disk_conf = mdev->ldev->disk_conf;
+       *new_disk_conf = *old_disk_conf;
        if (should_set_defaults(info))
-               drbd_set_disk_conf_defaults(ndc);
+               set_disk_conf_defaults(new_disk_conf);
 
-       err = disk_conf_from_attrs_for_change(ndc, info);
-       if (err) {
+       err = disk_conf_from_attrs_for_change(new_disk_conf, info);
+       if (err && err != -ENOMSG) {
                retcode = ERR_MANDATORY_TAG;
                drbd_msg_put_info(from_attrs_err_to_txt(err));
        }
 
-       if (!expect(ndc->resync_rate >= 1))
-               ndc->resync_rate = 1;
+       if (!expect(new_disk_conf->resync_rate >= 1))
+               new_disk_conf->resync_rate = 1;
 
-       /* clip to allowed range */
-       if (!expect(ndc->al_extents >= DRBD_AL_EXTENTS_MIN))
-               ndc->al_extents = DRBD_AL_EXTENTS_MIN;
-       if (!expect(ndc->al_extents <= DRBD_AL_EXTENTS_MAX))
-               ndc->al_extents = DRBD_AL_EXTENTS_MAX;
+       enforce_disk_conf_limits(new_disk_conf);
 
-       /* most sanity checks done, try to assign the new sync-after
-        * dependency.  need to hold the global lock in there,
-        * to avoid a race in the dependency loop check. */
-       retcode = drbd_alter_sa(mdev, ndc->resync_after);
-       if (retcode != NO_ERROR)
-               goto fail;
-
-       fifo_size = (ndc->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
-       if (fifo_size != mdev->rs_plan_s.size && fifo_size > 0) {
-               rs_plan_s   = kzalloc(sizeof(int) * fifo_size, GFP_KERNEL);
-               if (!rs_plan_s) {
+       fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
+       if (fifo_size != mdev->rs_plan_s->size) {
+               new_plan = fifo_alloc(fifo_size);
+               if (!new_plan) {
                        dev_err(DEV, "kmalloc of fifo_buffer failed");
                        retcode = ERR_NOMEM;
-                       goto fail;
+                       goto fail_unlock;
                }
        }
 
-       if (fifo_size != mdev->rs_plan_s.size) {
-               kfree(mdev->rs_plan_s.values);
-               mdev->rs_plan_s.values = rs_plan_s;
-               mdev->rs_plan_s.size   = fifo_size;
-               mdev->rs_planed = 0;
-               rs_plan_s = NULL;
-       }
-
+       drbd_suspend_io(mdev);
        wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
        drbd_al_shrink(mdev);
-       err = drbd_check_al_size(mdev, ndc);
+       err = drbd_check_al_size(mdev, new_disk_conf);
        lc_unlock(mdev->act_log);
        wake_up(&mdev->al_wait);
+       drbd_resume_io(mdev);
 
        if (err) {
                retcode = ERR_NOMEM;
-               goto fail;
+               goto fail_unlock;
        }
 
-       /* FIXME
-        * To avoid someone looking at a half-updated struct, we probably
-        * should have a rw-semaphor on net_conf and disk_conf.
-        */
-       mdev->ldev->dc = *ndc;
+       write_lock_irq(&global_state_lock);
+       retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
+       if (retcode == NO_ERROR) {
+               rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
+               drbd_resync_after_changed(mdev);
+       }
+       write_unlock_irq(&global_state_lock);
 
-       drbd_md_sync(mdev);
+       if (retcode != NO_ERROR)
+               goto fail_unlock;
 
+       if (new_plan) {
+               old_plan = mdev->rs_plan_s;
+               rcu_assign_pointer(mdev->rs_plan_s, new_plan);
+       }
+
+       mutex_unlock(&mdev->tconn->conf_update);
+
+       if (new_disk_conf->al_updates)
+               mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
+       else
+               mdev->ldev->md.flags |= MDF_AL_DISABLED;
+
+       if (new_disk_conf->md_flushes)
+               clear_bit(MD_NO_FUA, &mdev->flags);
+       else
+               set_bit(MD_NO_FUA, &mdev->flags);
+
+       drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
+
+       drbd_md_sync(mdev);
 
        if (mdev->state.conn >= C_CONNECTED)
                drbd_send_sync_param(mdev);
 
+       synchronize_rcu();
+       kfree(old_disk_conf);
+       kfree(old_plan);
+       mod_timer(&mdev->request_timer, jiffies + HZ);
+       goto success;
+
+fail_unlock:
+       mutex_unlock(&mdev->tconn->conf_update);
  fail:
+       kfree(new_disk_conf);
+       kfree(new_plan);
+success:
        put_ldev(mdev);
-       kfree(ndc);
-       kfree(rs_plan_s);
  out:
        drbd_adm_finish(info, retcode);
        return 0;
@@ -1275,12 +1269,13 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        sector_t max_possible_sectors;
        sector_t min_md_device_sectors;
        struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
+       struct disk_conf *new_disk_conf = NULL;
        struct block_device *bdev;
        struct lru_cache *resync_lru = NULL;
+       struct fifo_buffer *new_plan = NULL;
        union drbd_state ns, os;
        enum drbd_state_rv rv;
        struct net_conf *nc;
-       int cp_discovered = 0;
 
        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
@@ -1302,23 +1297,48 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
         * to realize a "hot spare" feature (not that I'd recommend that) */
        wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
 
+       /* make sure there is no leftover from previous force-detach attempts */
+       clear_bit(FORCE_DETACH, &mdev->flags);
+       clear_bit(WAS_IO_ERROR, &mdev->flags);
+       clear_bit(WAS_READ_ERROR, &mdev->flags);
+
+       /* and no leftover from previously aborted resync or verify, either */
+       mdev->rs_total = 0;
+       mdev->rs_failed = 0;
+       atomic_set(&mdev->rs_pending_cnt, 0);
+
        /* allocation not in the IO path, drbdsetup context */
        nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
        if (!nbc) {
                retcode = ERR_NOMEM;
                goto fail;
        }
+       spin_lock_init(&nbc->md.uuid_lock);
 
-       drbd_set_disk_conf_defaults(&nbc->dc);
+       new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
+       if (!new_disk_conf) {
+               retcode = ERR_NOMEM;
+               goto fail;
+       }
+       nbc->disk_conf = new_disk_conf;
 
-       err = disk_conf_from_attrs(&nbc->dc, info);
+       set_disk_conf_defaults(new_disk_conf);
+       err = disk_conf_from_attrs(new_disk_conf, info);
        if (err) {
                retcode = ERR_MANDATORY_TAG;
                drbd_msg_put_info(from_attrs_err_to_txt(err));
                goto fail;
        }
 
-       if ((int)nbc->dc.meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
+       enforce_disk_conf_limits(new_disk_conf);
+
+       new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
+       if (!new_plan) {
+               retcode = ERR_NOMEM;
+               goto fail;
+       }
+
+       if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
                retcode = ERR_MD_IDX_INVALID;
                goto fail;
        }
@@ -1326,7 +1346,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        rcu_read_lock();
        nc = rcu_dereference(mdev->tconn->net_conf);
        if (nc) {
-               if (nbc->dc.fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
+               if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
                        rcu_read_unlock();
                        retcode = ERR_STONITH_AND_PROT_A;
                        goto fail;
@@ -1334,10 +1354,10 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        }
        rcu_read_unlock();
 
-       bdev = blkdev_get_by_path(nbc->dc.backing_dev,
+       bdev = blkdev_get_by_path(new_disk_conf->backing_dev,
                                  FMODE_READ | FMODE_WRITE | FMODE_EXCL, mdev);
        if (IS_ERR(bdev)) {
-               dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.backing_dev,
+               dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->backing_dev,
                        PTR_ERR(bdev));
                retcode = ERR_OPEN_DISK;
                goto fail;
@@ -1352,12 +1372,12 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
         * should check it for you already; but if you don't, or
         * someone fooled it, we need to double check here)
         */
-       bdev = blkdev_get_by_path(nbc->dc.meta_dev,
+       bdev = blkdev_get_by_path(new_disk_conf->meta_dev,
                                  FMODE_READ | FMODE_WRITE | FMODE_EXCL,
-                                 ((int)nbc->dc.meta_dev_idx < 0) ?
+                                 (new_disk_conf->meta_dev_idx < 0) ?
                                  (void *)mdev : (void *)drbd_m_holder);
        if (IS_ERR(bdev)) {
-               dev_err(DEV, "open(\"%s\") failed with %ld\n", nbc->dc.meta_dev,
+               dev_err(DEV, "open(\"%s\") failed with %ld\n", new_disk_conf->meta_dev,
                        PTR_ERR(bdev));
                retcode = ERR_OPEN_MD_DISK;
                goto fail;
@@ -1365,8 +1385,8 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        nbc->md_bdev = bdev;
 
        if ((nbc->backing_bdev == nbc->md_bdev) !=
-           (nbc->dc.meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
-            nbc->dc.meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
+           (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
+            new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
                retcode = ERR_MD_IDX_INVALID;
                goto fail;
        }
@@ -1382,25 +1402,25 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        /* RT - for drbd_get_max_capacity() DRBD_MD_INDEX_FLEX_INT */
        drbd_md_set_sector_offsets(mdev, nbc);
 
-       if (drbd_get_max_capacity(nbc) < nbc->dc.disk_size) {
+       if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
                dev_err(DEV, "max capacity %llu smaller than disk size %llu\n",
                        (unsigned long long) drbd_get_max_capacity(nbc),
-                       (unsigned long long) nbc->dc.disk_size);
-               retcode = ERR_DISK_TO_SMALL;
+                       (unsigned long long) new_disk_conf->disk_size);
+               retcode = ERR_DISK_TOO_SMALL;
                goto fail;
        }
 
-       if ((int)nbc->dc.meta_dev_idx < 0) {
+       if (new_disk_conf->meta_dev_idx < 0) {
                max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
                /* at least one MB, otherwise it does not make sense */
                min_md_device_sectors = (2<<10);
        } else {
                max_possible_sectors = DRBD_MAX_SECTORS;
-               min_md_device_sectors = MD_RESERVED_SECT * (nbc->dc.meta_dev_idx + 1);
+               min_md_device_sectors = MD_RESERVED_SECT * (new_disk_conf->meta_dev_idx + 1);
        }
 
        if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
-               retcode = ERR_MD_DISK_TO_SMALL;
+               retcode = ERR_MD_DISK_TOO_SMALL;
                dev_warn(DEV, "refusing attach: md-device too small, "
                     "at least %llu sectors needed for this meta-disk type\n",
                     (unsigned long long) min_md_device_sectors);
@@ -1411,7 +1431,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
         * (we may currently be R_PRIMARY with no local disk...) */
        if (drbd_get_max_capacity(nbc) <
            drbd_get_capacity(mdev->this_bdev)) {
-               retcode = ERR_DISK_TO_SMALL;
+               retcode = ERR_DISK_TOO_SMALL;
                goto fail;
        }
 
@@ -1421,13 +1441,19 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
                dev_warn(DEV, "==> truncating very big lower level device "
                        "to currently maximum possible %llu sectors <==\n",
                        (unsigned long long) max_possible_sectors);
-               if ((int)nbc->dc.meta_dev_idx >= 0)
+               if (new_disk_conf->meta_dev_idx >= 0)
                        dev_warn(DEV, "==>> using internal or flexible "
                                      "meta data may help <<==\n");
        }
 
        drbd_suspend_io(mdev);
        /* also wait for the last barrier ack. */
+       /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
+        * We need a way to either ignore barrier acks for barriers sent before a device
+        * was attached, or a way to wait for all pending barrier acks to come in.
+        * As barriers are counted per resource,
+        * we'd need to suspend io on all devices of a resource.
+        */
        wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
        /* and for any other previously queued work */
        drbd_flush_workqueue(mdev);
@@ -1464,30 +1490,25 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        }
 
        /* Since we are diskless, fix the activity log first... */
-       if (drbd_check_al_size(mdev, &nbc->dc)) {
+       if (drbd_check_al_size(mdev, new_disk_conf)) {
                retcode = ERR_NOMEM;
                goto force_diskless_dec;
        }
 
        /* Prevent shrinking of consistent devices ! */
        if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
-           drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
+           drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
                dev_warn(DEV, "refusing to truncate a consistent device\n");
-               retcode = ERR_DISK_TO_SMALL;
-               goto force_diskless_dec;
-       }
-
-       if (!drbd_al_read_log(mdev, nbc)) {
-               retcode = ERR_IO_MD_DISK;
+               retcode = ERR_DISK_TOO_SMALL;
                goto force_diskless_dec;
        }
 
        /* Reset the "barriers don't work" bits here, then force meta data to
         * be written, to ensure we determine if barriers are supported. */
-       if (nbc->dc.no_md_flush)
-               set_bit(MD_NO_FUA, &mdev->flags);
-       else
+       if (new_disk_conf->md_flushes)
                clear_bit(MD_NO_FUA, &mdev->flags);
+       else
+               set_bit(MD_NO_FUA, &mdev->flags);
 
        /* Point of no return reached.
         * Devices and memory are no longer released by error cleanup below.
@@ -1496,11 +1517,13 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        D_ASSERT(mdev->ldev == NULL);
        mdev->ldev = nbc;
        mdev->resync = resync_lru;
+       mdev->rs_plan_s = new_plan;
        nbc = NULL;
        resync_lru = NULL;
+       new_disk_conf = NULL;
+       new_plan = NULL;
 
-       mdev->write_ordering = WO_bdev_flush;
-       drbd_bump_write_ordering(mdev, WO_bdev_flush);
+       drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);
 
        if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
                set_bit(CRASHED_PRIMARY, &mdev->flags);
@@ -1508,10 +1531,8 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
                clear_bit(CRASHED_PRIMARY, &mdev->flags);
 
        if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
-           !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod)) {
+           !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod))
                set_bit(CRASHED_PRIMARY, &mdev->flags);
-               cp_discovered = 1;
-       }
 
        mdev->send_cnt = 0;
        mdev->recv_cnt = 0;
@@ -1547,7 +1568,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        } else if (dd == grew)
                set_bit(RESYNC_AFTER_NEG, &mdev->flags);
 
-       if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
+       if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) ||
+           (test_bit(CRASHED_PRIMARY, &mdev->flags) &&
+            drbd_md_test_flag(mdev->ldev, MDF_AL_DISABLED))) {
                dev_info(DEV, "Assuming that all blocks are out of sync "
                     "(aka FullSync)\n");
                if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
@@ -1563,15 +1586,6 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
                }
        }
 
-       if (cp_discovered) {
-               drbd_al_apply_to_bm(mdev);
-               if (drbd_bitmap_io(mdev, &drbd_bm_write,
-                       "crashed primary apply AL", BM_LOCKED_MASK)) {
-                       retcode = ERR_IO_MD_DISK;
-                       goto force_diskless_dec;
-               }
-       }
-
        if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
                drbd_suspend_al(mdev); /* IO is still suspended here... */
 
@@ -1595,8 +1609,9 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
                ns.pdsk = D_OUTDATED;
 
-       if ( ns.disk == D_CONSISTENT &&
-           (ns.pdsk == D_OUTDATED || mdev->ldev->dc.fencing == FP_DONT_CARE))
+       rcu_read_lock();
+       if (ns.disk == D_CONSISTENT &&
+           (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
                ns.disk = D_UP_TO_DATE;
 
        /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
@@ -1604,6 +1619,13 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
           this point, because drbd_request_state() modifies these
           flags. */
 
+       if (rcu_dereference(mdev->ldev->disk_conf)->al_updates)
+               mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
+       else
+               mdev->ldev->md.flags |= MDF_AL_DISABLED;
+
+       rcu_read_unlock();
+
        /* In case we are C_CONNECTED postpone any decision on the new disk
           state after the negotiation phase. */
        if (mdev->state.conn == C_CONNECTED) {
@@ -1624,6 +1646,8 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
        if (rv < SS_SUCCESS)
                goto force_diskless_dec;
 
+       mod_timer(&mdev->request_timer, jiffies + HZ);
+
        if (mdev->state.role == R_PRIMARY)
                mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
        else
@@ -1641,7 +1665,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
  force_diskless_dec:
        put_ldev(mdev);
  force_diskless:
-       drbd_force_state(mdev, NS(disk, D_FAILED));
+       drbd_force_state(mdev, NS(disk, D_DISKLESS));
        drbd_md_sync(mdev);
  fail:
        conn_reconfig_done(mdev->tconn);
@@ -1654,22 +1678,40 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
                                   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
                kfree(nbc);
        }
+       kfree(new_disk_conf);
        lc_destroy(resync_lru);
+       kfree(new_plan);
 
  finish:
        drbd_adm_finish(info, retcode);
        return 0;
 }
 
-static int adm_detach(struct drbd_conf *mdev)
+static int adm_detach(struct drbd_conf *mdev, int force)
 {
        enum drbd_state_rv retcode;
+       int ret;
+
+       if (force) {
+               set_bit(FORCE_DETACH, &mdev->flags);
+               drbd_force_state(mdev, NS(disk, D_FAILED));
+               retcode = SS_SUCCESS;
+               goto out;
+       }
+
        drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
-       retcode = drbd_request_state(mdev, NS(disk, D_DISKLESS));
-       wait_event(mdev->misc_wait,
-                       mdev->state.disk != D_DISKLESS ||
-                       !atomic_read(&mdev->local_cnt));
+       drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
+       retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
+       drbd_md_put_buffer(mdev);
+       /* D_FAILED will transition to DISKLESS. */
+       ret = wait_event_interruptible(mdev->misc_wait,
+                       mdev->state.disk != D_FAILED);
        drbd_resume_io(mdev);
+       if ((int)retcode == (int)SS_IS_DISKLESS)
+               retcode = SS_NOTHING_TO_DO;
+       if (ret)
+               retcode = ERR_INTR;
+out:
        return retcode;
 }
 
@@ -1681,6 +1723,8 @@ static int adm_detach(struct drbd_conf *mdev)
 int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
 {
        enum drbd_ret_code retcode;
+       struct detach_parms parms = { };
+       int err;
 
        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
@@ -1688,7 +1732,16 @@ int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto out;
 
-       retcode = adm_detach(adm_ctx.mdev);
+       if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
+               err = detach_parms_from_attrs(&parms, info);
+               if (err) {
+                       retcode = ERR_MANDATORY_TAG;
+                       drbd_msg_put_info(from_attrs_err_to_txt(err));
+                       goto out;
+               }
+       }
+
+       retcode = adm_detach(adm_ctx.mdev, parms.force_detach);
 out:
        drbd_adm_finish(info, retcode);
        return 0;
@@ -1740,10 +1793,21 @@ _check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct n
        struct drbd_conf *mdev;
        int i;
 
-       if (old_conf && tconn->agreed_pro_version < 100 &&
-           tconn->cstate == C_WF_REPORT_PARAMS &&
-           new_conf->wire_protocol != old_conf->wire_protocol)
-               return ERR_NEED_APV_100;
+       if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
+               if (new_conf->wire_protocol != old_conf->wire_protocol)
+                       return ERR_NEED_APV_100;
+
+               if (new_conf->two_primaries != old_conf->two_primaries)
+                       return ERR_NEED_APV_100;
+
+               if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
+                       return ERR_NEED_APV_100;
+       }
+
+       if (!new_conf->two_primaries &&
+           conn_highest_role(tconn) == R_PRIMARY &&
+           conn_highest_peer(tconn) == R_PRIMARY)
+               return ERR_NEED_ALLOW_TWO_PRI;
 
        if (new_conf->two_primaries &&
            (new_conf->wire_protocol != DRBD_PROT_C))
@@ -1751,13 +1815,13 @@ _check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct n
 
        idr_for_each_entry(&tconn->volumes, mdev, i) {
                if (get_ldev(mdev)) {
-                       enum drbd_fencing_p fp = mdev->ldev->dc.fencing;
+                       enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
                        put_ldev(mdev);
                        if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
                                return ERR_STONITH_AND_PROT_A;
                }
-               if (mdev->state.role == R_PRIMARY && new_conf->want_lose)
-                       return ERR_DISCARD;
+               if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
+                       return ERR_DISCARD_IMPOSSIBLE;
        }
 
        if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
@@ -1792,14 +1856,11 @@ struct crypto {
        struct crypto_hash *verify_tfm;
        struct crypto_hash *csums_tfm;
        struct crypto_hash *cram_hmac_tfm;
-       struct crypto_hash *integrity_w_tfm;
-       struct crypto_hash *integrity_r_tfm;
-       void *int_dig_in;
-       void *int_dig_vv;
+       struct crypto_hash *integrity_tfm;
 };
 
 static int
-alloc_tfm(struct crypto_hash **tfm, char *tfm_name, int err_alg, int err_nd)
+alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
 {
        if (!tfm_name[0])
                return NO_ERROR;
@@ -1810,9 +1871,6 @@ alloc_tfm(struct crypto_hash **tfm, char *tfm_name, int err_alg, int err_nd)
                return err_alg;
        }
 
-       if (!drbd_crypto_is_hash(crypto_hash_tfm(*tfm)))
-               return err_nd;
-
        return NO_ERROR;
 }
 
@@ -1821,39 +1879,25 @@ alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
 {
        char hmac_name[CRYPTO_MAX_ALG_NAME];
        enum drbd_ret_code rv;
-       int hash_size;
 
-       rv = alloc_tfm(&crypto->csums_tfm, new_conf->csums_alg,
-                      ERR_CSUMS_ALG, ERR_CSUMS_ALG_ND);
-       if (rv != NO_ERROR)
-               return rv;
-       rv = alloc_tfm(&crypto->verify_tfm, new_conf->verify_alg,
-                      ERR_VERIFY_ALG, ERR_VERIFY_ALG_ND);
+       rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
+                      ERR_CSUMS_ALG);
        if (rv != NO_ERROR)
                return rv;
-       rv = alloc_tfm(&crypto->integrity_w_tfm, new_conf->integrity_alg,
-                      ERR_INTEGRITY_ALG, ERR_INTEGRITY_ALG_ND);
+       rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
+                      ERR_VERIFY_ALG);
        if (rv != NO_ERROR)
                return rv;
-       rv = alloc_tfm(&crypto->integrity_r_tfm, new_conf->integrity_alg,
-                      ERR_INTEGRITY_ALG, ERR_INTEGRITY_ALG_ND);
+       rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
+                      ERR_INTEGRITY_ALG);
        if (rv != NO_ERROR)
                return rv;
        if (new_conf->cram_hmac_alg[0] != 0) {
                snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
                         new_conf->cram_hmac_alg);
 
-               rv = alloc_tfm(&crypto->cram_hmac_tfm, hmac_name,
-                              ERR_AUTH_ALG, ERR_AUTH_ALG_ND);
-       }
-       if (crypto->integrity_w_tfm) {
-               hash_size = crypto_hash_digestsize(crypto->integrity_w_tfm);
-               crypto->int_dig_in = kmalloc(hash_size, GFP_KERNEL);
-               if (!crypto->int_dig_in)
-                       return ERR_NOMEM;
-               crypto->int_dig_vv = kmalloc(hash_size, GFP_KERNEL);
-               if (!crypto->int_dig_vv)
-                       return ERR_NOMEM;
+               rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
+                              ERR_AUTH_ALG);
        }
 
        return rv;
@@ -1861,11 +1905,8 @@ alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
 
 static void free_crypto(struct crypto *crypto)
 {
-       kfree(crypto->int_dig_in);
-       kfree(crypto->int_dig_vv);
        crypto_free_hash(crypto->cram_hmac_tfm);
-       crypto_free_hash(crypto->integrity_w_tfm);
-       crypto_free_hash(crypto->integrity_r_tfm);
+       crypto_free_hash(crypto->integrity_tfm);
        crypto_free_hash(crypto->csums_tfm);
        crypto_free_hash(crypto->verify_tfm);
 }
@@ -1880,7 +1921,7 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
        int rsr; /* re-sync running */
        struct crypto crypto = { };
 
-       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
@@ -1896,7 +1937,8 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
 
        conn_reconfig_start(tconn);
 
-       mutex_lock(&tconn->net_conf_update);
+       mutex_lock(&tconn->data.mutex);
+       mutex_lock(&tconn->conf_update);
        old_conf = tconn->net_conf;
 
        if (!old_conf) {
@@ -1907,10 +1949,10 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
 
        *new_conf = *old_conf;
        if (should_set_defaults(info))
-               drbd_set_net_conf_defaults(new_conf);
+               set_net_conf_defaults(new_conf);
 
        err = net_conf_from_attrs_for_change(new_conf, info);
-       if (err) {
+       if (err && err != -ENOMSG) {
                retcode = ERR_MANDATORY_TAG;
                drbd_msg_put_info(from_attrs_err_to_txt(err));
                goto fail;
@@ -1951,21 +1993,17 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
                crypto.verify_tfm = NULL;
        }
 
-       /* FIXME can not assign these so bluntly while we have ongoing IO */
-       kfree(tconn->int_dig_in);
-       tconn->int_dig_in = crypto.int_dig_in;
-       kfree(tconn->int_dig_vv);
-       tconn->int_dig_vv = crypto.int_dig_vv;
-       crypto_free_hash(tconn->integrity_w_tfm);
-       tconn->integrity_w_tfm = crypto.integrity_w_tfm;
-       crypto_free_hash(tconn->integrity_r_tfm);
-       tconn->integrity_r_tfm = crypto.integrity_r_tfm;
+       crypto_free_hash(tconn->integrity_tfm);
+       tconn->integrity_tfm = crypto.integrity_tfm;
+       if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
+               /* Do this without trying to take tconn->data.mutex again.  */
+               __drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);
 
-       /* FIXME Changing cram_hmac while the connection is established is useless */
        crypto_free_hash(tconn->cram_hmac_tfm);
        tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
 
-       mutex_unlock(&tconn->net_conf_update);
+       mutex_unlock(&tconn->conf_update);
+       mutex_unlock(&tconn->data.mutex);
        synchronize_rcu();
        kfree(old_conf);
 
@@ -1975,7 +2013,8 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
        goto done;
 
  fail:
-       mutex_unlock(&tconn->net_conf_update);
+       mutex_unlock(&tconn->conf_update);
+       mutex_unlock(&tconn->data.mutex);
        free_crypto(&crypto);
        kfree(new_conf);
  done:
@@ -1990,18 +2029,39 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
        struct drbd_conf *mdev;
        struct net_conf *old_conf, *new_conf = NULL;
        struct crypto crypto = { };
-       struct drbd_tconn *oconn;
        struct drbd_tconn *tconn;
-       struct sockaddr *new_my_addr, *new_peer_addr, *taken_addr;
        enum drbd_ret_code retcode;
        int i;
        int err;
 
-       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
+
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;
+       if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
+               drbd_msg_put_info("connection endpoint(s) missing");
+               retcode = ERR_INVALID_REQUEST;
+               goto out;
+       }
+
+       /* No need for _rcu here. All reconfiguration is
+        * strictly serialized on genl_lock(). We are protected against
+        * concurrent reconfiguration/addition/deletion */
+       list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
+               if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len &&
+                   !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) {
+                       retcode = ERR_LOCAL_ADDR;
+                       goto out;
+               }
+
+               if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len &&
+                   !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) {
+                       retcode = ERR_PEER_ADDR;
+                       goto out;
+               }
+       }
 
        tconn = adm_ctx.tconn;
        conn_reconfig_start(tconn);
@@ -2011,17 +2071,17 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
                goto fail;
        }
 
-       /* allocation not in the IO path, cqueue thread context */
+       /* allocation not in the IO path, drbdsetup / netlink process context */
        new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
        if (!new_conf) {
                retcode = ERR_NOMEM;
                goto fail;
        }
 
-       drbd_set_net_conf_defaults(new_conf);
+       set_net_conf_defaults(new_conf);
 
        err = net_conf_from_attrs(new_conf, info);
-       if (err) {
+       if (err && err != -ENOMSG) {
                retcode = ERR_MANDATORY_TAG;
                drbd_msg_put_info(from_attrs_err_to_txt(err));
                goto fail;
@@ -2031,37 +2091,6 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto fail;
 
-       retcode = NO_ERROR;
-
-       new_my_addr = (struct sockaddr *)&new_conf->my_addr;
-       new_peer_addr = (struct sockaddr *)&new_conf->peer_addr;
-
-       /* No need to take drbd_cfg_rwsem here.  All reconfiguration is
-        * strictly serialized on genl_lock(). We are protected against
-        * concurrent reconfiguration/addition/deletion */
-       list_for_each_entry(oconn, &drbd_tconns, all_tconn) {
-               struct net_conf *nc;
-               if (oconn == tconn)
-                       continue;
-
-               rcu_read_lock();
-               nc = rcu_dereference(oconn->net_conf);
-               if (nc) {
-                       taken_addr = (struct sockaddr *)&nc->my_addr;
-                       if (new_conf->my_addr_len == nc->my_addr_len &&
-                           !memcmp(new_my_addr, taken_addr, new_conf->my_addr_len))
-                               retcode = ERR_LOCAL_ADDR;
-
-                       taken_addr = (struct sockaddr *)&nc->peer_addr;
-                       if (new_conf->peer_addr_len == nc->peer_addr_len &&
-                           !memcmp(new_peer_addr, taken_addr, new_conf->peer_addr_len))
-                               retcode = ERR_PEER_ADDR;
-               }
-               rcu_read_unlock();
-               if (retcode != NO_ERROR)
-                       goto fail;
-       }
-
        retcode = alloc_crypto(&crypto, new_conf);
        if (retcode != NO_ERROR)
                goto fail;
@@ -2070,25 +2099,27 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
 
        conn_flush_workqueue(tconn);
 
-       mutex_lock(&tconn->net_conf_update);
+       mutex_lock(&tconn->conf_update);
        old_conf = tconn->net_conf;
        if (old_conf) {
                retcode = ERR_NET_CONFIGURED;
-               mutex_unlock(&tconn->net_conf_update);
+               mutex_unlock(&tconn->conf_update);
                goto fail;
        }
        rcu_assign_pointer(tconn->net_conf, new_conf);
 
        conn_free_crypto(tconn);
-       tconn->int_dig_in = crypto.int_dig_in;
-       tconn->int_dig_vv = crypto.int_dig_vv;
        tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
-       tconn->integrity_w_tfm = crypto.integrity_w_tfm;
-       tconn->integrity_r_tfm = crypto.integrity_r_tfm;
+       tconn->integrity_tfm = crypto.integrity_tfm;
        tconn->csums_tfm = crypto.csums_tfm;
        tconn->verify_tfm = crypto.verify_tfm;
 
-       mutex_unlock(&tconn->net_conf_update);
+       tconn->my_addr_len = nla_len(adm_ctx.my_addr);
+       memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len);
+       tconn->peer_addr_len = nla_len(adm_ctx.peer_addr);
+       memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len);
+
+       mutex_unlock(&tconn->conf_update);
 
        rcu_read_lock();
        idr_for_each_entry(&tconn->volumes, mdev, i) {
@@ -2116,37 +2147,54 @@ out:
 static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force)
 {
        enum drbd_state_rv rv;
-       if (force) {
-               spin_lock_irq(&tconn->req_lock);
-               rv = _conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
-               spin_unlock_irq(&tconn->req_lock);
-               return rv;
-       }
 
-       rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), 0);
+       rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
+                       force ? CS_HARD : 0);
 
        switch (rv) {
        case SS_NOTHING_TO_DO:
+               break;
        case SS_ALREADY_STANDALONE:
                return SS_SUCCESS;
        case SS_PRIMARY_NOP:
                /* Our state checking code wants to see the peer outdated. */
                rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
-                                                       pdsk, D_OUTDATED), CS_VERBOSE);
+                                               pdsk, D_OUTDATED), CS_VERBOSE);
                break;
        case SS_CW_FAILED_BY_PEER:
                /* The peer probably wants to see us outdated. */
                rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
                                                        disk, D_OUTDATED), 0);
                if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
-                       conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_HARD);
-                       rv = SS_SUCCESS;
+                       rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
+                                       CS_HARD);
                }
                break;
        default:;
                /* no special handling necessary */
        }
 
+       if (rv >= SS_SUCCESS) {
+               enum drbd_state_rv rv2;
+               /* No one else can reconfigure the network while I am here.
+                * The state handling only uses drbd_thread_stop_nowait(),
+                * we want to really wait here until the receiver is no more.
+                */
+               drbd_thread_stop(&adm_ctx.tconn->receiver);
+
+               /* Race breaker.  This additional state change request may be
+                * necessary, if this was a forced disconnect during a receiver
+                * restart.  We may have "killed" the receiver thread just
+                * after drbdd_init() returned.  Typically, we should be
+                * C_STANDALONE already, now, and this becomes a no-op.
+                */
+               rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
+                               CS_VERBOSE | CS_HARD);
+               if (rv2 < SS_SUCCESS)
+                       conn_err(tconn,
+                               "unexpected rv2=%d in conn_try_disconnect()\n",
+                               rv2);
+       }
        return rv;
 }
 
@@ -2158,7 +2206,7 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
        enum drbd_ret_code retcode;
        int err;
 
-       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
@@ -2177,19 +2225,9 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
 
        rv = conn_try_disconnect(tconn, parms.force_disconnect);
        if (rv < SS_SUCCESS)
-               goto fail;
-
-       /* No one else can reconfigure the network while I am here.
-        * The state handling only uses drbd_thread_stop_nowait(),
-        * we want to really wait here until the receiver is no more. */
-       drbd_thread_stop(&tconn->receiver);
-       if (wait_event_interruptible(tconn->ping_wait,
-                                    tconn->cstate == C_STANDALONE)) {
-               retcode = ERR_INTR;
-               goto fail;
-       }
-
-       retcode = NO_ERROR;
+               retcode = rv;  /* FIXME: Type mismatch. */
+       else
+               retcode = NO_ERROR;
  fail:
        drbd_adm_finish(info, retcode);
        return 0;
@@ -2203,7 +2241,7 @@ void resync_after_online_grow(struct drbd_conf *mdev)
        if (mdev->state.role != mdev->state.peer)
                iass = (mdev->state.role == R_PRIMARY);
        else
-               iass = test_bit(DISCARD_CONCURRENT, &mdev->tconn->flags);
+               iass = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);
 
        if (iass)
                drbd_start_resync(mdev, C_SYNC_SOURCE);
@@ -2213,11 +2251,13 @@ void resync_after_online_grow(struct drbd_conf *mdev)
 
 int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
 {
+       struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
        struct resize_parms rs;
        struct drbd_conf *mdev;
        enum drbd_ret_code retcode;
        enum determine_dev_size dd;
        enum dds_flags ddsf;
+       sector_t u_size;
        int err;
 
        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
@@ -2255,13 +2295,34 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
 
        if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
                retcode = ERR_NEED_APV_93;
-               goto fail;
+               goto fail_ldev;
+       }
+
+       rcu_read_lock();
+       u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
+       rcu_read_unlock();
+       if (u_size != (sector_t)rs.resize_size) {
+               new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
+               if (!new_disk_conf) {
+                       retcode = ERR_NOMEM;
+                       goto fail_ldev;
+               }
        }
 
        if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
                mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);
 
-       mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
+       if (new_disk_conf) {
+               mutex_lock(&mdev->tconn->conf_update);
+               old_disk_conf = mdev->ldev->disk_conf;
+               *new_disk_conf = *old_disk_conf;
+               new_disk_conf->disk_size = (sector_t)rs.resize_size;
+               rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
+               mutex_unlock(&mdev->tconn->conf_update);
+               synchronize_rcu();
+               kfree(old_disk_conf);
+       }
+
        ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
        dd = drbd_determine_dev_size(mdev, ddsf);
        drbd_md_sync(mdev);
@@ -2282,67 +2343,45 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
  fail:
        drbd_adm_finish(info, retcode);
        return 0;
+
+ fail_ldev:
+       put_ldev(mdev);
+       goto fail;
 }
 
 int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
 {
        enum drbd_ret_code retcode;
-       cpumask_var_t new_cpu_mask;
        struct drbd_tconn *tconn;
-       int *rs_plan_s = NULL;
-       struct res_opts sc;
+       struct res_opts res_opts;
        int err;
 
-       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto fail;
        tconn = adm_ctx.tconn;
 
-       if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL)) {
-               retcode = ERR_NOMEM;
-               drbd_msg_put_info("unable to allocate cpumask");
-               goto fail;
-       }
-
-       sc = tconn->res_opts;
+       res_opts = tconn->res_opts;
        if (should_set_defaults(info))
-               drbd_set_res_opts_default(&sc);
+               set_res_opts_defaults(&res_opts);
 
-       err = res_opts_from_attrs(&sc, info);
-       if (err) {
+       err = res_opts_from_attrs(&res_opts, info);
+       if (err && err != -ENOMSG) {
                retcode = ERR_MANDATORY_TAG;
                drbd_msg_put_info(from_attrs_err_to_txt(err));
                goto fail;
        }
 
-       /* silently ignore cpu mask on UP kernel */
-       if (nr_cpu_ids > 1 && sc.cpu_mask[0] != 0) {
-               err = __bitmap_parse(sc.cpu_mask, 32, 0,
-                               cpumask_bits(new_cpu_mask), nr_cpu_ids);
-               if (err) {
-                       conn_warn(tconn, "__bitmap_parse() failed with %d\n", err);
-                       retcode = ERR_CPU_MASK_PARSE;
-                       goto fail;
-               }
-       }
-
-
-       tconn->res_opts = sc;
-
-       if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) {
-               cpumask_copy(tconn->cpu_mask, new_cpu_mask);
-               drbd_calc_cpu_mask(tconn);
-               tconn->receiver.reset_cpu_mask = 1;
-               tconn->asender.reset_cpu_mask = 1;
-               tconn->worker.reset_cpu_mask = 1;
+       err = set_resource_options(tconn, &res_opts);
+       if (err) {
+               retcode = ERR_INVALID_REQUEST;
+               if (err == -ENOMEM)
+                       retcode = ERR_NOMEM;
        }
 
 fail:
-       kfree(rs_plan_s);
-       free_cpumask_var(new_cpu_mask);
-
        drbd_adm_finish(info, retcode);
        return 0;
 }
@@ -2361,8 +2400,11 @@ int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
        mdev = adm_ctx.mdev;
 
        /* If there is still bitmap IO pending, probably because of a previous
-        * resync just being finished, wait for it before requesting a new resync. */
+        * resync just being finished, wait for it before requesting a new resync.
+        * Also wait for its after_state_ch(). */
+       drbd_suspend_io(mdev);
        wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+       drbd_flush_workqueue(mdev);
 
        retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T), CS_ORDERED);
 
@@ -2380,12 +2422,30 @@ int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
 
                retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
        }
+       drbd_resume_io(mdev);
 
 out:
        drbd_adm_finish(info, retcode);
        return 0;
 }
 
+static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
+               union drbd_state mask, union drbd_state val)
+{
+       enum drbd_ret_code retcode;
+
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
+       if (!adm_ctx.reply_skb)
+               return retcode;
+       if (retcode != NO_ERROR)
+               goto out;
+
+       retcode = drbd_request_state(adm_ctx.mdev, mask, val);
+out:
+       drbd_adm_finish(info, retcode);
+       return 0;
+}
+
 static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
 {
        int rv;
@@ -2395,10 +2455,10 @@ static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
        return rv;
 }
 
-static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
-               union drbd_state mask, union drbd_state val)
+int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
 {
-       enum drbd_ret_code retcode;
+       int retcode; /* drbd_ret_code, drbd_state_rv */
+       struct drbd_conf *mdev;
 
        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
@@ -2406,17 +2466,37 @@ static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *
        if (retcode != NO_ERROR)
                goto out;
 
-       retcode = drbd_request_state(adm_ctx.mdev, mask, val);
+       mdev = adm_ctx.mdev;
+
+       /* If there is still bitmap IO pending, probably because of a previous
+        * resync just being finished, wait for it before requesting a new resync.
+        * Also wait for its after_state_ch(). */
+       drbd_suspend_io(mdev);
+       wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
+       drbd_flush_workqueue(mdev);
+
+       retcode = _drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S), CS_ORDERED);
+       if (retcode < SS_SUCCESS) {
+               if (retcode == SS_NEED_CONNECTION && mdev->state.role == R_PRIMARY) {
+                       /* The peer will get a resync upon connect anyways.
+                        * Just make that into a full resync. */
+                       retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
+                       if (retcode >= SS_SUCCESS) {
+                               if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
+                                                  "set_n_write from invalidate_peer",
+                                                  BM_LOCKED_SET_ALLOWED))
+                                       retcode = ERR_IO_MD_DISK;
+                       }
+               } else
+                       retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
+       }
+       drbd_resume_io(mdev);
+
 out:
        drbd_adm_finish(info, retcode);
        return 0;
 }
 
-int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
-{
-       return drbd_adm_simple_request_state(skb, info, NS(conn, C_STARTING_SYNC_S));
-}
-
 int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
 {
        enum drbd_ret_code retcode;
@@ -2501,15 +2581,23 @@ int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
        return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
 }
 
-int nla_put_drbd_cfg_context(struct sk_buff *skb, const char *conn_name, unsigned vnr)
+int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr)
 {
        struct nlattr *nla;
        nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
        if (!nla)
                goto nla_put_failure;
-       if (vnr != VOLUME_UNSPECIFIED)
-               NLA_PUT_U32(skb, T_ctx_volume, vnr);
-       NLA_PUT_STRING(skb, T_ctx_conn_name, conn_name);
+       if (vnr != VOLUME_UNSPECIFIED &&
+           nla_put_u32(skb, T_ctx_volume, vnr))
+               goto nla_put_failure;
+       if (nla_put_string(skb, T_ctx_resource_name, tconn->name))
+               goto nla_put_failure;
+       if (tconn->my_addr_len &&
+           nla_put(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr))
+               goto nla_put_failure;
+       if (tconn->peer_addr_len &&
+           nla_put(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr))
+               goto nla_put_failure;
        nla_nest_end(skb, nla);
        return 0;
 
@@ -2546,17 +2634,17 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
 
        /* We need to add connection name and volume number information still.
         * Minor number is in drbd_genlmsghdr. */
-       if (nla_put_drbd_cfg_context(skb, mdev->tconn->name, mdev->vnr))
+       if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr))
                goto nla_put_failure;
 
        if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
                goto nla_put_failure;
 
+       rcu_read_lock();
        if (got_ldev)
-               if (disk_conf_to_skb(skb, &mdev->ldev->dc, exclude_sensitive))
+               if (disk_conf_to_skb(skb, rcu_dereference(mdev->ldev->disk_conf), exclude_sensitive))
                        goto nla_put_failure;
 
-       rcu_read_lock();
        nc = rcu_dereference(mdev->tconn->net_conf);
        if (nc)
                err = net_conf_to_skb(skb, nc, exclude_sensitive);
@@ -2567,20 +2655,40 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
        nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
        if (!nla)
                goto nla_put_failure;
-       NLA_PUT_U32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY);
-       NLA_PUT_U32(skb, T_current_state, mdev->state.i);
-       NLA_PUT_U64(skb, T_ed_uuid, mdev->ed_uuid);
-       NLA_PUT_U64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev));
+       if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
+           nla_put_u32(skb, T_current_state, mdev->state.i) ||
+           nla_put_u64(skb, T_ed_uuid, mdev->ed_uuid) ||
+           nla_put_u64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev)) ||
+           nla_put_u64(skb, T_send_cnt, mdev->send_cnt) ||
+           nla_put_u64(skb, T_recv_cnt, mdev->recv_cnt) ||
+           nla_put_u64(skb, T_read_cnt, mdev->read_cnt) ||
+           nla_put_u64(skb, T_writ_cnt, mdev->writ_cnt) ||
+           nla_put_u64(skb, T_al_writ_cnt, mdev->al_writ_cnt) ||
+           nla_put_u64(skb, T_bm_writ_cnt, mdev->bm_writ_cnt) ||
+           nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&mdev->ap_bio_cnt)) ||
+           nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&mdev->ap_pending_cnt)) ||
+           nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&mdev->rs_pending_cnt)))
+               goto nla_put_failure;
 
        if (got_ldev) {
-               NLA_PUT_U32(skb, T_disk_flags, mdev->ldev->md.flags);
-               NLA_PUT(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
-               NLA_PUT_U64(skb, T_bits_total, drbd_bm_bits(mdev));
-               NLA_PUT_U64(skb, T_bits_oos, drbd_bm_total_weight(mdev));
+               int err;
+
+               spin_lock_irq(&mdev->ldev->md.uuid_lock);
+               err = nla_put(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
+               spin_unlock_irq(&mdev->ldev->md.uuid_lock);
+
+               if (err)
+                       goto nla_put_failure;
+
+               if (nla_put_u32(skb, T_disk_flags, mdev->ldev->md.flags) ||
+                   nla_put_u64(skb, T_bits_total, drbd_bm_bits(mdev)) ||
+                   nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(mdev)))
+                       goto nla_put_failure;
                if (C_SYNC_SOURCE <= mdev->state.conn &&
                    C_PAUSED_SYNC_T >= mdev->state.conn) {
-                       NLA_PUT_U64(skb, T_bits_rs_total, mdev->rs_total);
-                       NLA_PUT_U64(skb, T_bits_rs_failed, mdev->rs_failed);
+                       if (nla_put_u64(skb, T_bits_rs_total, mdev->rs_total) ||
+                           nla_put_u64(skb, T_bits_rs_failed, mdev->rs_failed))
+                               goto nla_put_failure;
                }
        }
 
@@ -2590,15 +2698,18 @@ int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
                case SIB_GET_STATUS_REPLY:
                        break;
                case SIB_STATE_CHANGE:
-                       NLA_PUT_U32(skb, T_prev_state, sib->os.i);
-                       NLA_PUT_U32(skb, T_new_state, sib->ns.i);
+                       if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
+                           nla_put_u32(skb, T_new_state, sib->ns.i))
+                               goto nla_put_failure;
                        break;
                case SIB_HELPER_POST:
-                       NLA_PUT_U32(skb,
-                               T_helper_exit_code, sib->helper_exit_code);
+                       if (nla_put_u32(skb, T_helper_exit_code,
+                                       sib->helper_exit_code))
+                               goto nla_put_failure;
                        /* fall through */
                case SIB_HELPER_PRE:
-                       NLA_PUT_STRING(skb, T_helper, sib->helper_name);
+                       if (nla_put_string(skb, T_helper, sib->helper_name))
+                               goto nla_put_failure;
                        break;
                }
        }
@@ -2663,9 +2774,9 @@ int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
         */
 
        /* synchronize with conn_create()/conn_destroy() */
-       down_read(&drbd_cfg_rwsem);
+       rcu_read_lock();
        /* revalidate iterator position */
-       list_for_each_entry(tmp, &drbd_tconns, all_tconn) {
+       list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
                if (pos == NULL) {
                        /* first iteration */
                        pos = tmp;
@@ -2683,8 +2794,8 @@ next_tconn:
                if (!mdev) {
                        /* No more volumes to dump on this tconn.
                         * Advance tconn iterator. */
-                       pos = list_entry(tconn->all_tconn.next,
-                                       struct drbd_tconn, all_tconn);
+                       pos = list_entry_rcu(tconn->all_tconn.next,
+                                            struct drbd_tconn, all_tconn);
                        /* Did we dump any volume on this tconn yet? */
                        if (volume != 0) {
                                /* If we reached the end of the list,
@@ -2698,21 +2809,25 @@ next_tconn:
                        }
                }
 
-               dh = genlmsg_put(skb, NETLINK_CB(cb->skb).pid,
+               dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
                                cb->nlh->nlmsg_seq, &drbd_genl_family,
                                NLM_F_MULTI, DRBD_ADM_GET_STATUS);
                if (!dh)
                        goto out;
 
                if (!mdev) {
-                       /* this is a tconn without a single volume */
+                       /* This is a tconn without a single volume.
+                        * Surprisingly enough, it may have a network
+                        * configuration. */
+                       struct net_conf *nc;
                        dh->minor = -1U;
                        dh->ret_code = NO_ERROR;
-                       if (nla_put_drbd_cfg_context(skb, tconn->name, VOLUME_UNSPECIFIED))
-                               genlmsg_cancel(skb, dh);
-                       else
-                               genlmsg_end(skb, dh);
-                       goto out;
+                       if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED))
+                               goto cancel;
+                       nc = rcu_dereference(tconn->net_conf);
+                       if (nc && net_conf_to_skb(skb, nc, 1) != 0)
+                               goto cancel;
+                       goto done;
                }
 
                D_ASSERT(mdev->vnr == volume);
@@ -2722,14 +2837,16 @@ next_tconn:
                dh->ret_code = NO_ERROR;
 
                if (nla_put_status_info(skb, mdev, NULL)) {
+cancel:
                        genlmsg_cancel(skb, dh);
                        goto out;
                }
+done:
                genlmsg_end(skb, dh);
         }
 
 out:
-       up_read(&drbd_cfg_rwsem);
+       rcu_read_unlock();
        /* where to start the next iteration */
         cb->args[0] = (long)pos;
         cb->args[1] = (pos == tconn) ? volume + 1 : 0;
@@ -2753,8 +2870,9 @@ int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
 {
        const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
        struct nlattr *nla;
-       const char *conn_name;
+       const char *resource_name;
        struct drbd_tconn *tconn;
+       int maxtype;
 
        /* Is this a followup call? */
        if (cb->args[0]) {
@@ -2774,12 +2892,15 @@ int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
        /* No explicit context given.  Dump all. */
        if (!nla)
                goto dump;
-       nla = nla_find_nested(nla, __nla_type(T_ctx_conn_name));
+       maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
+       nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
+       if (IS_ERR(nla))
+               return PTR_ERR(nla);
        /* context given, but no name present? */
        if (!nla)
                return -EINVAL;
-       conn_name = nla_data(nla);
-       tconn = conn_get_by_name(conn_name);
+       resource_name = nla_data(nla);
+       tconn = conn_get_by_name(resource_name);
 
        if (!tconn)
                return -ENODEV;
@@ -2827,6 +2948,7 @@ int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
 {
        struct drbd_conf *mdev;
        enum drbd_ret_code retcode;
+       struct start_ov_parms parms;
 
        retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
        if (!adm_ctx.reply_skb)
@@ -2835,23 +2957,28 @@ int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
                goto out;
 
        mdev = adm_ctx.mdev;
+
+       /* resume from last known position, if possible */
+       parms.ov_start_sector = mdev->ov_start_sector;
+       parms.ov_stop_sector = ULLONG_MAX;
        if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
-               /* resume from last known position, if possible */
-               struct start_ov_parms parms =
-                       { .ov_start_sector = mdev->ov_start_sector };
                int err = start_ov_parms_from_attrs(&parms, info);
                if (err) {
                        retcode = ERR_MANDATORY_TAG;
                        drbd_msg_put_info(from_attrs_err_to_txt(err));
                        goto out;
                }
-               /* w_make_ov_request expects position to be aligned */
-               mdev->ov_start_sector = parms.ov_start_sector & ~BM_SECT_PER_BIT;
        }
+       /* w_make_ov_request expects position to be aligned */
+       mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
+       mdev->ov_stop_sector = parms.ov_stop_sector;
+
        /* If there is still bitmap IO pending, e.g. previous resync or verify
         * just being finished, wait for it before requesting a new resync. */
+       drbd_suspend_io(mdev);
        wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
        retcode = drbd_request_state(mdev,NS(conn,C_VERIFY_S));
+       drbd_resume_io(mdev);
 out:
        drbd_adm_finish(info, retcode);
        return 0;
@@ -2932,24 +3059,26 @@ out_nolock:
 }
 
 static enum drbd_ret_code
-drbd_check_conn_name(const char *name)
+drbd_check_resource_name(const char *name)
 {
        if (!name || !name[0]) {
-               drbd_msg_put_info("connection name missing");
+               drbd_msg_put_info("resource name missing");
                return ERR_MANDATORY_TAG;
        }
        /* if we want to use these in sysfs/configfs/debugfs some day,
         * we must not allow slashes */
        if (strchr(name, '/')) {
-               drbd_msg_put_info("invalid connection name");
+               drbd_msg_put_info("invalid resource name");
                return ERR_INVALID_REQUEST;
        }
        return NO_ERROR;
 }
 
-int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info)
+int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
 {
        enum drbd_ret_code retcode;
+       struct res_opts res_opts;
+       int err;
 
        retcode = drbd_adm_prepare(skb, info, 0);
        if (!adm_ctx.reply_skb)
@@ -2957,20 +3086,28 @@ int drbd_adm_create_connection(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto out;
 
-       retcode = drbd_check_conn_name(adm_ctx.conn_name);
+       set_res_opts_defaults(&res_opts);
+       err = res_opts_from_attrs(&res_opts, info);
+       if (err && err != -ENOMSG) {
+               retcode = ERR_MANDATORY_TAG;
+               drbd_msg_put_info(from_attrs_err_to_txt(err));
+               goto out;
+       }
+
+       retcode = drbd_check_resource_name(adm_ctx.resource_name);
        if (retcode != NO_ERROR)
                goto out;
 
        if (adm_ctx.tconn) {
                if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
                        retcode = ERR_INVALID_REQUEST;
-                       drbd_msg_put_info("connection exists");
+                       drbd_msg_put_info("resource exists");
                }
                /* else: still NO_ERROR */
                goto out;
        }
 
-       if (!conn_create(adm_ctx.conn_name))
+       if (!conn_create(adm_ctx.resource_name, &res_opts))
                retcode = ERR_NOMEM;
 out:
        drbd_adm_finish(info, retcode);
@@ -2982,14 +3119,13 @@ int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
        struct drbd_genlmsghdr *dh = info->userhdr;
        enum drbd_ret_code retcode;
 
-       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;
 
-       /* FIXME drop minor_count parameter, limit to MINORMASK */
-       if (dh->minor >= minor_count) {
+       if (dh->minor > MINORMASK) {
                drbd_msg_put_info("requested minor out of range");
                retcode = ERR_INVALID_REQUEST;
                goto out;
@@ -3009,9 +3145,7 @@ int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
                goto out;
        }
 
-       down_write(&drbd_cfg_rwsem);
        retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
-       up_write(&drbd_cfg_rwsem);
 out:
        drbd_adm_finish(info, retcode);
        return 0;
@@ -3024,7 +3158,13 @@ static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
             * we may want to delete a minor from a live replication group.
             */
            mdev->state.role == R_SECONDARY) {
-               drbd_delete_device(mdev);
+               _drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS),
+                                   CS_VERBOSE + CS_WAIT_COMPLETE);
+               idr_remove(&mdev->tconn->volumes, mdev->vnr);
+               idr_remove(&minors, mdev_to_minor(mdev));
+               del_gendisk(mdev->vdisk);
+               synchronize_rcu();
+               kref_put(&mdev->kref, &drbd_minor_destroy);
                return NO_ERROR;
        } else
                return ERR_MINOR_CONFIGURED;
@@ -3040,9 +3180,7 @@ int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
        if (retcode != NO_ERROR)
                goto out;
 
-       down_write(&drbd_cfg_rwsem);
        retcode = adm_delete_minor(adm_ctx.mdev);
-       up_write(&drbd_cfg_rwsem);
 out:
        drbd_adm_finish(info, retcode);
        return 0;
@@ -3050,8 +3188,7 @@ out:
 
 int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
 {
-       enum drbd_ret_code retcode;
-       enum drbd_state_rv rv;
+       int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
        struct drbd_conf *mdev;
        unsigned i;
 
@@ -3062,98 +3199,91 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
                goto out;
 
        if (!adm_ctx.tconn) {
-               retcode = ERR_CONN_NOT_KNOWN;
+               retcode = ERR_RES_NOT_KNOWN;
                goto out;
        }
 
-       down_read(&drbd_cfg_rwsem);
        /* demote */
        idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
                retcode = drbd_set_role(mdev, R_SECONDARY, 0);
                if (retcode < SS_SUCCESS) {
                        drbd_msg_put_info("failed to demote");
-                       goto out_unlock;
+                       goto out;
                }
        }
 
-       /* disconnect */
-       rv = conn_try_disconnect(adm_ctx.tconn, 0);
-       if (rv < SS_SUCCESS) {
-               retcode = rv; /* enum type mismatch! */
+       retcode = conn_try_disconnect(adm_ctx.tconn, 0);
+       if (retcode < SS_SUCCESS) {
                drbd_msg_put_info("failed to disconnect");
-               goto out_unlock;
+               goto out;
        }
 
-       /* Make sure the network threads have actually stopped,
-        * state handling only does drbd_thread_stop_nowait(). */
-       drbd_thread_stop(&adm_ctx.tconn->receiver);
-
        /* detach */
        idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
-               rv = adm_detach(mdev);
-               if (rv < SS_SUCCESS) {
-                       retcode = rv; /* enum type mismatch! */
+               retcode = adm_detach(mdev, 0);
+               if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
                        drbd_msg_put_info("failed to detach");
-                       goto out_unlock;
+                       goto out;
                }
        }
-       up_read(&drbd_cfg_rwsem);
+
+       /* If we reach this, all volumes (of this tconn) are Secondary,
+        * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
+        * actually stopped, state handling only does drbd_thread_stop_nowait(). */
+       drbd_thread_stop(&adm_ctx.tconn->worker);
+
+       /* Now, nothing can fail anymore */
 
        /* delete volumes */
-       down_write(&drbd_cfg_rwsem);
        idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
                retcode = adm_delete_minor(mdev);
                if (retcode != NO_ERROR) {
                        /* "can not happen" */
                        drbd_msg_put_info("failed to delete volume");
-                       up_write(&drbd_cfg_rwsem);
                        goto out;
                }
        }
 
        /* delete connection */
        if (conn_lowest_minor(adm_ctx.tconn) < 0) {
-               drbd_thread_stop(&adm_ctx.tconn->worker);
-               list_del(&adm_ctx.tconn->all_tconn);
+               list_del_rcu(&adm_ctx.tconn->all_tconn);
+               synchronize_rcu();
                kref_put(&adm_ctx.tconn->kref, &conn_destroy);
 
                retcode = NO_ERROR;
        } else {
                /* "can not happen" */
-               retcode = ERR_CONN_IN_USE;
+               retcode = ERR_RES_IN_USE;
                drbd_msg_put_info("failed to delete connection");
        }
-
-       up_write(&drbd_cfg_rwsem);
        goto out;
-out_unlock:
-       up_read(&drbd_cfg_rwsem);
 out:
        drbd_adm_finish(info, retcode);
        return 0;
 }
 
-int drbd_adm_delete_connection(struct sk_buff *skb, struct genl_info *info)
+int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
 {
        enum drbd_ret_code retcode;
 
-       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONN);
+       retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
        if (!adm_ctx.reply_skb)
                return retcode;
        if (retcode != NO_ERROR)
                goto out;
 
-       down_write(&drbd_cfg_rwsem);
        if (conn_lowest_minor(adm_ctx.tconn) < 0) {
-               list_del(&adm_ctx.tconn->all_tconn);
+               list_del_rcu(&adm_ctx.tconn->all_tconn);
+               synchronize_rcu();
                kref_put(&adm_ctx.tconn->kref, &conn_destroy);
 
                retcode = NO_ERROR;
        } else {
-               retcode = ERR_CONN_IN_USE;
+               retcode = ERR_RES_IN_USE;
        }
-       up_write(&drbd_cfg_rwsem);
 
+       if (retcode == NO_ERROR)
+               drbd_thread_stop(&adm_ctx.tconn->worker);
 out:
        drbd_adm_finish(info, retcode);
        return 0;
@@ -3167,6 +3297,13 @@ void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
        unsigned seq;
        int err = -ENOMEM;
 
+       if (sib->sib_reason == SIB_SYNC_PROGRESS) {
+               if (time_after(jiffies, mdev->rs_last_bcast + HZ))
+                       mdev->rs_last_bcast = jiffies;
+               else
+                       return;
+       }
+
        seq = atomic_inc_return(&drbd_genl_seq);
        msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
        if (!msg)
@@ -3177,7 +3314,7 @@ void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
        if (!d_out) /* cannot happen, but anyways. */
                goto nla_put_failure;
        d_out->minor = mdev_to_minor(mdev);
-       d_out->ret_code = 0;
+       d_out->ret_code = NO_ERROR;
 
        if (nla_put_status_info(msg, mdev, sib))
                goto nla_put_failure;