Merge branch 'fortglx/3.9/time' of git://git.linaro.org/people/jstultz/linux into...
[firefly-linux-kernel-4.4.55.git] / drivers / target / target_core_iblock.c
index 57d7674c50133c67430019efb98966b6013fa9e3..b526d23dcd4f178be3fd06b4fdcc273a0ba78a4f 100644 (file)
@@ -4,10 +4,7 @@
  * This file contains the Storage Engine  <-> Linux BlockIO transport
  * specific functions.
  *
- * Copyright (c) 2003, 2004, 2005 PyX Technologies, Inc.
- * Copyright (c) 2005, 2006, 2007 SBE, Inc.
- * Copyright (c) 2007-2010 Rising Tide Systems
- * Copyright (c) 2008-2010 Linux-iSCSI.org
+ * (c) Copyright 2003-2012 RisingTide Systems LLC.
  *
  * Nicholas A. Bellinger <nab@kernel.org>
  *
 #define IBLOCK_MAX_BIO_PER_TASK         32     /* max # of bios to submit at a time */
 #define IBLOCK_BIO_POOL_SIZE   128
 
-static struct se_subsystem_api iblock_template;
+static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
+{
+       return container_of(dev, struct iblock_dev, dev);
+}
+
 
-static void iblock_bio_done(struct bio *, int);
+static struct se_subsystem_api iblock_template;
 
 /*     iblock_attach_hba(): (Part of se_subsystem_api_t template)
  *
@@ -70,7 +71,7 @@ static void iblock_detach_hba(struct se_hba *hba)
 {
 }
 
-static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
+static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
 {
        struct iblock_dev *ib_dev = NULL;
 
@@ -82,40 +83,28 @@ static void *iblock_allocate_virtdevice(struct se_hba *hba, const char *name)
 
        pr_debug( "IBLOCK: Allocated ib_dev for %s\n", name);
 
-       return ib_dev;
+       return &ib_dev->dev;
 }
 
-static struct se_device *iblock_create_virtdevice(
-       struct se_hba *hba,
-       struct se_subsystem_dev *se_dev,
-       void *p)
+static int iblock_configure_device(struct se_device *dev)
 {
-       struct iblock_dev *ib_dev = p;
-       struct se_device *dev;
-       struct se_dev_limits dev_limits;
-       struct block_device *bd = NULL;
+       struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct request_queue *q;
-       struct queue_limits *limits;
-       u32 dev_flags = 0;
+       struct block_device *bd = NULL;
        fmode_t mode;
-       int ret = -EINVAL;
+       int ret = -ENOMEM;
 
-       if (!ib_dev) {
-               pr_err("Unable to locate struct iblock_dev parameter\n");
-               return ERR_PTR(ret);
+       if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
+               pr_err("Missing udev_path= parameters for IBLOCK\n");
+               return -EINVAL;
        }
-       memset(&dev_limits, 0, sizeof(struct se_dev_limits));
 
        ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
        if (!ib_dev->ibd_bio_set) {
-               pr_err("IBLOCK: Unable to create bioset()\n");
-               return ERR_PTR(-ENOMEM);
+               pr_err("IBLOCK: Unable to create bioset\n");
+               goto out;
        }
-       pr_debug("IBLOCK: Created bio_set()\n");
-       /*
-        * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
-        * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
-        */
+
        pr_debug( "IBLOCK: Claiming struct block_device: %s\n",
                        ib_dev->ibd_udev_path);
 
@@ -126,27 +115,15 @@ static struct se_device *iblock_create_virtdevice(
        bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
        if (IS_ERR(bd)) {
                ret = PTR_ERR(bd);
-               goto failed;
+               goto out_free_bioset;
        }
-       /*
-        * Setup the local scope queue_limits from struct request_queue->limits
-        * to pass into transport_add_device_to_core_hba() as struct se_dev_limits.
-        */
-       q = bdev_get_queue(bd);
-       limits = &dev_limits.limits;
-       limits->logical_block_size = bdev_logical_block_size(bd);
-       limits->max_hw_sectors = UINT_MAX;
-       limits->max_sectors = UINT_MAX;
-       dev_limits.hw_queue_depth = q->nr_requests;
-       dev_limits.queue_depth = q->nr_requests;
-
        ib_dev->ibd_bd = bd;
 
-       dev = transport_add_device_to_core_hba(hba,
-                       &iblock_template, se_dev, dev_flags, ib_dev,
-                       &dev_limits, "IBLOCK", IBLOCK_VERSION);
-       if (!dev)
-               goto failed;
+       q = bdev_get_queue(bd);
+
+       dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
+       dev->dev_attrib.hw_max_sectors = UINT_MAX;
+       dev->dev_attrib.hw_queue_depth = q->nr_requests;
 
        /*
         * Check if the underlying struct block_device request_queue supports
@@ -154,38 +131,41 @@ static struct se_device *iblock_create_virtdevice(
         * in ATA and we need to set TPE=1
         */
        if (blk_queue_discard(q)) {
-               dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count =
+               dev->dev_attrib.max_unmap_lba_count =
                                q->limits.max_discard_sectors;
+
                /*
                 * Currently hardcoded to 1 in Linux/SCSI code..
                 */
-               dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
-               dev->se_sub_dev->se_dev_attrib.unmap_granularity =
+               dev->dev_attrib.max_unmap_block_desc_count = 1;
+               dev->dev_attrib.unmap_granularity =
                                q->limits.discard_granularity >> 9;
-               dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
+               dev->dev_attrib.unmap_granularity_alignment =
                                q->limits.discard_alignment;
 
                pr_debug("IBLOCK: BLOCK Discard support available,"
                                " disabled by default\n");
        }
+       /*
+        * Enable write same emulation for IBLOCK and use 0xFFFF as
+        * the smaller WRITE_SAME(10) only has a two-byte block count.
+        */
+       dev->dev_attrib.max_write_same_len = 0xFFFF;
 
        if (blk_queue_nonrot(q))
-               dev->se_sub_dev->se_dev_attrib.is_nonrot = 1;
-
-       return dev;
+               dev->dev_attrib.is_nonrot = 1;
+       return 0;
 
-failed:
-       if (ib_dev->ibd_bio_set) {
-               bioset_free(ib_dev->ibd_bio_set);
-               ib_dev->ibd_bio_set = NULL;
-       }
-       ib_dev->ibd_bd = NULL;
-       return ERR_PTR(ret);
+out_free_bioset:
+       bioset_free(ib_dev->ibd_bio_set);
+       ib_dev->ibd_bio_set = NULL;
+out:
+       return ret;
 }
 
-static void iblock_free_device(void *p)
+static void iblock_free_device(struct se_device *dev)
 {
-       struct iblock_dev *ib_dev = p;
+       struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
 
        if (ib_dev->ibd_bd != NULL)
                blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
@@ -203,12 +183,12 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
                                        bdev_logical_block_size(bd)) - 1);
        u32 block_size = bdev_logical_block_size(bd);
 
-       if (block_size == dev->se_sub_dev->se_dev_attrib.block_size)
+       if (block_size == dev->dev_attrib.block_size)
                return blocks_long;
 
        switch (block_size) {
        case 4096:
-               switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+               switch (dev->dev_attrib.block_size) {
                case 2048:
                        blocks_long <<= 1;
                        break;
@@ -222,7 +202,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
                }
                break;
        case 2048:
-               switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+               switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 1;
                        break;
@@ -237,7 +217,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
                }
                break;
        case 1024:
-               switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+               switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 2;
                        break;
@@ -252,7 +232,7 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
                }
                break;
        case 512:
-               switch (dev->se_sub_dev->se_dev_attrib.block_size) {
+               switch (dev->dev_attrib.block_size) {
                case 4096:
                        blocks_long >>= 3;
                        break;
@@ -273,6 +253,87 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(
        return blocks_long;
 }
 
+static void iblock_complete_cmd(struct se_cmd *cmd)
+{
+       struct iblock_req *ibr = cmd->priv;
+       u8 status;
+
+       if (!atomic_dec_and_test(&ibr->pending))
+               return;
+
+       if (atomic_read(&ibr->ib_bio_err_cnt))
+               status = SAM_STAT_CHECK_CONDITION;
+       else
+               status = SAM_STAT_GOOD;
+
+       target_complete_cmd(cmd, status);
+       kfree(ibr);
+}
+
+static void iblock_bio_done(struct bio *bio, int err)
+{
+       struct se_cmd *cmd = bio->bi_private;
+       struct iblock_req *ibr = cmd->priv;
+
+       /*
+        * Set -EIO if !BIO_UPTODATE and the passed-in err is still 0
+        */
+       if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
+               err = -EIO;
+
+       if (err != 0) {
+               pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
+                       " err: %d\n", bio, err);
+               /*
+                * Bump the ib_bio_err_cnt and release bio.
+                */
+               atomic_inc(&ibr->ib_bio_err_cnt);
+               smp_mb__after_atomic_inc();
+       }
+
+       bio_put(bio);
+
+       iblock_complete_cmd(cmd);
+}
+
+static struct bio *
+iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
+{
+       struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
+       struct bio *bio;
+
+       /*
+        * Only allocate as many vector entries as the bio code allows us to,
+        * we'll loop later on until we have handled the whole request.
+        */
+       if (sg_num > BIO_MAX_PAGES)
+               sg_num = BIO_MAX_PAGES;
+
+       bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
+       if (!bio) {
+               pr_err("Unable to allocate memory for bio\n");
+               return NULL;
+       }
+
+       bio->bi_bdev = ib_dev->ibd_bd;
+       bio->bi_private = cmd;
+       bio->bi_end_io = &iblock_bio_done;
+       bio->bi_sector = lba;
+
+       return bio;
+}
+
+static void iblock_submit_bios(struct bio_list *list, int rw)
+{
+       struct blk_plug plug;
+       struct bio *bio;
+
+       blk_start_plug(&plug);
+       while ((bio = bio_list_pop(list)))
+               submit_bio(rw, bio);
+       blk_finish_plug(&plug);
+}
+
 static void iblock_end_io_flush(struct bio *bio, int err)
 {
        struct se_cmd *cmd = bio->bi_private;
@@ -281,13 +342,10 @@ static void iblock_end_io_flush(struct bio *bio, int err)
                pr_err("IBLOCK: cache flush failed: %d\n", err);
 
        if (cmd) {
-               if (err) {
-                       cmd->scsi_sense_reason =
-                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               if (err)
                        target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
-               } else {
+               else
                        target_complete_cmd(cmd, SAM_STAT_GOOD);
-               }
        }
 
        bio_put(bio);
@@ -297,9 +355,10 @@ static void iblock_end_io_flush(struct bio *bio, int err)
  * Implement SYCHRONIZE CACHE.  Note that we can't handle lba ranges and must
  * always flush the whole cache.
  */
-static int iblock_execute_sync_cache(struct se_cmd *cmd)
+static sense_reason_t
+iblock_execute_sync_cache(struct se_cmd *cmd)
 {
-       struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
+       struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
        int immed = (cmd->t_task_cdb[1] & 0x2);
        struct bio *bio;
 
@@ -319,25 +378,27 @@ static int iblock_execute_sync_cache(struct se_cmd *cmd)
        return 0;
 }
 
-static int iblock_execute_unmap(struct se_cmd *cmd)
+static sense_reason_t
+iblock_execute_unmap(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
-       struct iblock_dev *ibd = dev->dev_ptr;
+       struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        unsigned char *buf, *ptr = NULL;
        sector_t lba;
        int size;
        u32 range;
-       int ret = 0;
-       int dl, bd_dl;
+       sense_reason_t ret = 0;
+       int dl, bd_dl, err;
 
        if (cmd->data_length < 8) {
                pr_warn("UNMAP parameter list length %u too small\n",
                        cmd->data_length);
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               return -EINVAL;
+               return TCM_INVALID_PARAMETER_LIST;
        }
 
        buf = transport_kmap_data_sg(cmd);
+       if (!buf)
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 
        dl = get_unaligned_be16(&buf[0]);
        bd_dl = get_unaligned_be16(&buf[2]);
@@ -349,9 +410,8 @@ static int iblock_execute_unmap(struct se_cmd *cmd)
        else
                size = bd_dl;
 
-       if (size / 16 > dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
-               cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-               ret = -EINVAL;
+       if (size / 16 > dev->dev_attrib.max_unmap_block_desc_count) {
+               ret = TCM_INVALID_PARAMETER_LIST;
                goto err;
        }
 
@@ -366,23 +426,22 @@ static int iblock_execute_unmap(struct se_cmd *cmd)
                pr_debug("UNMAP: Using lba: %llu and range: %u\n",
                                 (unsigned long long)lba, range);
 
-               if (range > dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count) {
-                       cmd->scsi_sense_reason = TCM_INVALID_PARAMETER_LIST;
-                       ret = -EINVAL;
+               if (range > dev->dev_attrib.max_unmap_lba_count) {
+                       ret = TCM_INVALID_PARAMETER_LIST;
                        goto err;
                }
 
                if (lba + range > dev->transport->get_blocks(dev) + 1) {
-                       cmd->scsi_sense_reason = TCM_ADDRESS_OUT_OF_RANGE;
-                       ret = -EINVAL;
+                       ret = TCM_ADDRESS_OUT_OF_RANGE;
                        goto err;
                }
 
-               ret = blkdev_issue_discard(ibd->ibd_bd, lba, range,
+               err = blkdev_issue_discard(ib_dev->ibd_bd, lba, range,
                                           GFP_KERNEL, 0);
-               if (ret < 0) {
+               if (err < 0) {
                        pr_err("blkdev_issue_discard() failed: %d\n",
-                                       ret);
+                                       err);
+                       ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                        goto err;
                }
 
@@ -397,23 +456,86 @@ err:
        return ret;
 }
 
-static int iblock_execute_write_same(struct se_cmd *cmd)
+static sense_reason_t
+iblock_execute_write_same_unmap(struct se_cmd *cmd)
 {
-       struct iblock_dev *ibd = cmd->se_dev->dev_ptr;
-       int ret;
-
-       ret = blkdev_issue_discard(ibd->ibd_bd, cmd->t_task_lba,
-                                  spc_get_write_same_sectors(cmd), GFP_KERNEL,
-                                  0);
-       if (ret < 0) {
-               pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
-               return ret;
+       struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
+       int rc;
+
+       rc = blkdev_issue_discard(ib_dev->ibd_bd, cmd->t_task_lba,
+                       spc_get_write_same_sectors(cmd), GFP_KERNEL, 0);
+       if (rc < 0) {
+               pr_warn("blkdev_issue_discard() failed: %d\n", rc);
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
 
        target_complete_cmd(cmd, GOOD);
        return 0;
 }
 
+static sense_reason_t
+iblock_execute_write_same(struct se_cmd *cmd)
+{
+       struct iblock_req *ibr;
+       struct scatterlist *sg;
+       struct bio *bio;
+       struct bio_list list;
+       sector_t block_lba = cmd->t_task_lba;
+       sector_t sectors = spc_get_write_same_sectors(cmd);
+
+       sg = &cmd->t_data_sg[0];
+
+       if (cmd->t_data_nents > 1 ||
+           sg->length != cmd->se_dev->dev_attrib.block_size) {
+               pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
+                       " block_size: %u\n", cmd->t_data_nents, sg->length,
+                       cmd->se_dev->dev_attrib.block_size);
+               return TCM_INVALID_CDB_FIELD;
+       }
+
+       ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
+       if (!ibr)
+               goto fail;
+       cmd->priv = ibr;
+
+       bio = iblock_get_bio(cmd, block_lba, 1);
+       if (!bio)
+               goto fail_free_ibr;
+
+       bio_list_init(&list);
+       bio_list_add(&list, bio);
+
+       atomic_set(&ibr->pending, 1);
+
+       while (sectors) {
+               while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
+                               != sg->length) {
+
+                       bio = iblock_get_bio(cmd, block_lba, 1);
+                       if (!bio)
+                               goto fail_put_bios;
+
+                       atomic_inc(&ibr->pending);
+                       bio_list_add(&list, bio);
+               }
+
+               /* Always in 512 byte units for Linux/Block */
+               block_lba += sg->length >> IBLOCK_LBA_SHIFT;
+               sectors -= 1;
+       }
+
+       iblock_submit_bios(&list, WRITE);
+       return 0;
+
+fail_put_bios:
+       while ((bio = bio_list_pop(&list)))
+               bio_put(bio);
+fail_free_ibr:
+       kfree(ibr);
+fail:
+       return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+}
+
 enum {
        Opt_udev_path, Opt_readonly, Opt_force, Opt_err
 };
@@ -425,11 +547,10 @@ static match_table_t tokens = {
        {Opt_err, NULL}
 };
 
-static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
-                                              struct se_subsystem_dev *se_dev,
-                                              const char *page, ssize_t count)
+static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
+               const char *page, ssize_t count)
 {
-       struct iblock_dev *ib_dev = se_dev->se_dev_su_ptr;
+       struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        char *orig, *ptr, *arg_p, *opts;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, token;
@@ -491,43 +612,26 @@ out:
        return (!ret) ? count : ret;
 }
 
-static ssize_t iblock_check_configfs_dev_params(
-       struct se_hba *hba,
-       struct se_subsystem_dev *se_dev)
+static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
 {
-       struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
-
-       if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
-               pr_err("Missing udev_path= parameters for IBLOCK\n");
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-static ssize_t iblock_show_configfs_dev_params(
-       struct se_hba *hba,
-       struct se_subsystem_dev *se_dev,
-       char *b)
-{
-       struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
-       struct block_device *bd = ibd->ibd_bd;
+       struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+       struct block_device *bd = ib_dev->ibd_bd;
        char buf[BDEVNAME_SIZE];
        ssize_t bl = 0;
 
        if (bd)
                bl += sprintf(b + bl, "iBlock device: %s",
                                bdevname(bd, buf));
-       if (ibd->ibd_flags & IBDF_HAS_UDEV_PATH)
+       if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
                bl += sprintf(b + bl, "  UDEV PATH: %s",
-                               ibd->ibd_udev_path);
-       bl += sprintf(b + bl, "  readonly: %d\n", ibd->ibd_readonly);
+                               ib_dev->ibd_udev_path);
+       bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);
 
        bl += sprintf(b + bl, "        ");
        if (bd) {
                bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
                        MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
-                       "" : (bd->bd_holder == ibd) ?
+                       "" : (bd->bd_holder == ib_dev) ?
                        "CLAIMED: IBLOCK" : "CLAIMED: OS");
        } else {
                bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
@@ -536,61 +640,8 @@ static ssize_t iblock_show_configfs_dev_params(
        return bl;
 }
 
-static void iblock_complete_cmd(struct se_cmd *cmd)
-{
-       struct iblock_req *ibr = cmd->priv;
-       u8 status;
-
-       if (!atomic_dec_and_test(&ibr->pending))
-               return;
-
-       if (atomic_read(&ibr->ib_bio_err_cnt))
-               status = SAM_STAT_CHECK_CONDITION;
-       else
-               status = SAM_STAT_GOOD;
-
-       target_complete_cmd(cmd, status);
-       kfree(ibr);
-}
-
-static struct bio *
-iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
-{
-       struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;
-       struct bio *bio;
-
-       /*
-        * Only allocate as many vector entries as the bio code allows us to,
-        * we'll loop later on until we have handled the whole request.
-        */
-       if (sg_num > BIO_MAX_PAGES)
-               sg_num = BIO_MAX_PAGES;
-
-       bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
-       if (!bio) {
-               pr_err("Unable to allocate memory for bio\n");
-               return NULL;
-       }
-
-       bio->bi_bdev = ib_dev->ibd_bd;
-       bio->bi_private = cmd;
-       bio->bi_end_io = &iblock_bio_done;
-       bio->bi_sector = lba;
-       return bio;
-}
-
-static void iblock_submit_bios(struct bio_list *list, int rw)
-{
-       struct blk_plug plug;
-       struct bio *bio;
-
-       blk_start_plug(&plug);
-       while ((bio = bio_list_pop(list)))
-               submit_bio(rw, bio);
-       blk_finish_plug(&plug);
-}
-
-static int iblock_execute_rw(struct se_cmd *cmd)
+static sense_reason_t
+iblock_execute_rw(struct se_cmd *cmd)
 {
        struct scatterlist *sgl = cmd->t_data_sg;
        u32 sgl_nents = cmd->t_data_nents;
@@ -611,8 +662,8 @@ static int iblock_execute_rw(struct se_cmd *cmd)
                 * Force data to disk if we pretend to not have a volatile
                 * write cache, or the initiator set the Force Unit Access bit.
                 */
-               if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 ||
-                   (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
+               if (dev->dev_attrib.emulate_write_cache == 0 ||
+                   (dev->dev_attrib.emulate_fua_write > 0 &&
                     (cmd->se_cmd_flags & SCF_FUA)))
                        rw = WRITE_FUA;
                else
@@ -625,19 +676,18 @@ static int iblock_execute_rw(struct se_cmd *cmd)
         * Convert the blocksize advertised to the initiator to the 512 byte
         * units unconditionally used by the Linux block layer.
         */
-       if (dev->se_sub_dev->se_dev_attrib.block_size == 4096)
+       if (dev->dev_attrib.block_size == 4096)
                block_lba = (cmd->t_task_lba << 3);
-       else if (dev->se_sub_dev->se_dev_attrib.block_size == 2048)
+       else if (dev->dev_attrib.block_size == 2048)
                block_lba = (cmd->t_task_lba << 2);
-       else if (dev->se_sub_dev->se_dev_attrib.block_size == 1024)
+       else if (dev->dev_attrib.block_size == 1024)
                block_lba = (cmd->t_task_lba << 1);
-       else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
+       else if (dev->dev_attrib.block_size == 512)
                block_lba = cmd->t_task_lba;
        else {
                pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
-                               " %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               return -ENOSYS;
+                               " %u\n", dev->dev_attrib.block_size);
+               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
 
        ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
@@ -697,83 +747,48 @@ fail_put_bios:
                bio_put(bio);
 fail_free_ibr:
        kfree(ibr);
-       cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 fail:
-       return -ENOMEM;
-}
-
-static u32 iblock_get_device_rev(struct se_device *dev)
-{
-       return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */
-}
-
-static u32 iblock_get_device_type(struct se_device *dev)
-{
-       return TYPE_DISK;
+       return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 }
 
 static sector_t iblock_get_blocks(struct se_device *dev)
 {
-       struct iblock_dev *ibd = dev->dev_ptr;
-       struct block_device *bd = ibd->ibd_bd;
+       struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+       struct block_device *bd = ib_dev->ibd_bd;
        struct request_queue *q = bdev_get_queue(bd);
 
        return iblock_emulate_read_cap_with_block_size(dev, bd, q);
 }
 
-static void iblock_bio_done(struct bio *bio, int err)
-{
-       struct se_cmd *cmd = bio->bi_private;
-       struct iblock_req *ibr = cmd->priv;
-
-       /*
-        * Set -EIO if !BIO_UPTODATE and the passed is still err=0
-        */
-       if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
-               err = -EIO;
-
-       if (err != 0) {
-               pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
-                       " err: %d\n", bio, err);
-               /*
-                * Bump the ib_bio_err_cnt and release bio.
-                */
-               atomic_inc(&ibr->ib_bio_err_cnt);
-               smp_mb__after_atomic_inc();
-       }
-
-       bio_put(bio);
-
-       iblock_complete_cmd(cmd);
-}
-
-static struct spc_ops iblock_spc_ops = {
+static struct sbc_ops iblock_sbc_ops = {
        .execute_rw             = iblock_execute_rw,
        .execute_sync_cache     = iblock_execute_sync_cache,
        .execute_write_same     = iblock_execute_write_same,
+       .execute_write_same_unmap = iblock_execute_write_same_unmap,
        .execute_unmap          = iblock_execute_unmap,
 };
 
-static int iblock_parse_cdb(struct se_cmd *cmd)
+static sense_reason_t
+iblock_parse_cdb(struct se_cmd *cmd)
 {
-       return sbc_parse_cdb(cmd, &iblock_spc_ops);
+       return sbc_parse_cdb(cmd, &iblock_sbc_ops);
 }
 
 static struct se_subsystem_api iblock_template = {
        .name                   = "iblock",
+       .inquiry_prod           = "IBLOCK",
+       .inquiry_rev            = IBLOCK_VERSION,
        .owner                  = THIS_MODULE,
        .transport_type         = TRANSPORT_PLUGIN_VHBA_PDEV,
        .attach_hba             = iblock_attach_hba,
        .detach_hba             = iblock_detach_hba,
-       .allocate_virtdevice    = iblock_allocate_virtdevice,
-       .create_virtdevice      = iblock_create_virtdevice,
+       .alloc_device           = iblock_alloc_device,
+       .configure_device       = iblock_configure_device,
        .free_device            = iblock_free_device,
        .parse_cdb              = iblock_parse_cdb,
-       .check_configfs_dev_params = iblock_check_configfs_dev_params,
        .set_configfs_dev_params = iblock_set_configfs_dev_params,
        .show_configfs_dev_params = iblock_show_configfs_dev_params,
-       .get_device_rev         = iblock_get_device_rev,
-       .get_device_type        = iblock_get_device_type,
+       .get_device_type        = sbc_get_device_type,
        .get_blocks             = iblock_get_blocks,
 };