Merge tag 'efi-next' of git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi...
[firefly-linux-kernel-4.4.55.git] / drivers / target / target_core_transport.c
index bd68727a6806eabecd6608c85c6d5b506ebcce2f..5bacc7b5ed6d85cf54d6d8fe445dcac08ee8081b 100644 (file)
@@ -1075,6 +1075,55 @@ transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
 }
 EXPORT_SYMBOL(transport_set_vpd_ident);
 
+static sense_reason_t
+target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
+                              unsigned int size)
+{
+       u32 mtl;
+
+       if (!cmd->se_tfo->max_data_sg_nents)
+               return TCM_NO_SENSE;
+       /*
+        * Check if fabric enforced maximum SGL entries per I/O descriptor
+        * exceeds se_cmd->data_length.  If true, set SCF_UNDERFLOW_BIT +
+        * residual_count and reduce original cmd->data_length to maximum
+        * length based on single PAGE_SIZE entry scatter-lists.
+        */
+       mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
+       if (cmd->data_length > mtl) {
+               /*
+                * If an existing CDB overflow is present, calculate new residual
+                * based on CDB size minus fabric maximum transfer length.
+                *
+                * If an existing CDB underflow is present, calculate new residual
+                * based on original cmd->data_length minus fabric maximum transfer
+                * length.
+                *
+                * Otherwise, set the underflow residual based on cmd->data_length
+                * minus fabric maximum transfer length.
+                */
+               if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
+                       cmd->residual_count = (size - mtl);
+               } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+                       u32 orig_dl = size + cmd->residual_count;
+                       cmd->residual_count = (orig_dl - mtl);
+               } else {
+                       cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+                       cmd->residual_count = (cmd->data_length - mtl);
+               }
+               cmd->data_length = mtl;
+               /*
+                * Reset sbc_check_prot() calculated protection payload
+                * length based upon the new smaller MTL.
+                */
+               if (cmd->prot_length) {
+                       u32 sectors = (mtl / dev->dev_attrib.block_size);
+                       cmd->prot_length = dev->prot_length * sectors;
+               }
+       }
+       return TCM_NO_SENSE;
+}
+
 sense_reason_t
 target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
 {
@@ -1120,7 +1169,7 @@ target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
                }
        }
 
-       return 0;
+       return target_check_max_data_sg_nents(cmd, dev, size);
 
 }
 
@@ -1178,14 +1227,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
                        " emulation is not supported\n");
                return TCM_INVALID_CDB_FIELD;
        }
-       /*
-        * Used to determine when ORDERED commands should go from
-        * Dormant to Active status.
-        */
-       cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
-       pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
-                       cmd->se_ordered_id, cmd->sam_task_attr,
-                       dev->transport->name);
+
        return 0;
 }
 
@@ -1699,8 +1741,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
 
 check_stop:
        transport_lun_remove_cmd(cmd);
-       if (!transport_cmd_check_stop_to_fabric(cmd))
-               ;
+       transport_cmd_check_stop_to_fabric(cmd);
        return;
 
 queue_full:
@@ -1773,16 +1814,14 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
         */
        switch (cmd->sam_task_attr) {
        case TCM_HEAD_TAG:
-               pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x, "
-                        "se_ordered_id: %u\n",
-                        cmd->t_task_cdb[0], cmd->se_ordered_id);
+               pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
+                        cmd->t_task_cdb[0]);
                return false;
        case TCM_ORDERED_TAG:
                atomic_inc_mb(&dev->dev_ordered_sync);
 
-               pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
-                        " se_ordered_id: %u\n",
-                        cmd->t_task_cdb[0], cmd->se_ordered_id);
+               pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
+                        cmd->t_task_cdb[0]);
 
                /*
                 * Execute an ORDERED command if no other older commands
@@ -1806,10 +1845,8 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
        list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
        spin_unlock(&dev->delayed_cmd_lock);
 
-       pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
-               " delayed CMD list, se_ordered_id: %u\n",
-               cmd->t_task_cdb[0], cmd->sam_task_attr,
-               cmd->se_ordered_id);
+       pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
+               cmd->t_task_cdb[0], cmd->sam_task_attr);
        return true;
 }
 
@@ -1894,20 +1931,18 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
        if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
                atomic_dec_mb(&dev->simple_cmds);
                dev->dev_cur_ordered_id++;
-               pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
-                       " SIMPLE: %u\n", dev->dev_cur_ordered_id,
-                       cmd->se_ordered_id);
+               pr_debug("Incremented dev->dev_cur_ordered_id: %u for SIMPLE\n",
+                        dev->dev_cur_ordered_id);
        } else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
                dev->dev_cur_ordered_id++;
-               pr_debug("Incremented dev_cur_ordered_id: %u for"
-                       " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
-                       cmd->se_ordered_id);
+               pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
+                        dev->dev_cur_ordered_id);
        } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
                atomic_dec_mb(&dev->dev_ordered_sync);
 
                dev->dev_cur_ordered_id++;
-               pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
-                       " %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
+               pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
+                        dev->dev_cur_ordered_id);
        }
 
        target_restart_delayed_cmds(dev);