target: Drop se_device TCQ queue_depth usage from I/O path
author Nicholas Bellinger <nab@linux-iscsi.org>
Wed, 30 Nov 2011 09:25:21 +0000 (01:25 -0800)
committer Nicholas Bellinger <nab@linux-iscsi.org>
Wed, 14 Dec 2011 11:42:13 +0000 (11:42 +0000)
Historically, pSCSI devices have been the ones that required target-core
to enforce a per se_device->depth_left limit.  This patch changes target-core
to no longer (by default) enforce a per se_device->depth_left, or sleep in
transport_tcq_window_closed() when we run out of queue slots, for all backend
export cases.
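
As a rough illustration of the behavioural change (a userspace sketch, not
the kernel code itself; the names dispatch_old()/dispatch_new() and the
queue depth of 32 are made up for this example), the old I/O path gated
dispatch on an atomic credit counter and backed off when it hit zero,
while the new path simply sends whatever is on the execute list and lets
the backend enforce its own queue depth:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative per-device credit counter, modelling the old
 * se_device->depth_left. */
static atomic_int depth_left = 32;

/* Old-style dispatch: consume a credit per task and refuse to send when
 * none remain (target-core used to sleep in transport_tcq_window_closed()
 * at this point). */
static bool dispatch_old(int task_id)
{
	if (atomic_load(&depth_left) == 0)
		return false;		/* queue window closed, caller backs off */
	atomic_fetch_sub(&depth_left, 1);
	printf("old path: sent task %d\n", task_id);
	return true;
}

/* New-style dispatch after this patch: no per-device credit check in the
 * I/O path; queue depth is left to the backend (e.g. the SCSI midlayer
 * for pSCSI exports). */
static void dispatch_new(int task_id)
{
	printf("new path: sent task %d\n", task_id);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		if (!dispatch_old(i))
			puts("old path: would sleep and retry here");
		dispatch_new(i);
	}
	return 0;
}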

Cc: Christoph Hellwig <hch@lst.de>
Cc: Roland Dreier <roland@purestorage.com>
Cc: Joern Engel <joern@logfs.org>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
drivers/target/target_core_device.c
drivers/target/target_core_pscsi.c
drivers/target/target_core_transport.c
include/target/target_core_base.h

diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 1f74de25a92fe854e855fc2291540e8875686799..0c5992f0d9469cd1c34f63ef22296aa7519498e8 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1132,8 +1132,6 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
  */
 int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
 {
-       u32 orig_queue_depth = dev->queue_depth;
-
        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
                pr_err("dev[%p]: Unable to change SE Device TCQ while"
                        " dev_export_obj: %d count exists\n", dev,
@@ -1167,11 +1165,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
        }
 
        dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
-       if (queue_depth > orig_queue_depth)
-               atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
-       else if (queue_depth < orig_queue_depth)
-               atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
-
        pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
                        dev, queue_depth);
        return 0;
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index b73a399cdd54e0d7fb89b235b8c4c89d835e172e..d35467d42e12da52a7ac28d28cb01b78f741d129 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -350,7 +350,6 @@ static struct se_device *pscsi_add_device_to_list(
         * scsi_device_put() and the pdv->pdv_sd cleared.
         */
        pdv->pdv_sd = sd;
-
        dev = transport_add_device_to_core_hba(hba, &pscsi_template,
                                se_dev, dev_flags, pdv,
                                &dev_limits, NULL, NULL);
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 1cc7e920ab0b4497c1fc7539d012a078852fd41a..7c2def7e2593e1a4658d984c4c47d456227272ef 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -691,12 +691,6 @@ void transport_complete_task(struct se_task *task, int success)
        struct se_cmd *cmd = task->task_se_cmd;
        struct se_device *dev = cmd->se_dev;
        unsigned long flags;
-#if 0
-       pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
-                       cmd->t_task_cdb[0], dev);
-#endif
-       if (dev)
-               atomic_inc(&dev->depth_left);
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        task->task_flags &= ~TF_ACTIVE;
@@ -971,9 +965,8 @@ void transport_dump_dev_state(
                break;
        }
 
-       *bl += sprintf(b + *bl, "  Execute/Left/Max Queue Depth: %d/%d/%d",
-               atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
-               dev->queue_depth);
+       *bl += sprintf(b + *bl, "  Execute/Max Queue Depth: %d/%d",
+               atomic_read(&dev->execute_tasks), dev->queue_depth);
        *bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
                dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
        *bl += sprintf(b + *bl, "        ");
@@ -1328,9 +1321,6 @@ struct se_device *transport_add_device_to_core_hba(
        spin_lock_init(&dev->se_port_lock);
        spin_lock_init(&dev->se_tmr_lock);
        spin_lock_init(&dev->qf_cmd_lock);
-
-       dev->queue_depth        = dev_limits->queue_depth;
-       atomic_set(&dev->depth_left, dev->queue_depth);
        atomic_set(&dev->dev_ordered_id, 0);
 
        se_dev_set_default_attribs(dev, dev_limits);
@@ -1982,18 +1972,6 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
        spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 }
 
-static inline int transport_tcq_window_closed(struct se_device *dev)
-{
-       if (dev->dev_tcq_window_closed++ <
-                       PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
-               msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
-       } else
-               msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
-
-       wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
-       return 0;
-}
-
 /*
  * Called from Fabric Module context from transport_execute_tasks()
  *
@@ -2126,16 +2104,7 @@ static int __transport_execute_tasks(struct se_device *dev)
        struct se_task *task = NULL;
        unsigned long flags;
 
-       /*
-        * Check if there is enough room in the device and HBA queue to send
-        * struct se_tasks to the selected transport.
-        */
 check_depth:
-       if (!atomic_read(&dev->depth_left))
-               return transport_tcq_window_closed(dev);
-
-       dev->dev_tcq_window_closed = 0;
-
        spin_lock_irq(&dev->execute_task_lock);
        if (list_empty(&dev->execute_task_list)) {
                spin_unlock_irq(&dev->execute_task_lock);
@@ -2146,10 +2115,7 @@ check_depth:
        __transport_remove_task_from_execute_queue(task, dev);
        spin_unlock_irq(&dev->execute_task_lock);
 
-       atomic_dec(&dev->depth_left);
-
        cmd = task->task_se_cmd;
-
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        task->task_flags |= (TF_ACTIVE | TF_SENT);
        atomic_inc(&cmd->t_task_cdbs_sent);
@@ -2170,7 +2136,6 @@ check_depth:
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                atomic_set(&cmd->t_transport_sent, 0);
                transport_stop_tasks_for_cmd(cmd);
-               atomic_inc(&dev->depth_left);
                transport_generic_request_failure(cmd);
        }
 
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 28190dc10eef9eb361aceeb933cab4be725c08f9..cd4caf3a598f4284ac7e94abcfa45d72a09784c3 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -798,7 +798,6 @@ struct se_device {
        spinlock_t              stats_lock;
        /* Active commands on this virtual SE device */
        atomic_t                simple_cmds;
-       atomic_t                depth_left;
        atomic_t                dev_ordered_id;
        atomic_t                execute_tasks;
        atomic_t                dev_ordered_sync;