drm/i915/bdw: Help out the ctx switch interrupt handler
author: Oscar Mateo <oscar.mateo@intel.com>
Thu, 24 Jul 2014 16:04:41 +0000 (17:04 +0100)
committer: Daniel Vetter <daniel.vetter@ffwll.ch>
Thu, 14 Aug 2014 20:44:04 +0000 (22:44 +0200)
If we receive a storm of requests for the same context (see gem_storedw_loop_*)
we might end up iterating over too many elements in interrupt time, looking for
contexts to squash together. Instead, share the burden by giving more
intelligence to the queue function. At most, the interrupt will iterate over
three elements.

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
[danvet: Checkpatch.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/intel_lrc.c

index 0f1b6b2b0f0e007098bd1a8021dce7096684337a..6f6c5a931fafb188122d90e67ded09d4397e946f 100644 (file)
@@ -392,10 +392,10 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
                                   struct intel_context *to,
                                   u32 tail)
 {
-       struct intel_ctx_submit_request *req = NULL;
+       struct intel_ctx_submit_request *req = NULL, *cursor;
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        unsigned long flags;
-       bool was_empty;
+       int num_elements = 0;
 
        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (req == NULL)
@@ -410,9 +410,27 @@ static int execlists_context_queue(struct intel_engine_cs *ring,
 
        spin_lock_irqsave(&ring->execlist_lock, flags);
 
-       was_empty = list_empty(&ring->execlist_queue);
+       list_for_each_entry(cursor, &ring->execlist_queue, execlist_link)
+               if (++num_elements > 2)
+                       break;
+
+       if (num_elements > 2) {
+               struct intel_ctx_submit_request *tail_req;
+
+               tail_req = list_last_entry(&ring->execlist_queue,
+                                          struct intel_ctx_submit_request,
+                                          execlist_link);
+
+               if (to == tail_req->ctx) {
+                       WARN(tail_req->elsp_submitted != 0,
+                            "More than 2 already-submitted reqs queued\n");
+                       list_del(&tail_req->execlist_link);
+                       queue_work(dev_priv->wq, &tail_req->work);
+               }
+       }
+
        list_add_tail(&req->execlist_link, &ring->execlist_queue);
-       if (was_empty)
+       if (num_elements == 0)
                execlists_context_unqueue(ring);
 
        spin_unlock_irqrestore(&ring->execlist_lock, flags);