drm/i915: drop polled waits from i915_wait_request
authorBen Widawsky <ben@bwidawsk.net>
Thu, 26 Apr 2012 23:03:02 +0000 (16:03 -0700)
committerDaniel Vetter <daniel.vetter@ffwll.ch>
Thu, 3 May 2012 09:18:22 +0000 (11:18 +0200)
The only time irq_get should fail is during unload or suspend. Both of
these points should try to quiesce the GPU before disabling interrupts
and so the atomic polling should never occur.

This was recommended by Chris Wilson as a way of reducing added
complexity to the polled wait which I introduced in an RFC patch.

09:57 < ickle_> it's only there as a fudge for waiting after irqs
are uninstalled during s&r, we aren't actually meant to hit it
09:57 < ickle_> so maybe we should just kill the code there and fix the breakage

v2: return -ENODEV instead of -EBUSY when irq_get fails

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_gem.c

index 292b4a9e7bab7cb8d0a24a89ba4ab2983aa9e9f7..148e04baceba0d50873e58c182cf41bffd3bd957 100644 (file)
@@ -1864,22 +1864,19 @@ i915_wait_request(struct intel_ring_buffer *ring,
        if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
                trace_i915_gem_request_wait_begin(ring, seqno);
 
-               if (ring->irq_get(ring)) {
-                       if (dev_priv->mm.interruptible)
-                               ret = wait_event_interruptible(ring->irq_queue,
-                                                              i915_seqno_passed(ring->get_seqno(ring), seqno)
-                                                              || atomic_read(&dev_priv->mm.wedged));
-                       else
-                               wait_event(ring->irq_queue,
-                                          i915_seqno_passed(ring->get_seqno(ring), seqno)
-                                          || atomic_read(&dev_priv->mm.wedged));
+               if (WARN_ON(!ring->irq_get(ring)))
+                       return -ENODEV;
 
-                       ring->irq_put(ring);
-               } else if (wait_for_atomic(i915_seqno_passed(ring->get_seqno(ring),
-                                                            seqno) ||
-                                          atomic_read(&dev_priv->mm.wedged), 3000))
-                       ret = -EBUSY;
+               if (dev_priv->mm.interruptible)
+                       ret = wait_event_interruptible(ring->irq_queue,
+                                                      i915_seqno_passed(ring->get_seqno(ring), seqno)
+                                                      || atomic_read(&dev_priv->mm.wedged));
+               else
+                       wait_event(ring->irq_queue,
+                                  i915_seqno_passed(ring->get_seqno(ring), seqno)
+                                  || atomic_read(&dev_priv->mm.wedged));
 
+               ring->irq_put(ring);
                trace_i915_gem_request_wait_end(ring, seqno);
        }
        if (atomic_read(&dev_priv->mm.wedged))