drm/i915: remove do_retire from i915_wait_request
authorBen Widawsky <ben@bwidawsk.net>
Thu, 26 Apr 2012 23:02:58 +0000 (16:02 -0700)
committerDaniel Vetter <daniel.vetter@ffwll.ch>
Thu, 3 May 2012 09:18:20 +0000 (11:18 +0200)
This originates from a hack by me to quickly fix a bug in an earlier
patch where we needed control over whether or not waiting on a seqno
actually did any retire list processing. Since the two operations aren't
clearly related, we should pull the parameter out of the wait function,
and make the caller responsible for retiring if the action is desired.

The only function call site which did not get an explicit retire_request call
(on purpose) is i915_gem_inactive_shrink(). That code was already calling
retire_request a second time.

v2: don't modify any behavior except in i915_gem_inactive_shrink (Daniel)

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_evict.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_ringbuffer.c

index 7a55abbddc9908d5d9d644a15b5e733f960219c7..17081dad3faac584419cdc81270259925fa72c93 100644 (file)
@@ -2009,9 +2009,10 @@ int i915_driver_unload(struct drm_device *dev)
                unregister_shrinker(&dev_priv->mm.inactive_shrinker);
 
        mutex_lock(&dev->struct_mutex);
-       ret = i915_gpu_idle(dev, true);
+       ret = i915_gpu_idle(dev);
        if (ret)
                DRM_ERROR("failed to idle hardware: %d\n", ret);
+       i915_gem_retire_requests(dev);
        mutex_unlock(&dev->struct_mutex);
 
        /* Cancel the retire work handler, which should be idle now. */
index 0095c8d12569bc6665c71aa8ddb7fc395bcf7766..2113a1ab9625e0156a4869545109fe69aec43c17 100644 (file)
@@ -1297,14 +1297,13 @@ int __must_check i915_gem_init_hw(struct drm_device *dev);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_init_ppgtt(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
-int __must_check i915_gpu_idle(struct drm_device *dev, bool do_retire);
+int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_idle(struct drm_device *dev);
 int __must_check i915_add_request(struct intel_ring_buffer *ring,
                                  struct drm_file *file,
                                  struct drm_i915_gem_request *request);
 int __must_check i915_wait_request(struct intel_ring_buffer *ring,
-                                  uint32_t seqno,
-                                  bool do_retire);
+                                  uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
 i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
index b46a3fd1774688e43d08d60774a8170cd3881d8b..e378204970fdf37e5f7da61ec173d865fb56e204 100644 (file)
@@ -1825,8 +1825,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
  */
 int
 i915_wait_request(struct intel_ring_buffer *ring,
-                 uint32_t seqno,
-                 bool do_retire)
+                 uint32_t seqno)
 {
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        u32 ier;
@@ -1902,14 +1901,6 @@ i915_wait_request(struct intel_ring_buffer *ring,
        if (atomic_read(&dev_priv->mm.wedged))
                ret = -EAGAIN;
 
-       /* Directly dispatch request retiring.  While we have the work queue
-        * to handle this, the waiter on a request often wants an associated
-        * buffer to have made it to the inactive list, and we would need
-        * a separate wait queue to handle that.
-        */
-       if (ret == 0 && do_retire)
-               i915_gem_retire_requests_ring(ring);
-
        return ret;
 }
 
@@ -1931,10 +1922,10 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj)
         * it.
         */
        if (obj->active) {
-               ret = i915_wait_request(obj->ring, obj->last_rendering_seqno,
-                                       true);
+               ret = i915_wait_request(obj->ring, obj->last_rendering_seqno);
                if (ret)
                        return ret;
+               i915_gem_retire_requests_ring(obj->ring);
        }
 
        return 0;
@@ -2117,7 +2108,7 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
        return 0;
 }
 
-static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
+static int i915_ring_idle(struct intel_ring_buffer *ring)
 {
        int ret;
 
@@ -2131,18 +2122,17 @@ static int i915_ring_idle(struct intel_ring_buffer *ring, bool do_retire)
                        return ret;
        }
 
-       return i915_wait_request(ring, i915_gem_next_request_seqno(ring),
-                                do_retire);
+       return i915_wait_request(ring, i915_gem_next_request_seqno(ring));
 }
 
-int i915_gpu_idle(struct drm_device *dev, bool do_retire)
+int i915_gpu_idle(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret, i;
 
        /* Flush everything onto the inactive list. */
        for (i = 0; i < I915_NUM_RINGS; i++) {
-               ret = i915_ring_idle(&dev_priv->ring[i], do_retire);
+               ret = i915_ring_idle(&dev_priv->ring[i]);
                if (ret)
                        return ret;
        }
@@ -2331,9 +2321,7 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
        }
 
        if (obj->last_fenced_seqno) {
-               ret = i915_wait_request(obj->ring,
-                                       obj->last_fenced_seqno,
-                                       false);
+               ret = i915_wait_request(obj->ring, obj->last_fenced_seqno);
                if (ret)
                        return ret;
 
@@ -3394,11 +3382,12 @@ i915_gem_idle(struct drm_device *dev)
                return 0;
        }
 
-       ret = i915_gpu_idle(dev, true);
+       ret = i915_gpu_idle(dev);
        if (ret) {
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }
+       i915_gem_retire_requests(dev);
 
        /* Under UMS, be paranoid and evict. */
        if (!drm_core_check_feature(dev, DRIVER_MODESET))
@@ -4025,7 +4014,7 @@ rescan:
                 * This has a dramatic impact to reduce the number of
                 * OOM-killer events whilst running the GPU aggressively.
                 */
-               if (i915_gpu_idle(dev, true) == 0)
+               if (i915_gpu_idle(dev) == 0)
                        goto rescan;
        }
        mutex_unlock(&dev->struct_mutex);
index 91ebb94d7c8be35b668fe194643d7b7c4c5df058..3bcf0451d07c45323b3be5bff25d04006a538c49 100644 (file)
@@ -168,7 +168,7 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj, *next;
        bool lists_empty;
-       int ret;
+       int ret,i;
 
        lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                       list_empty(&dev_priv->mm.flushing_list) &&
@@ -178,11 +178,20 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 
        trace_i915_gem_evict_everything(dev, purgeable_only);
 
-       /* Flush everything (on to the inactive lists) and evict */
-       ret = i915_gpu_idle(dev, true);
+       ret = i915_gpu_idle(dev);
        if (ret)
                return ret;
 
+       /* The gpu_idle will flush everything in the write domain to the
+        * active list. Then we must move everything off the active list
+        * with retire requests.
+        */
+       for (i = 0; i < I915_NUM_RINGS; i++)
+               if (WARN_ON(!list_empty(&dev_priv->ring[i].gpu_write_list)))
+                       return -EBUSY;
+
+       i915_gem_retire_requests(dev);
+
        BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 
        /* Having flushed everything, unbind() should never raise an error */
index 68ec0130a6269c72124f18410620a285750e201c..cbba0aa6104b7891fdf4356dd97de7f2e854ff86 100644 (file)
@@ -1220,9 +1220,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                         * so every billion or so execbuffers, we need to stall
                         * the GPU in order to reset the counters.
                         */
-                       ret = i915_gpu_idle(dev, true);
+                       ret = i915_gpu_idle(dev);
                        if (ret)
                                goto err;
+                       i915_gem_retire_requests(dev);
 
                        BUG_ON(ring->sync_seqno[i]);
                }
index 25c8bf9d1d4e8526b81d8e22d244a1629bb65752..29d573c27b35acad54ea603f256698c2a57f13c7 100644 (file)
@@ -317,7 +317,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)
 
        if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
                dev_priv->mm.interruptible = false;
-               if (i915_gpu_idle(dev_priv->dev, false)) {
+               if (i915_gpu_idle(dev_priv->dev)) {
                        DRM_ERROR("Couldn't idle GPU\n");
                        /* Wait a bit, in hopes it avoids the hang */
                        udelay(10);
index a0b5053c5a32761232009b21798a3ca3d62843c2..e06e46a3075789410a4952fb617e5e0e739c0244 100644 (file)
@@ -225,10 +225,10 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
        }
        overlay->last_flip_req = request->seqno;
        overlay->flip_tail = tail;
-       ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
-                               true);
+       ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
        if (ret)
                return ret;
+       i915_gem_retire_requests(dev);
 
        overlay->last_flip_req = 0;
        return 0;
@@ -447,10 +447,10 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
        if (overlay->last_flip_req == 0)
                return 0;
 
-       ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req,
-                               true);
+       ret = i915_wait_request(LP_RING(dev_priv), overlay->last_flip_req);
        if (ret)
                return ret;
+       i915_gem_retire_requests(dev);
 
        if (overlay->flip_tail)
                overlay->flip_tail(overlay);
index 427b7c55ffe0de9feddb3ba878f2b060a603cdb6..a7d97d17b285a0774ac3a586e35b909251cc6602 100644 (file)
@@ -1088,9 +1088,11 @@ static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
        was_interruptible = dev_priv->mm.interruptible;
        dev_priv->mm.interruptible = false;
 
-       ret = i915_wait_request(ring, seqno, true);
+       ret = i915_wait_request(ring, seqno);
 
        dev_priv->mm.interruptible = was_interruptible;
+       if (!ret)
+               i915_gem_retire_requests_ring(ring);
 
        return ret;
 }