drm/i915: Make 'i915_gem_check_olr' actually check by request not seqno
author     John Harrison <John.C.Harrison@Intel.com>    Mon, 24 Nov 2014 18:49:30 +0000 (18:49 +0000)
committer  Daniel Vetter <daniel.vetter@ffwll.ch>        Wed, 3 Dec 2014 08:35:16 +0000 (09:35 +0100)
Updated the _check_olr() function to actually take a request object and compare
it directly to the ring's outstanding lazy request (OLR), rather than extracting
seqnos from both and comparing those.
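
For illustration, the call-site change in i915_gem_object_flush_active()
(taken from the hunk below) shows the intent: callers now hand over the
request itself instead of digging its seqno back out:

	/* Before: extract the seqno and let the check compare seqnos. */
	ret = i915_gem_check_olr(obj->ring,
		     i915_gem_request_get_seqno(obj->last_read_req));

	/* After: pass the request and compare it against the OLR directly. */
	ret = i915_gem_check_olr(obj->last_read_req);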

Note that there is one use case where the request object being processed is no
longer available at that point in the call stack. Hence a temporary copy of the
original function is still present (but called _check_ols() instead). This will
be removed in a subsequent patch.
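
The use case in question is i915_wait_seqno(), which only has a bare seqno
to hand at that point and therefore switches to the temporary seqno-based
helper (see the i915_gem.c hunk below):

	/* Only a seqno is available here, so use the temporary helper. */
	ret = i915_gem_check_ols(ring, seqno);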

Also, downgraded a BUG_ON to a WARN_ON as apparently the former is frowned upon
for shipping code.
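
For background (a general kernel convention, not specific to this patch):
BUG_ON() brings the kernel down when the condition fires, whereas WARN_ON()
only prints a backtrace and lets execution continue, so the locking
assertion in the reworked check becomes:

	WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));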

For: VIZ-4377
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Thomas Daniel <Thomas.Daniel@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4924f1d3d4b415b8099d29267f07d379a20ee54e..e6a997cd0cd0a79c3e858ba59a48e061910bc675 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2577,7 +2577,7 @@ bool i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
                                      bool interruptible);
-int __must_check i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno);
+int __must_check i915_gem_check_olr(struct drm_i915_gem_request *req);
 
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
 {
@@ -3117,4 +3117,20 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
        }
 }
 
+/* XXX: Temporary solution to be removed later in patch series. */
+static inline int __must_check i915_gem_check_ols(
+                                    struct intel_engine_cs *ring, u32 seqno)
+{
+       int ret;
+
+       WARN_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+
+       ret = 0;
+       if (seqno == i915_gem_request_get_seqno(ring->outstanding_lazy_request))
+               ret = i915_add_request(ring, NULL);
+
+       return ret;
+}
+/* XXX: Temporary solution to be removed later in patch series. */
+
 #endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fba22a56650b3d568b5199ea70d0c7f3f2e16747..7d6f9bc9ebb57e52fd07d60b5c89544e44da74ef 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1153,19 +1153,18 @@ i915_gem_check_wedge(struct i915_gpu_error *error,
 }
 
 /*
- * Compare seqno against outstanding lazy request. Emit a request if they are
- * equal.
+ * Compare arbitrary request against outstanding lazy request. Emit on match.
  */
 int
-i915_gem_check_olr(struct intel_engine_cs *ring, u32 seqno)
+i915_gem_check_olr(struct drm_i915_gem_request *req)
 {
        int ret;
 
-       BUG_ON(!mutex_is_locked(&ring->dev->struct_mutex));
+       WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
 
        ret = 0;
-       if (seqno == i915_gem_request_get_seqno(ring->outstanding_lazy_request))
-               ret = i915_add_request(ring, NULL);
+       if (req == req->ring->outstanding_lazy_request)
+               ret = i915_add_request(req->ring, NULL);
 
        return ret;
 }
@@ -1328,7 +1327,7 @@ i915_wait_seqno(struct intel_engine_cs *ring, uint32_t seqno)
        if (ret)
                return ret;
 
-       ret = i915_gem_check_olr(ring, seqno);
+       ret = i915_gem_check_ols(ring, seqno);
        if (ret)
                return ret;
 
@@ -1395,7 +1394,6 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *ring = obj->ring;
        unsigned reset_counter;
-       u32 seqno;
        int ret;
 
        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -1405,22 +1403,19 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
        if (!req)
                return 0;
 
-       seqno = i915_gem_request_get_seqno(req);
-       WARN_ON(seqno == 0);
-
        ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
        if (ret)
                return ret;
 
-       ret = i915_gem_check_olr(ring, seqno);
+       ret = i915_gem_check_olr(req);
        if (ret)
                return ret;
 
        reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        i915_gem_request_reference(req);
        mutex_unlock(&dev->struct_mutex);
-       ret = __i915_wait_seqno(ring, seqno, reset_counter, true, NULL,
-                               file_priv);
+       ret = __i915_wait_seqno(ring, i915_gem_request_get_seqno(req),
+                               reset_counter, true, NULL, file_priv);
        mutex_lock(&dev->struct_mutex);
        i915_gem_request_unreference(req);
        if (ret)
@@ -2880,8 +2875,7 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
        int ret;
 
        if (obj->active) {
-               ret = i915_gem_check_olr(obj->ring,
-                            i915_gem_request_get_seqno(obj->last_read_req));
+               ret = i915_gem_check_olr(obj->last_read_req);
                if (ret)
                        return ret;
 
@@ -3011,7 +3005,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
        if (seqno <= from->semaphore.sync_seqno[idx])
                return 0;
 
-       ret = i915_gem_check_olr(obj->ring, seqno);
+       ret = i915_gem_check_olr(obj->last_read_req);
        if (ret)
                return ret;