drm/i915: Update ring->add_request() to take a request structure
author John Harrison <John.C.Harrison@Intel.com>
Fri, 29 May 2015 16:44:00 +0000 (17:44 +0100)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Tue, 23 Jun 2015 12:02:24 +0000 (14:02 +0200)
Updated the various ring->add_request() implementations to take a request
structure instead of a ring. This removes their reliance on the OLR
(outstanding lazy request) to obtain the seqno value that the request should
be tagged with.
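
All of the backends now follow roughly the pattern below (an illustrative
sketch only, not lifted verbatim from the diff; example_add_request is a
made-up name):

	static int example_add_request(struct drm_i915_gem_request *req)
	{
		/* Both the engine and the seqno come straight from the
		 * request, so ring->outstanding_lazy_request is no longer
		 * consulted.
		 */
		struct intel_engine_cs *ring = req->ring;
		u32 seqno = i915_gem_request_get_seqno(req);
		int ret;

		ret = intel_ring_begin(ring, 4);
		if (ret)
			return ret;

		intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
		intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		intel_ring_emit(ring, seqno);
		intel_ring_emit(ring, MI_USER_INTERRUPT);
		__intel_ring_advance(ring);

		return 0;
	}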

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h

index 25fe1ef32eaacdc2ecda562b6b1fd3f05f760efe..6d511d32f72aa3738693777624fcbfd73c740f00 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2524,7 +2524,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
        if (i915.enable_execlists)
                ret = ring->emit_request(ringbuf, request);
        else {
-               ret = ring->add_request(ring);
+               ret = ring->add_request(request);
 
                request->tail = intel_ring_get_tail(ringbuf);
        }
index e0aa008f05550903164e0d1f4afb4280287621d3..28d7801a8fa5cb3a2ba218d4ea9c8391de443fd3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1288,16 +1288,16 @@ static int gen6_signal(struct intel_engine_cs *signaller,
 
 /**
  * gen6_add_request - Update the semaphore mailbox registers
- * 
- * @ring - ring that is adding a request
- * @seqno - return seqno stuck into the ring
+ *
+ * @request - request to write to the ring
  *
  * Update the mailbox registers in the *other* rings with the current seqno.
  * This acts like a signal in the canonical semaphore.
  */
 static int
-gen6_add_request(struct intel_engine_cs *ring)
+gen6_add_request(struct drm_i915_gem_request *req)
 {
+       struct intel_engine_cs *ring = req->ring;
        int ret;
 
        if (ring->semaphore.signal)
@@ -1310,8 +1310,7 @@ gen6_add_request(struct intel_engine_cs *ring)
 
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(ring,
-                   i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+       intel_ring_emit(ring, i915_gem_request_get_seqno(req));
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        __intel_ring_advance(ring);
 
@@ -1408,8 +1407,9 @@ do {                                                                      \
 } while (0)
 
 static int
-pc_render_add_request(struct intel_engine_cs *ring)
+pc_render_add_request(struct drm_i915_gem_request *req)
 {
+       struct intel_engine_cs *ring = req->ring;
        u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
        int ret;
 
@@ -1429,8 +1429,7 @@ pc_render_add_request(struct intel_engine_cs *ring)
                        PIPE_CONTROL_WRITE_FLUSH |
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
        intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(ring,
-                   i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+       intel_ring_emit(ring, i915_gem_request_get_seqno(req));
        intel_ring_emit(ring, 0);
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
@@ -1449,8 +1448,7 @@ pc_render_add_request(struct intel_engine_cs *ring)
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                        PIPE_CONTROL_NOTIFY);
        intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-       intel_ring_emit(ring,
-                   i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+       intel_ring_emit(ring, i915_gem_request_get_seqno(req));
        intel_ring_emit(ring, 0);
        __intel_ring_advance(ring);
 
@@ -1619,8 +1617,9 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
 }
 
 static int
-i9xx_add_request(struct intel_engine_cs *ring)
+i9xx_add_request(struct drm_i915_gem_request *req)
 {
+       struct intel_engine_cs *ring = req->ring;
        int ret;
 
        ret = intel_ring_begin(ring, 4);
@@ -1629,8 +1628,7 @@ i9xx_add_request(struct intel_engine_cs *ring)
 
        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-       intel_ring_emit(ring,
-                   i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+       intel_ring_emit(ring, i915_gem_request_get_seqno(req));
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        __intel_ring_advance(ring);
 
index 8c713f625755b3d6f0ee1615a8fb4fe0d07ff63b..cb6d3d0b25307909743de9f3653301e3b84ddee7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -183,7 +183,7 @@ struct  intel_engine_cs {
        int __must_check (*flush)(struct drm_i915_gem_request *req,
                                  u32   invalidate_domains,
                                  u32   flush_domains);
-       int             (*add_request)(struct intel_engine_cs *ring);
+       int             (*add_request)(struct drm_i915_gem_request *req);
        /* Some chipsets are not quite as coherent as advertised and need
         * an expensive kick to force a true read of the up-to-date seqno.
         * However, the up-to-date seqno is not always required and the last
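
With the vfunc prototype now taking the request, the legacy (non-execlists)
submission path shown in the first hunk reduces to handing the request over
(a caller-side sketch using only names that appear in this patch; error
handling elided):

	/* Non-execlists path in __i915_add_request(): the request itself
	 * carries the engine and seqno, so no OLR lookup is needed here.
	 */
	ret = ring->add_request(request);
	request->tail = intel_ring_get_tail(ringbuf);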