drm/i915: Update add_request() to take a request structure
author    John Harrison <John.C.Harrison@Intel.com>
          Fri, 29 May 2015 16:43:49 +0000 (17:43 +0100)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
          Tue, 23 Jun 2015 12:02:15 +0000 (14:02 +0200)
Now that all callers of i915_add_request() have a request pointer to hand, the
add-request function can be updated to take that request pointer directly
rather than pulling it out of the ring's OLR (outstanding lazy request).
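
For illustration, a typical call site changes along these lines (a sketch of
the pattern shown in the hunks below, not an additional diff):

    /* Before: the engine was passed in and __i915_add_request() fetched
     * the request from ring->outstanding_lazy_request (the OLR). */
    i915_add_request(req->ring);

    /* After: the request is handed over explicitly. */
    i915_add_request(req);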

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_overlay.c
drivers/gpu/drm/i915/intel_ringbuffer.c

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 0bb6a340d1c961a9eb2f83a9c2cc230cc5b7bb5a..da7cb141a4d502a2375620c47ccf3e661b098623 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -2890,14 +2890,14 @@ void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
 int __must_check i915_gem_suspend(struct drm_device *dev);
-void __i915_add_request(struct intel_engine_cs *ring,
+void __i915_add_request(struct drm_i915_gem_request *req,
                        struct drm_file *file,
                        struct drm_i915_gem_object *batch_obj,
                        bool flush_caches);
-#define i915_add_request(ring) \
-       __i915_add_request(ring, NULL, NULL, true)
-#define i915_add_request_no_flush(ring) \
-       __i915_add_request(ring, NULL, NULL, false)
+#define i915_add_request(req) \
+       __i915_add_request(req, NULL, NULL, true)
+#define i915_add_request_no_flush(req) \
+       __i915_add_request(req, NULL, NULL, false)
 int __i915_wait_request(struct drm_i915_gem_request *req,
                        unsigned reset_counter,
                        bool interruptible,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e80b08b864e77b27a5debd01fa56f6961b21e3d8..c12bdd855be7b3600cbaf664bff97b68b5da0584 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1158,7 +1158,7 @@ i915_gem_check_olr(struct drm_i915_gem_request *req)
        WARN_ON(!mutex_is_locked(&req->ring->dev->struct_mutex));
 
        if (req == req->ring->outstanding_lazy_request)
-               i915_add_request(req->ring);
+               i915_add_request(req);
 
        return 0;
 }
@@ -2468,25 +2468,25 @@ i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
  * request is not being tracked for completion but the work itself is
  * going to happen on the hardware. This would be a Bad Thing(tm).
  */
-void __i915_add_request(struct intel_engine_cs *ring,
+void __i915_add_request(struct drm_i915_gem_request *request,
                        struct drm_file *file,
                        struct drm_i915_gem_object *obj,
                        bool flush_caches)
 {
-       struct drm_i915_private *dev_priv = ring->dev->dev_private;
-       struct drm_i915_gem_request *request;
+       struct intel_engine_cs *ring;
+       struct drm_i915_private *dev_priv;
        struct intel_ringbuffer *ringbuf;
        u32 request_start;
        int ret;
 
-       request = ring->outstanding_lazy_request;
        if (WARN_ON(request == NULL))
                return;
 
-       if (i915.enable_execlists) {
-               ringbuf = request->ctx->engine[ring->id].ringbuf;
-       } else
-               ringbuf = ring->buffer;
+       ring = request->ring;
+       dev_priv = ring->dev->dev_private;
+       ringbuf = request->ringbuf;
+
+       WARN_ON(request != ring->outstanding_lazy_request);
 
        /*
         * To ensure that this call will not fail, space for its emissions
@@ -3338,7 +3338,7 @@ int i915_gpu_idle(struct drm_device *dev)
                                return ret;
                        }
 
-                       i915_add_request_no_flush(req->ring);
+                       i915_add_request_no_flush(req);
                }
 
                WARN_ON(ring->outstanding_lazy_request);
@@ -5122,7 +5122,7 @@ i915_gem_init_hw(struct drm_device *dev)
                        goto out;
                }
 
-               i915_add_request_no_flush(ring);
+               i915_add_request_no_flush(req);
        }
 
 out:
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 9968c02f76f33db10cf1d9d2d022f5e6f81a2b89..896f7a117b995fe6fcb19add0300da4d118abdd4 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1066,7 +1066,7 @@ i915_gem_execbuffer_retire_commands(struct i915_execbuffer_params *params)
        params->ring->gpu_caches_dirty = true;
 
        /* Add a breadcrumb for the completion of the batch buffer */
-       __i915_add_request(params->ring, params->file, params->batch_obj, true);
+       __i915_add_request(params->request, params->file, params->batch_obj, true);
 }
 
 static int
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 36d8cdeaed035763c72e062f37ccc9f798be4963..7ec2421f0a9709987e40b87ebb51d996020b998f 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -11497,7 +11497,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
        }
 
        if (request)
-               i915_add_request_no_flush(request->ring);
+               i915_add_request_no_flush(request);
 
        work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
        work->enable_stall_check = true;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 7bcf1ec4d6aa0711ec32d76b6c4dc6e0441d1e30..d142d284afd7169ad1c3c53df0bfa260c2787bf9 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -2242,7 +2242,7 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
                                goto error;
                        }
 
-                       i915_add_request_no_flush(req->ring);
+                       i915_add_request_no_flush(req);
                }
 
                ctx->rcs_initialized = true;
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 3adb63eb0b99d795d2bbfdd30695e2c08a315e1d..3f709042b86cf161b2dda263c04689576d03994f 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -217,7 +217,7 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
 
        WARN_ON(overlay->last_flip_req);
        i915_gem_request_assign(&overlay->last_flip_req, req);
-       i915_add_request(req->ring);
+       i915_add_request(req);
 
        overlay->flip_tail = tail;
        ret = i915_wait_request(overlay->last_flip_req);
@@ -299,7 +299,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 
        WARN_ON(overlay->last_flip_req);
        i915_gem_request_assign(&overlay->last_flip_req, req);
-       i915_add_request(req->ring);
+       i915_add_request(req);
 
        return 0;
 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 38fa1fad594f845532d85bdb1193f761ddeb8700..049bc7fa3c42662c35fa66282ef487f743766a85 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2167,8 +2167,9 @@ int intel_ring_idle(struct intel_engine_cs *ring)
        struct drm_i915_gem_request *req;
 
        /* We need to add any requests required to flush the objects and ring */
+       WARN_ON(ring->outstanding_lazy_request);
        if (ring->outstanding_lazy_request)
-               i915_add_request(ring);
+               i915_add_request(ring->outstanding_lazy_request);
 
        /* Wait upon the last request to be completed */
        if (list_empty(&ring->request_list))