drm/i915: Free requests after object release when retiring requests
author Chris Wilson <chris@chris-wilson.co.uk>
Tue, 7 Jan 2014 11:45:14 +0000 (11:45 +0000)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Fri, 10 Jan 2014 07:21:52 +0000 (08:21 +0100)
Freeing a request triggers the destruction of the context. This needs to
occur after all objects are themselves unbound from the context, and so
the free request needs to occur after the object release during retire.

This tidies up

commit e20780439b26ba95aeb29d3e27cd8cc32bc82a4c
Author: Ben Widawsky <ben@bwidawsk.net>
Date:   Fri Dec 6 14:11:22 2013 -0800

    drm/i915: Defer request freeing

by simply swapping the order of operations rather than introducing
further complexity - as noted during review.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Ben Widawsky <ben@bwidawsk.net>
Reviewed-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_gem.c

index ef274f69c00c39aa7ec6ee7cdd1104cc973ad00e..4f54a13231715d9a15e3ec2626e7bebed9d556c4 100644 (file)
@@ -2426,8 +2426,6 @@ void i915_gem_reset(struct drm_device *dev)
 void
 i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 {
-       LIST_HEAD(deferred_request_free);
-       struct drm_i915_gem_request *request;
        uint32_t seqno;
 
        if (list_empty(&ring->request_list))
@@ -2437,7 +2435,27 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 
        seqno = ring->get_seqno(ring, true);
 
+       /* Move any buffers on the active list that are no longer referenced
+        * by the ringbuffer to the flushing/inactive lists as appropriate,
+        * before we free the context associated with the requests.
+        */
+       while (!list_empty(&ring->active_list)) {
+               struct drm_i915_gem_object *obj;
+
+               obj = list_first_entry(&ring->active_list,
+                                     struct drm_i915_gem_object,
+                                     ring_list);
+
+               if (!i915_seqno_passed(seqno, obj->last_read_seqno))
+                       break;
+
+               i915_gem_object_move_to_inactive(obj);
+       }
+
+
        while (!list_empty(&ring->request_list)) {
+               struct drm_i915_gem_request *request;
+
                request = list_first_entry(&ring->request_list,
                                           struct drm_i915_gem_request,
                                           list);
@@ -2453,23 +2471,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
                 */
                ring->last_retired_head = request->tail;
 
-               list_move_tail(&request->list, &deferred_request_free);
-       }
-
-       /* Move any buffers on the active list that are no longer referenced
-        * by the ringbuffer to the flushing/inactive lists as appropriate.
-        */
-       while (!list_empty(&ring->active_list)) {
-               struct drm_i915_gem_object *obj;
-
-               obj = list_first_entry(&ring->active_list,
-                                     struct drm_i915_gem_object,
-                                     ring_list);
-
-               if (!i915_seqno_passed(seqno, obj->last_read_seqno))
-                       break;
-
-               i915_gem_object_move_to_inactive(obj);
+               i915_gem_free_request(request);
        }
 
        if (unlikely(ring->trace_irq_seqno &&
@@ -2478,13 +2480,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
                ring->trace_irq_seqno = 0;
        }
 
-       /* Finish processing active list before freeing request */
-       while (!list_empty(&deferred_request_free)) {
-               request = list_first_entry(&deferred_request_free,
-                                          struct drm_i915_gem_request,
-                                          list);
-               i915_gem_free_request(request);
-       }
        WARN_ON(i915_verify_lists(ring->dev));
 }