drm/i915: Convert trace functions from seqno to request
author John Harrison <John.C.Harrison@Intel.com>
Mon, 24 Nov 2014 18:49:38 +0000 (18:49 +0000)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Wed, 3 Dec 2014 08:35:21 +0000 (09:35 +0100)
All the code above is now using requests rather than seqnos, so it is possible to
convert the trace functions across as well. Note that, rather than getting into
problematic reference counting issues, the trace code only saves the seqno and ring
values from the request structure, not the request pointer itself.
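
The resulting pattern (abridged from the i915_gem_request event class in the diff
below) derives both values inside TP_fast_assign, so the tracepoint never keeps a
pointer to the request once it has fired:

    DECLARE_EVENT_CLASS(i915_gem_request,
                TP_PROTO(struct drm_i915_gem_request *req),
                TP_ARGS(req),
                ...
                TP_fast_assign(
                               /* Look up ring and seqno at trace time; the
                                * request pointer itself is not stored, so no
                                * reference needs to be taken or dropped. */
                               struct intel_engine_cs *ring =
                                                    i915_gem_request_get_ring(req);
                               __entry->dev = ring->dev->primary->index;
                               __entry->ring = ring->id;
                               __entry->seqno = i915_gem_request_get_seqno(req);
                               ),
                ...
    );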

For: VIZ-4377
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Thomas Daniel <Thomas.Daniel@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_trace.h

diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index c403bacee1615127a6f28fd8094f8b1f3318a881..e79815531e1a8ee66e70c484e032c5d9a1b17f63 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1241,8 +1241,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                return -ENODEV;
 
        /* Record current time in case interrupted by signal, or wedged */
-       trace_i915_gem_request_wait_begin(i915_gem_request_get_ring(req),
-                                         i915_gem_request_get_seqno(req));
+       trace_i915_gem_request_wait_begin(req);
        before = ktime_get_raw_ns();
        for (;;) {
                struct timer_list timer;
@@ -1294,8 +1293,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
                }
        }
        now = ktime_get_raw_ns();
-       trace_i915_gem_request_wait_end(i915_gem_request_get_ring(req),
-                                       i915_gem_request_get_seqno(req));
+       trace_i915_gem_request_wait_end(req);
 
        if (!irq_test_in_progress)
                ring->irq_put(ring);
@@ -2500,7 +2498,7 @@ int __i915_add_request(struct intel_engine_cs *ring,
                spin_unlock(&file_priv->mm.lock);
        }
 
-       trace_i915_gem_request_add(ring, request->seqno);
+       trace_i915_gem_request_add(request);
        ring->outstanding_lazy_request = NULL;
 
        i915_queue_hangcheck(ring->dev);
@@ -2776,7 +2774,7 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
                if (!i915_seqno_passed(seqno, request->seqno))
                        break;
 
-               trace_i915_gem_request_retire(ring, request->seqno);
+               trace_i915_gem_request_retire(request);
 
                /* This is one of the few common intersection points
                 * between legacy ringbuffer submission and execlists:
@@ -3006,7 +3004,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
-       trace_i915_gem_ring_sync_to(from, to, seqno);
+       trace_i915_gem_ring_sync_to(from, to, obj->last_read_req);
        ret = to->semaphore.sync_to(to, from, seqno);
        if (!ret)
                /* We use last_read_req because sync_to()
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index faada754405083249a11e55e0a4b88882eb5c054..0c25f6202ca4bb41775d9485754faedaf2624be3 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -1211,9 +1211,7 @@ i915_gem_ringbuffer_submission(struct drm_device *dev, struct drm_file *file,
                        return ret;
        }
 
-       trace_i915_gem_ring_dispatch(ring,
-                   i915_gem_request_get_seqno(intel_ring_get_request(ring)),
-                   flags);
+       trace_i915_gem_ring_dispatch(intel_ring_get_request(ring), flags);
 
        i915_gem_execbuffer_move_to_active(vmas, ring);
        i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 751d4ad14d6224028c93b1bdcdeec1f2d1b46a16..2c0327b6661863f95190f7108f0a2127941b3264 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -328,8 +328,8 @@ TRACE_EVENT(i915_gem_evict_vm,
 TRACE_EVENT(i915_gem_ring_sync_to,
            TP_PROTO(struct intel_engine_cs *from,
                     struct intel_engine_cs *to,
-                    u32 seqno),
-           TP_ARGS(from, to, seqno),
+                    struct drm_i915_gem_request *req),
+           TP_ARGS(from, to, req),
 
            TP_STRUCT__entry(
                             __field(u32, dev)
@@ -342,7 +342,7 @@ TRACE_EVENT(i915_gem_ring_sync_to,
                           __entry->dev = from->dev->primary->index;
                           __entry->sync_from = from->id;
                           __entry->sync_to = to->id;
-                          __entry->seqno = seqno;
+                          __entry->seqno = i915_gem_request_get_seqno(req);
                           ),
 
            TP_printk("dev=%u, sync-from=%u, sync-to=%u, seqno=%u",
@@ -352,8 +352,8 @@ TRACE_EVENT(i915_gem_ring_sync_to,
 );
 
 TRACE_EVENT(i915_gem_ring_dispatch,
-           TP_PROTO(struct intel_engine_cs *ring, u32 seqno, u32 flags),
-           TP_ARGS(ring, seqno, flags),
+           TP_PROTO(struct drm_i915_gem_request *req, u32 flags),
+           TP_ARGS(req, flags),
 
            TP_STRUCT__entry(
                             __field(u32, dev)
@@ -363,11 +363,13 @@ TRACE_EVENT(i915_gem_ring_dispatch,
                             ),
 
            TP_fast_assign(
+                          struct intel_engine_cs *ring =
+                                               i915_gem_request_get_ring(req);
                           __entry->dev = ring->dev->primary->index;
                           __entry->ring = ring->id;
-                          __entry->seqno = seqno;
+                          __entry->seqno = i915_gem_request_get_seqno(req);
                           __entry->flags = flags;
-                          i915_trace_irq_get(ring, seqno);
+                          i915_trace_irq_get(ring, __entry->seqno);
                           ),
 
            TP_printk("dev=%u, ring=%u, seqno=%u, flags=%x",
@@ -398,8 +400,8 @@ TRACE_EVENT(i915_gem_ring_flush,
 );
 
 DECLARE_EVENT_CLASS(i915_gem_request,
-           TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
-           TP_ARGS(ring, seqno),
+           TP_PROTO(struct drm_i915_gem_request *req),
+           TP_ARGS(req),
 
            TP_STRUCT__entry(
                             __field(u32, dev)
@@ -408,9 +410,11 @@ DECLARE_EVENT_CLASS(i915_gem_request,
                             ),
 
            TP_fast_assign(
+                          struct intel_engine_cs *ring =
+                                               i915_gem_request_get_ring(req);
                           __entry->dev = ring->dev->primary->index;
                           __entry->ring = ring->id;
-                          __entry->seqno = seqno;
+                          __entry->seqno = i915_gem_request_get_seqno(req);
                           ),
 
            TP_printk("dev=%u, ring=%u, seqno=%u",
@@ -418,8 +422,8 @@ DECLARE_EVENT_CLASS(i915_gem_request,
 );
 
 DEFINE_EVENT(i915_gem_request, i915_gem_request_add,
-           TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
-           TP_ARGS(ring, seqno)
+           TP_PROTO(struct drm_i915_gem_request *req),
+           TP_ARGS(req)
 );
 
 TRACE_EVENT(i915_gem_request_complete,
@@ -443,13 +447,13 @@ TRACE_EVENT(i915_gem_request_complete,
 );
 
 DEFINE_EVENT(i915_gem_request, i915_gem_request_retire,
-           TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
-           TP_ARGS(ring, seqno)
+           TP_PROTO(struct drm_i915_gem_request *req),
+           TP_ARGS(req)
 );
 
 TRACE_EVENT(i915_gem_request_wait_begin,
-           TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
-           TP_ARGS(ring, seqno),
+           TP_PROTO(struct drm_i915_gem_request *req),
+           TP_ARGS(req),
 
            TP_STRUCT__entry(
                             __field(u32, dev)
@@ -465,10 +469,13 @@ TRACE_EVENT(i915_gem_request_wait_begin,
             * less desirable.
             */
            TP_fast_assign(
+                          struct intel_engine_cs *ring =
+                                               i915_gem_request_get_ring(req);
                           __entry->dev = ring->dev->primary->index;
                           __entry->ring = ring->id;
-                          __entry->seqno = seqno;
-                          __entry->blocking = mutex_is_locked(&ring->dev->struct_mutex);
+                          __entry->seqno = i915_gem_request_get_seqno(req);
+                          __entry->blocking =
+                                    mutex_is_locked(&ring->dev->struct_mutex);
                           ),
 
            TP_printk("dev=%u, ring=%u, seqno=%u, blocking=%s",
@@ -477,8 +484,8 @@ TRACE_EVENT(i915_gem_request_wait_begin,
 );
 
 DEFINE_EVENT(i915_gem_request, i915_gem_request_wait_end,
-           TP_PROTO(struct intel_engine_cs *ring, u32 seqno),
-           TP_ARGS(ring, seqno)
+           TP_PROTO(struct drm_i915_gem_request *req),
+           TP_ARGS(req)
 );
 
 DECLARE_EVENT_CLASS(i915_ring,