drm/i915/lrc: Prevent preemption when lite-restore is disabled
Author:     Michel Thierry <michel.thierry@intel.com>
AuthorDate: Fri, 4 Sep 2015 11:59:15 +0000 (12:59 +0100)
Commit:     Daniel Vetter <daniel.vetter@ffwll.ch>
CommitDate: Mon, 14 Sep 2015 08:25:01 +0000 (10:25 +0200)
When WaEnableForceRestoreInCtxtDescForVCS is required, it is only
safe to send new contexts if the last reported event is "active to
idle". Otherwise the same context can fully preempt itself because
lite-restore is disabled.

Testcase: igt/gem_concurrent_blit
Reported-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Michel Thierry <michel.thierry@intel.com>
Reviewed-by: Arun Siluvery <arun.siluvery@linux.intel.com>
Tested-by: Daniele Ceraolo Spurio <daniele.ceraolospurio@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
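
For reference, the submission gating this patch adds to intel_lrc_irq_handler() reduces to the condition sketched below. This is a minimal standalone sketch using made-up stand-in names (struct fake_engine, may_unqueue(), CTX_STATUS_ACTIVE_IDLE), not the driver's real types or constants; the authoritative change is the diff that follows.

/*
 * Minimal standalone sketch of the gating logic; every name here is a
 * stand-in, not an i915 driver identifier.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the "active to idle" bit in the context status event. */
#define CTX_STATUS_ACTIVE_IDLE	(1u << 3)

struct fake_engine {
	bool lite_restore_wa;	/* what disable_lite_restore_wa() would report */
};

/*
 * With the workaround active, only an "active to idle" event makes it
 * safe to unqueue new contexts; otherwise the same context could fully
 * preempt itself because lite-restore is disabled.
 */
static bool may_unqueue(const struct fake_engine *engine,
			uint32_t last_status, unsigned int submit_contexts)
{
	if (engine->lite_restore_wa)
		return (last_status & CTX_STATUS_ACTIVE_IDLE) &&
		       submit_contexts != 0;

	return submit_contexts != 0;
}

int main(void)
{
	struct fake_engine vcs = { .lite_restore_wa = true };

	/* Some other event (e.g. element switch): must not unqueue. */
	printf("other event:    %d\n", may_unqueue(&vcs, 0, 1));
	/* Active to idle: safe to unqueue the next contexts. */
	printf("active to idle: %d\n",
	       may_unqueue(&vcs, CTX_STATUS_ACTIVE_IDLE, 1));

	return 0;
}

Without the workaround (lite_restore_wa false), the helper degenerates to the original submit_contexts != 0 check, which is exactly the else branch introduced by the patch.
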
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d8b605fc4aa9bc79eba5e686d92b40a20a81f88a..c3fca4be7a8c429a42b984192f96e6604452b369 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -277,10 +277,18 @@ u32 intel_execlists_ctx_id(struct drm_i915_gem_object *ctx_obj)
        return lrca >> 12;
 }
 
+static bool disable_lite_restore_wa(struct intel_engine_cs *ring)
+{
+       struct drm_device *dev = ring->dev;
+
+       return ((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
+               (IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) &&
+              (ring->id == VCS || ring->id == VCS2);
+}
+
 uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
                                     struct intel_engine_cs *ring)
 {
-       struct drm_device *dev = ring->dev;
        struct drm_i915_gem_object *ctx_obj = ctx->engine[ring->id].state;
        uint64_t desc;
        uint64_t lrca = i915_gem_obj_ggtt_offset(ctx_obj) +
@@ -302,9 +310,7 @@ uint64_t intel_lr_context_descriptor(struct intel_context *ctx,
 
        /* WaEnableForceRestoreInCtxtDescForVCS:skl */
        /* WaEnableForceRestoreInCtxtDescForVCS:bxt */
-       if (((IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) ||
-            (IS_BROXTON(dev) && INTEL_REVID(dev) == BXT_REVID_A0)) &&
-           (ring->id == VCS || ring->id == VCS2))
+       if (disable_lite_restore_wa(ring))
                desc |= GEN8_CTX_FORCE_RESTORE;
 
        return desc;
@@ -495,7 +501,7 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
        u32 status_pointer;
        u8 read_pointer;
        u8 write_pointer;
-       u32 status;
+       u32 status = 0;
        u32 status_id;
        u32 submit_contexts = 0;
 
@@ -533,8 +539,14 @@ void intel_lrc_irq_handler(struct intel_engine_cs *ring)
                }
        }
 
-       if (submit_contexts != 0)
+       if (disable_lite_restore_wa(ring)) {
+               /* Prevent a ctx from preempting itself */
+               if ((status & GEN8_CTX_STATUS_ACTIVE_IDLE) &&
+                   (submit_contexts != 0))
+                       execlists_context_unqueue(ring);
+       } else if (submit_contexts != 0) {
                execlists_context_unqueue(ring);
+       }
 
        spin_unlock(&ring->execlist_lock);