drm/i915: Add GPGPU_THREADS_DISPATCHED to the register whitelist
[firefly-linux-kernel-4.4.55.git] / drivers / gpu / drm / i915 / i915_cmd_parser.c
index b882bf2a238828075d3be855817d38d02e12dfad..806e812340d0d96b06811fa6db19f9b78efaa2aa 100644 (file)
@@ -405,6 +405,7 @@ static const struct drm_i915_cmd_table hsw_blt_ring_cmds[] = {
 #define REG64(addr) (addr), (addr + sizeof(u32))
 
 static const u32 gen7_render_regs[] = {
+       REG64(GPGPU_THREADS_DISPATCHED),
        REG64(HS_INVOCATION_COUNT),
        REG64(DS_INVOCATION_COUNT),
        REG64(IA_VERTICES_COUNT),
@@ -848,6 +849,69 @@ finish:
        return (u32*)addr;
 }
 
+/* Returns a vmap'd pointer to dest_obj (which the caller must vunmap) or an ERR_PTR */
+static u32 *copy_batch(struct drm_i915_gem_object *dest_obj,
+                      struct drm_i915_gem_object *src_obj,
+                      u32 batch_start_offset,
+                      u32 batch_len)
+{
+       int ret = 0;
+       int needs_clflush = 0;
+       u32 *src_base, *dest_base = NULL;
+       u32 *src_addr, *dest_addr;
+       u32 offset = batch_start_offset / sizeof(*dest_addr);
+       u32 end = batch_start_offset + batch_len;
+
+       if (end > dest_obj->base.size || end > src_obj->base.size)
+               return ERR_PTR(-E2BIG);
+
+       ret = i915_gem_obj_prepare_shmem_read(src_obj, &needs_clflush);
+       if (ret) {
+               DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
+               return ERR_PTR(ret);
+       }
+
+       src_base = vmap_batch(src_obj);
+       if (!src_base) {
+               DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
+               ret = -ENOMEM;
+               goto unpin_src;
+       }
+
+       src_addr = src_base + offset;
+
+       if (needs_clflush)
+               drm_clflush_virt_range((char *)src_addr, batch_len);
+
+       ret = i915_gem_object_set_to_cpu_domain(dest_obj, true);
+       if (ret) {
+               DRM_DEBUG_DRIVER("CMD: Failed to set batch CPU domain\n");
+               goto unmap_src;
+       }
+
+       dest_base = vmap_batch(dest_obj);
+       if (!dest_base) {
+               DRM_DEBUG_DRIVER("CMD: Failed to vmap shadow batch\n");
+               ret = -ENOMEM;
+               goto unmap_src;
+       }
+
+       dest_addr = dest_base + offset;
+
+       if (batch_start_offset != 0)
+               memset((u8 *)dest_base, 0, batch_start_offset);
+
+       memcpy(dest_addr, src_addr, batch_len);
+       memset((u8 *)dest_addr + batch_len, 0, dest_obj->base.size - end);
+
+unmap_src:
+       vunmap(src_base);
+unpin_src:
+       i915_gem_object_unpin_pages(src_obj);
+
+       return ret ? ERR_PTR(ret) : dest_base;
+}
+
 /**
  * i915_needs_cmd_parser() - should a given ring use software command parsing?
  * @ring: the ring in question
@@ -964,7 +1028,9 @@ static bool check_cmd(const struct intel_engine_cs *ring,
  * i915_parse_cmds() - parse a submitted batch buffer for privilege violations
  * @ring: the ring on which the batch is to execute
  * @batch_obj: the batch buffer in question
+ * @shadow_batch_obj: copy of the batch buffer in question
  * @batch_start_offset: byte offset in the batch at which execution starts
+ * @batch_len: length of the commands in batch_obj, in bytes
  * @is_master: is the submitting process the drm master?
  *
  * Parses the specified batch buffer looking for privilege violations as
@@ -975,33 +1041,38 @@ static bool check_cmd(const struct intel_engine_cs *ring,
  */
 int i915_parse_cmds(struct intel_engine_cs *ring,
                    struct drm_i915_gem_object *batch_obj,
+                   struct drm_i915_gem_object *shadow_batch_obj,
                    u32 batch_start_offset,
+                   u32 batch_len,
                    bool is_master)
 {
        int ret = 0;
        u32 *cmd, *batch_base, *batch_end;
        struct drm_i915_cmd_descriptor default_desc = { 0 };
-       int needs_clflush = 0;
        bool oacontrol_set = false; /* OACONTROL tracking. See check_cmd() */
 
-       ret = i915_gem_obj_prepare_shmem_read(batch_obj, &needs_clflush);
+       ret = i915_gem_obj_ggtt_pin(shadow_batch_obj, 4096, 0);
        if (ret) {
-               DRM_DEBUG_DRIVER("CMD: failed to prep read\n");
-               return ret;
+               DRM_DEBUG_DRIVER("CMD: Failed to pin shadow batch\n");
+               return -1;
        }
 
-       batch_base = vmap_batch(batch_obj);
-       if (!batch_base) {
-               DRM_DEBUG_DRIVER("CMD: Failed to vmap batch\n");
-               i915_gem_object_unpin_pages(batch_obj);
-               return -ENOMEM;
+       batch_base = copy_batch(shadow_batch_obj, batch_obj,
+                               batch_start_offset, batch_len);
+       if (IS_ERR(batch_base)) {
+               DRM_DEBUG_DRIVER("CMD: Failed to copy batch\n");
+               i915_gem_object_ggtt_unpin(shadow_batch_obj);
+               return PTR_ERR(batch_base);
        }
 
-       if (needs_clflush)
-               drm_clflush_virt_range((char *)batch_base, batch_obj->base.size);
-
        cmd = batch_base + (batch_start_offset / sizeof(*cmd));
-       batch_end = cmd + (batch_obj->base.size / sizeof(*batch_end));
+
+       /*
+        * We use the batch length as size because the shadow object is as
+        * large or larger, and copy_batch() zero-fills the extra space
+        * (zero is the MI_NOOP opcode). Parsing should be faster this way.
+        */
+       batch_end = cmd + (batch_len / sizeof(*batch_end));
 
        while (cmd < batch_end) {
                const struct drm_i915_cmd_descriptor *desc;
@@ -1061,8 +1132,7 @@ int i915_parse_cmds(struct intel_engine_cs *ring,
        }
 
        vunmap(batch_base);
-
-       i915_gem_object_unpin_pages(batch_obj);
+       i915_gem_object_ggtt_unpin(shadow_batch_obj);
 
        return ret;
 }
@@ -1084,6 +1154,7 @@ int i915_cmd_parser_get_version(void)
         *    hardware parsing enabled (so does not allow new use cases).
         * 2. Allow access to the MI_PREDICATE_SRC0 and
         *    MI_PREDICATE_SRC1 registers.
+        * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
         */
-       return 2;
+       return 3;
 }