drm/i915: report all active objects as busy
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2a4ed7ca8b4ec6b1e415c2367144a4c6af158ae5..24ee4622484f7832533e77827d71ca49b39298c0 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3117,6 +3117,7 @@ static void
 i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
 {
        struct drm_device               *dev = obj->dev;
+       drm_i915_private_t              *dev_priv = dev->dev_private;
        struct drm_i915_gem_object      *obj_priv = to_intel_bo(obj);
        uint32_t                        invalidate_domains = 0;
        uint32_t                        flush_domains = 0;
@@ -3179,6 +3180,13 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
                obj->pending_write_domain = obj->write_domain;
        obj->read_domains = obj->pending_read_domains;
 
+       if (flush_domains & I915_GEM_GPU_DOMAINS) {
+               if (obj_priv->ring == &dev_priv->render_ring)
+                       dev_priv->flush_rings |= FLUSH_RENDER_RING;
+               else if (obj_priv->ring == &dev_priv->bsd_ring)
+                       dev_priv->flush_rings |= FLUSH_BSD_RING;
+       }
+
        dev->invalidate_domains |= invalidate_domains;
        dev->flush_domains |= flush_domains;
 #if WATCH_BUF
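The FLUSH_RENDER_RING/FLUSH_BSD_RING flags and the dev_priv->flush_rings field used above belong to a matching i915_drv.h change that is not shown in this excerpt; the sketch below shows the assumed declaration side (names taken from the hunk, flag values illustrative):

/* Sketch only -- assumed i915_drv.h counterpart, not part of this diff.
 * Per-ring flush flags, OR'ed into dev_priv->flush_rings while walking the
 * execbuffer objects, then consumed after i915_gem_flush(). */
#define FLUSH_RENDER_RING	0x1
#define FLUSH_BSD_RING		0x2

typedef struct drm_i915_private {
	/* ... existing members ... */

	/* Rings that need a flush/request for the pending execbuffer;
	 * reset to 0 at the start of i915_gem_do_execbuffer(). */
	uint32_t flush_rings;
} drm_i915_private_t;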
@@ -3718,7 +3726,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                ring = &dev_priv->render_ring;
        }
 
-
        if (args->buffer_count < 1) {
                DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
                return -EINVAL;
@@ -3892,6 +3899,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         */
        dev->invalidate_domains = 0;
        dev->flush_domains = 0;
+       dev_priv->flush_rings = 0;
 
        for (i = 0; i < args->buffer_count; i++) {
                struct drm_gem_object *obj = object_list[i];
@@ -3912,16 +3920,14 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                i915_gem_flush(dev,
                               dev->invalidate_domains,
                               dev->flush_domains);
-               if (dev->flush_domains & I915_GEM_GPU_DOMAINS) {
+               if (dev_priv->flush_rings & FLUSH_RENDER_RING)
                        (void)i915_add_request(dev, file_priv,
-                                       dev->flush_domains,
-                                       &dev_priv->render_ring);
-
-                       if (HAS_BSD(dev))
-                               (void)i915_add_request(dev, file_priv,
-                                               dev->flush_domains,
-                                               &dev_priv->bsd_ring);
-               }
+                                              dev->flush_domains,
+                                              &dev_priv->render_ring);
+               if (dev_priv->flush_rings & FLUSH_BSD_RING)
+                       (void)i915_add_request(dev, file_priv,
+                                              dev->flush_domains,
+                                              &dev_priv->bsd_ring);
        }
 
        for (i = 0; i < args->buffer_count; i++) {
@@ -4359,22 +4365,34 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
        }
 
        mutex_lock(&dev->struct_mutex);
-       /* Update the active list for the hardware's current position.
-        * Otherwise this only updates on a delayed timer or when irqs are
-        * actually unmasked, and our working set ends up being larger than
-        * required.
-        */
-       i915_gem_retire_requests(dev);
 
-       obj_priv = to_intel_bo(obj);
-       /* Don't count being on the flushing list against the object being
-        * done.  Otherwise, a buffer left on the flushing list but not getting
-        * flushed (because nobody's flushing that domain) won't ever return
-        * unbusy and get reused by libdrm's bo cache.  The other expected
-        * consumer of this interface, OpenGL's occlusion queries, also specs
-        * that the objects get unbusy "eventually" without any interference.
+       /* Count all active objects as busy, even if they are currently not used
+        * by the gpu. Users of this interface expect objects to eventually
+        * become non-busy without any further actions, therefore emit any
+        * necessary flushes here.
         */
-       args->busy = obj_priv->active && obj_priv->last_rendering_seqno != 0;
+       obj_priv = to_intel_bo(obj);
+       args->busy = obj_priv->active;
+       if (args->busy) {
+               /* Unconditionally flush objects, even when the gpu still uses this
+                * object. Userspace calling this function indicates that it wants to
+                * use this buffer rather sooner than later, so issuing the required
+                * flush earlier is beneficial.
+                */
+               if (obj->write_domain) {
+                       i915_gem_flush(dev, 0, obj->write_domain);
+                       (void)i915_add_request(dev, file_priv, obj->write_domain, obj_priv->ring);
+               }
+
+               /* Update the active list for the hardware's current position.
+                * Otherwise this only updates on a delayed timer or when irqs
+                * are actually unmasked, and our working set ends up being
+                * larger than required.
+                */
+               i915_gem_retire_requests_ring(dev, obj_priv->ring);
+
+               args->busy = obj_priv->active;
+       }
 
        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);
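The "users of this interface" the new comment refers to (libdrm's bo cache, GL occlusion queries) poll this ioctl from userspace roughly as in the sketch below; it is illustrative only, built on libdrm's drmIoctl() with error handling stripped down:

/* Userspace-side sketch of the busy ioctl changed above; not part of the
 * patch. With the new behaviour the kernel emits any outstanding flush
 * itself, so repeated polling eventually reports idle even if the caller
 * submits no further rendering. */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>		/* drmIoctl() */
#include <drm/i915_drm.h>	/* struct drm_i915_gem_busy */

static bool bo_busy(int fd, uint32_t handle)
{
	struct drm_i915_gem_busy busy;

	memset(&busy, 0, sizeof(busy));
	busy.handle = handle;

	if (drmIoctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) != 0)
		return false;	/* sketch: treat ioctl errors as idle */

	return busy.busy != 0;
}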