drm/i915/ringbuffer: Simplify the ring irq refcounting
author		Chris Wilson <chris@chris-wilson.co.uk>
		Tue, 4 Jan 2011 22:22:56 +0000 (22:22 +0000)
committer	Chris Wilson <chris@chris-wilson.co.uk>
		Tue, 11 Jan 2011 20:43:57 +0000 (20:43 +0000)
Replace the atomic_t irq_refcount with a plain counter and move the
refcounting under dev_priv->irq_lock, so that updating the refcount and
enabling/disabling the ring interrupt happen together and pick up the
appropriate memory barriers from the spinlock.
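
In short, the locking pattern for each get/put pair changes as sketched
below (condensed from the hunks that follow; ironlake_enable_irq stands
in for the per-generation enable/disable calls):

    /* before: atomic refcount, lock taken only around the unmask */
    if (atomic_inc_return(&ring->irq_refcount) == 1) {
            spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
            ironlake_enable_irq(dev_priv, flag);
            spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
    }

    /* after: plain counter, refcount and unmask both under the lock */
    spin_lock(&dev_priv->irq_lock);
    if (ring->irq_refcount++ == 0)
            ironlake_enable_irq(dev_priv, flag);
    spin_unlock(&dev_priv->irq_lock);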

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=32752
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_ringbuffer.h

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 3bff7fb723419d0d9a4d2726c293737fa6fa36d1..13cad981713befbe2abce68a4afdb92ea737ebf1 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -521,22 +521,20 @@ static bool
 render_ring_get_irq(struct intel_ring_buffer *ring)
 {
        struct drm_device *dev = ring->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
 
        if (!dev->irq_enabled)
                return false;
 
-       if (atomic_inc_return(&ring->irq_refcount) == 1) {
-               drm_i915_private_t *dev_priv = dev->dev_private;
-               unsigned long irqflags;
-
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock(&dev_priv->irq_lock);
+       if (ring->irq_refcount++ == 0) {
                if (HAS_PCH_SPLIT(dev))
                        ironlake_enable_irq(dev_priv,
                                            GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
                else
                        i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
        }
+       spin_unlock(&dev_priv->irq_lock);
 
        return true;
 }
@@ -545,20 +543,18 @@ static void
 render_ring_put_irq(struct intel_ring_buffer *ring)
 {
        struct drm_device *dev = ring->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
 
-       if (atomic_dec_and_test(&ring->irq_refcount)) {
-               drm_i915_private_t *dev_priv = dev->dev_private;
-               unsigned long irqflags;
-
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock(&dev_priv->irq_lock);
+       if (--ring->irq_refcount == 0) {
                if (HAS_PCH_SPLIT(dev))
                        ironlake_disable_irq(dev_priv,
                                             GT_USER_INTERRUPT |
                                             GT_PIPE_NOTIFY);
                else
                        i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
        }
+       spin_unlock(&dev_priv->irq_lock);
 }
 
 void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
@@ -619,18 +615,15 @@ static bool
 ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
 {
        struct drm_device *dev = ring->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
 
        if (!dev->irq_enabled)
               return false;
 
-       if (atomic_inc_return(&ring->irq_refcount) == 1) {
-               drm_i915_private_t *dev_priv = dev->dev_private;
-               unsigned long irqflags;
-
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock(&dev_priv->irq_lock);
+       if (ring->irq_refcount++ == 0)
                ironlake_enable_irq(dev_priv, flag);
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-       }
+       spin_unlock(&dev_priv->irq_lock);
 
        return true;
 }
@@ -639,35 +632,30 @@ static void
 ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
 {
        struct drm_device *dev = ring->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
 
-       if (atomic_dec_and_test(&ring->irq_refcount)) {
-               drm_i915_private_t *dev_priv = dev->dev_private;
-               unsigned long irqflags;
-
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock(&dev_priv->irq_lock);
+       if (--ring->irq_refcount == 0)
                ironlake_disable_irq(dev_priv, flag);
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
-       }
+       spin_unlock(&dev_priv->irq_lock);
 }
 
 static bool
 gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
        struct drm_device *dev = ring->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
 
        if (!dev->irq_enabled)
               return false;
 
-       if (atomic_inc_return(&ring->irq_refcount) == 1) {
-               drm_i915_private_t *dev_priv = dev->dev_private;
-               unsigned long irqflags;
-
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock(&dev_priv->irq_lock);
+       if (ring->irq_refcount++ == 0) {
                ring->irq_mask &= ~rflag;
                I915_WRITE_IMR(ring, ring->irq_mask);
                ironlake_enable_irq(dev_priv, gflag);
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
        }
+       spin_unlock(&dev_priv->irq_lock);
 
        return true;
 }
@@ -676,17 +664,15 @@ static void
 gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
        struct drm_device *dev = ring->dev;
+       drm_i915_private_t *dev_priv = dev->dev_private;
 
-       if (atomic_dec_and_test(&ring->irq_refcount)) {
-               drm_i915_private_t *dev_priv = dev->dev_private;
-               unsigned long irqflags;
-
-               spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+       spin_lock(&dev_priv->irq_lock);
+       if (--ring->irq_refcount == 0) {
                ring->irq_mask |= rflag;
                I915_WRITE_IMR(ring, ring->irq_mask);
                ironlake_disable_irq(dev_priv, gflag);
-               spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
        }
+       spin_unlock(&dev_priv->irq_lock);
 }
 
 static bool
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 9b134b8643cb5b670f3ff5567cd0afa496ef0fbd..6b1d9a5a7d0712865c5c9f24625e13960d66f29a 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -55,11 +55,11 @@ struct  intel_ring_buffer {
        int             effective_size;
        struct intel_hw_status_page status_page;
 
+       u32             irq_refcount;
        u32             irq_mask;
        u32             irq_seqno;              /* last seq seem at irq time */
        u32             waiting_seqno;
        u32             sync_seqno[I915_NUM_RINGS-1];
-       atomic_t        irq_refcount;
        bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
        void            (*irq_put)(struct intel_ring_buffer *ring);
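
A minimal sketch of how a caller is expected to use the refcounted
interface (hypothetical caller, inferred from the __must_check bool
return of irq_get; the real wait paths live elsewhere in i915):

    /* take a reference on the ring's user interrupt before waiting */
    if (ring->irq_get(ring)) {
            /* ... wait for the expected seqno to be signalled ... */
            ring->irq_put(ring);        /* drop the reference */
    }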