drm/nouveau: protect channel create/destroy and irq handler with a spinlock
author		Maarten Maathuis <madman2003@gmail.com>
		Mon, 1 Feb 2010 19:58:27 +0000 (20:58 +0100)
committer	Ben Skeggs <bskeggs@redhat.com>
		Thu, 25 Feb 2010 05:07:53 +0000 (15:07 +1000)
The nv50 pgraph interrupt handler, for example, can re-enable pgraph fifo
access, which is unsafe while a pgraph context is being unloaded (we need to
guarantee that no ctxprog is running at that point).

Signed-off-by: Maarten Maathuis <madman2003@gmail.com>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
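
A minimal sketch of the locking pattern this patch introduces, using hypothetical stand-in names rather than nouveau's real structures: both the channel create/destroy paths (process context) and the interrupt handler take the same spinlock, and the process-context side uses the _irqsave variant so the handler cannot run on the same CPU while the lock is held.

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(context_switch_lock);	/* stand-in for dev_priv->context_switch_lock */

/* Process context: channel teardown must not race the IRQ handler. */
static void teardown_context(void)
{
	unsigned long flags;

	spin_lock_irqsave(&context_switch_lock, flags);
	/* ... disable fifo access, unload the pgraph context ... */
	spin_unlock_irqrestore(&context_switch_lock, flags);
}

/* IRQ context: error handling that may re-enable fifo access. */
static irqreturn_t engine_irq_handler(int irq, void *arg)
{
	unsigned long flags;

	spin_lock_irqsave(&context_switch_lock, flags);
	/* ... acknowledge and service PFIFO/PGRAPH interrupts ... */
	spin_unlock_irqrestore(&context_switch_lock, flags);
	return IRQ_HANDLED;
}
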
drivers/gpu/drm/nouveau/nouveau_channel.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_irq.c
drivers/gpu/drm/nouveau/nouveau_state.c
drivers/gpu/drm/nouveau/nv04_fifo.c
drivers/gpu/drm/nouveau/nv40_fifo.c
drivers/gpu/drm/nouveau/nv50_fifo.c

diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 2281f99da7fcef31da401ea5d31811e78e93cb93..f7ca95003f54386bc45143e93226f3d0c950f515 100644
@@ -275,9 +275,18 @@ nouveau_channel_free(struct nouveau_channel *chan)
         */
        nouveau_fence_fini(chan);
 
-       /* Ensure the channel is no longer active on the GPU */
+       /* This will prevent pfifo from switching channels. */
        pfifo->reassign(dev, false);
 
+       /* We want to give pgraph a chance to idle and get rid of all potential
+        * errors. We need to do this before the lock, otherwise the irq handler
+        * is unable to process them.
+        */
+       if (pgraph->channel(dev) == chan)
+               nouveau_wait_for_idle(dev);
+
+       spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
        pgraph->fifo_access(dev, false);
        if (pgraph->channel(dev) == chan)
                pgraph->unload_context(dev);
@@ -293,6 +302,8 @@ nouveau_channel_free(struct nouveau_channel *chan)
 
        pfifo->reassign(dev, true);
 
+       spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
        /* Release the channel's resources */
        nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
        if (chan->pushbuf_bo) {
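
The ordering in the hunk above is deliberate: the idle wait happens before the lock is taken, because draining pending pgraph errors still requires the interrupt handler, and the handler blocks on context_switch_lock once we hold it. A condensed sketch of the sequence (names as in the hunk above):

/* 1. Stop PFIFO from switching channels. */
pfifo->reassign(dev, false);

/* 2. Let pgraph idle and let the IRQ handler drain any pending errors;
 *    it could no longer do so once context_switch_lock is held. */
if (pgraph->channel(dev) == chan)
	nouveau_wait_for_idle(dev);

/* 3. Exclude the IRQ handler, then disable fifo access and unload. */
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
pgraph->fifo_access(dev, false);
if (pgraph->channel(dev) == chan)
	pgraph->unload_context(dev);
/* ... PFIFO context teardown elided ... */
pfifo->reassign(dev, true);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
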
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 1c15ef37b71cffc9c234f08b0337e50e1af4ff6e..52cc13bd02b9e6527fa3d4a2c14a9d0180f33c70 100644
@@ -533,6 +533,9 @@ struct drm_nouveau_private {
        struct nouveau_engine engine;
        struct nouveau_channel *channel;
 
+       /* For PFIFO and PGRAPH. */
+       spinlock_t context_switch_lock;
+
        /* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
        struct nouveau_gpuobj *ramht;
        uint32_t ramin_rsvd_vram;
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 447f9f69d6b14fd28b5d01de67f0771c9aefa913..95220ddebb45a0f1880e3b200e161480588a9133 100644
@@ -691,11 +691,14 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
        struct drm_device *dev = (struct drm_device *)arg;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t status, fbdev_flags = 0;
+       unsigned long flags;
 
        status = nv_rd32(dev, NV03_PMC_INTR_0);
        if (!status)
                return IRQ_NONE;
 
+       spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
        if (dev_priv->fbdev_info) {
                fbdev_flags = dev_priv->fbdev_info->flags;
                dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
@@ -733,5 +736,7 @@ nouveau_irq_handler(DRM_IRQ_ARGS)
        if (dev_priv->fbdev_info)
                dev_priv->fbdev_info->flags = fbdev_flags;
 
+       spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
+
        return IRQ_HANDLED;
 }
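
With the handler body under the lock, every engine-specific handler it calls (including the nv50 pgraph error path mentioned in the commit message) now runs with context_switch_lock held and is therefore serialized against channel create/destroy. One way such an invariant can be made explicit, shown only as a hypothetical sketch (the function below is made up; assert_spin_locked() itself is a standard kernel helper):

#include <linux/spinlock.h>

/* Hypothetical: document that a path is only legal under the new lock. */
static void engine_error_handler(spinlock_t *context_switch_lock)
{
	assert_spin_locked(context_switch_lock);
	/* ... safe to touch PFIFO/PGRAPH state, e.g. re-enable fifo access ... */
}
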
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index a4851af5b05ec535816eeb98973675c6cbb03aa4..ed5ac0b9a0acd0382f71feeaca379b6bb2f816b1 100644
@@ -391,6 +391,7 @@ nouveau_card_init(struct drm_device *dev)
                goto out;
        engine = &dev_priv->engine;
        dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
+       spin_lock_init(&dev_priv->context_switch_lock);
 
        /* Parse BIOS tables / Run init tables if card not POSTed */
        if (drm_core_check_feature(dev, DRIVER_MODESET)) {
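
The lock is initialized this early in nouveau_card_init() so that it is valid before the interrupt handler can possibly run and take it. A minimal sketch of that ordering constraint (the drm_irq_install() call site is an assumption for illustration, not taken from this hunk):

/* The lock must be usable before the IRQ handler is installed, since
 * nouveau_irq_handler() now takes it unconditionally. */
spin_lock_init(&dev_priv->context_switch_lock);

/* ... BIOS parsing, engine and hardware init ... */

ret = drm_irq_install(dev);	/* assumed call site; handler may fire from here on */
if (ret)
	goto out;		/* nouveau_card_init's existing error path */
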
diff --git a/drivers/gpu/drm/nouveau/nv04_fifo.c b/drivers/gpu/drm/nouveau/nv04_fifo.c
index f31347b8c9b05da1977fcd00b5108c725434f689..66fe55983b6e3b7b9cf7b14700a7c1ba5b5681c9 100644
@@ -117,6 +117,7 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
 {
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
+       unsigned long flags;
        int ret;
 
        ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
@@ -127,6 +128,8 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
        if (ret)
                return ret;
 
+       spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
        /* Setup initial state */
        dev_priv->engine.instmem.prepare_access(dev, true);
        RAMFC_WR(DMA_PUT, chan->pushbuf_base);
@@ -144,6 +147,8 @@ nv04_fifo_create_context(struct nouveau_channel *chan)
        /* enable the fifo dma operation */
        nv_wr32(dev, NV04_PFIFO_MODE,
                nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+
+       spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
        return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nv40_fifo.c b/drivers/gpu/drm/nouveau/nv40_fifo.c
index b4f19ccb8b41a2c9f96f460b734aa2971bdd9ca8..6b2ef4a9fce17c135c17ebe410866ef8f60d4b30 100644
@@ -37,6 +37,7 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t fc = NV40_RAMFC(chan->id);
+       unsigned long flags;
        int ret;
 
        ret = nouveau_gpuobj_new_fake(dev, NV40_RAMFC(chan->id), ~0,
@@ -45,6 +46,8 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
        if (ret)
                return ret;
 
+       spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
        dev_priv->engine.instmem.prepare_access(dev, true);
        nv_wi32(dev, fc +  0, chan->pushbuf_base);
        nv_wi32(dev, fc +  4, chan->pushbuf_base);
@@ -63,6 +66,8 @@ nv40_fifo_create_context(struct nouveau_channel *chan)
        /* enable the fifo dma operation */
        nv_wr32(dev, NV04_PFIFO_MODE,
                nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
+
+       spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
        return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nv50_fifo.c b/drivers/gpu/drm/nouveau/nv50_fifo.c
index 204a79ff10f4e529479bc0136f4209282e085d8c..369ecb4cee57f84bd7a94555f5880862dd1ec878 100644
@@ -243,6 +243,7 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *ramfc = NULL;
+       unsigned long flags;
        int ret;
 
        NV_DEBUG(dev, "ch%d\n", chan->id);
@@ -278,6 +279,8 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
                        return ret;
        }
 
+       spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
+
        dev_priv->engine.instmem.prepare_access(dev, true);
 
        nv_wo32(dev, ramfc, 0x08/4, chan->pushbuf_base);
@@ -306,10 +309,12 @@ nv50_fifo_create_context(struct nouveau_channel *chan)
        ret = nv50_fifo_channel_enable(dev, chan->id, false);
        if (ret) {
                NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret);
+               spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
                nouveau_gpuobj_ref_del(dev, &chan->ramfc);
                return ret;
        }
 
+       spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
        return 0;
 }
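
Note that the nv50 error path has to drop the lock explicitly before bailing out. An equivalent single-unlock arrangement, purely as an illustrative sketch (program_ramfc() and enable_channel() are hypothetical stand-ins for the RAMFC writes and nv50_fifo_channel_enable() above, not functions in the driver):

static int create_context_sketch(struct drm_device *dev,
				 struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	program_ramfc(dev, chan);		/* hypothetical */
	ret = enable_channel(dev, chan->id);	/* hypothetical */
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	if (ret) {
		NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret);
		nouveau_gpuobj_ref_del(dev, &chan->ramfc);
	}
	return ret;
}
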