drm/nouveau: tidy up and extend dma object creation interfaces
author	Ben Skeggs <bskeggs@redhat.com>
Tue, 16 Nov 2010 01:50:09 +0000 (11:50 +1000)
committer	Ben Skeggs <bskeggs@redhat.com>
Fri, 3 Dec 2010 05:11:59 +0000 (15:11 +1000)
Reviewed-by: Francisco Jerez <currojerez@riseup.net>
Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
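
In brief: the NV_DMA_ACCESS_* / NV_DMA_TARGET_* defines, which (apart from PCI_NONLINEAR) mirrored the pre-NV50 hardware encoding, are replaced by driver-internal NV_MEM_ACCESS_* / NV_MEM_TARGET_* flags; nouveau_gpuobj_gart_dma_new() goes away in favour of an NV_MEM_TARGET_GART target handled inside nouveau_gpuobj_dma_new(); and the NV50 ctxdma layout is split out into nv50_gpuobj_dma_new() / nv50_gpuobj_dma_init(). The caller-visible change, condensed from the nouveau_state.c hunk below (error handling omitted):

    /* before: GART ctxdmas needed a dedicated helper */
    ret = nouveau_gpuobj_gart_dma_new(chan, 0, dev_priv->gart_info.aper_size,
                                      NV_DMA_ACCESS_RW, &gpuobj, NULL);

    /* after: one entry point for every target; the AGP/SGDMA special
     * cases are handled inside nouveau_gpuobj_dma_new() itself */
    ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                 0, dev_priv->gart_info.aper_size,
                                 NV_MEM_ACCESS_RW, NV_MEM_TARGET_GART,
                                 &gpuobj);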
drivers/gpu/drm/nouveau/nouveau_channel.c
drivers/gpu/drm/nouveau/nouveau_drv.h
drivers/gpu/drm/nouveau/nouveau_fence.c
drivers/gpu/drm/nouveau/nouveau_notifier.c
drivers/gpu/drm/nouveau/nouveau_object.c
drivers/gpu/drm/nouveau/nouveau_reg.h
drivers/gpu/drm/nouveau/nouveau_sgdma.c
drivers/gpu/drm/nouveau/nouveau_state.c

diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index 11b2370e16da2dab6034e93f5a9fef485050336f..0f33132fba3b7e9418cf66757da36a2cd80335f5 100644
@@ -39,22 +39,22 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
 
        if (dev_priv->card_type >= NV_50) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
-                                            dev_priv->vm_end, NV_DMA_ACCESS_RO,
-                                            NV_DMA_TARGET_AGP, &pushbuf);
+                                            dev_priv->vm_end, NV_MEM_ACCESS_RO,
+                                            NV_MEM_TARGET_VM, &pushbuf);
                chan->pushbuf_base = pb->bo.offset;
        } else
        if (pb->bo.mem.mem_type == TTM_PL_TT) {
-               ret = nouveau_gpuobj_gart_dma_new(chan, 0,
-                                                 dev_priv->gart_info.aper_size,
-                                                 NV_DMA_ACCESS_RO, &pushbuf,
-                                                 NULL);
+               ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
+                                            dev_priv->gart_info.aper_size,
+                                            NV_MEM_ACCESS_RO,
+                                            NV_MEM_TARGET_GART, &pushbuf);
                chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
        } else
        if (dev_priv->card_type != NV_04) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
                                             dev_priv->fb_available_size,
-                                            NV_DMA_ACCESS_RO,
-                                            NV_DMA_TARGET_VIDMEM, &pushbuf);
+                                            NV_MEM_ACCESS_RO,
+                                            NV_MEM_TARGET_VRAM, &pushbuf);
                chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
        } else {
                /* NV04 cmdbuf hack, from original ddx.. not sure of it's
@@ -62,11 +62,10 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
                 * VRAM.
                 */
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-                                            pci_resource_start(dev->pdev,
-                                            1),
+                                            pci_resource_start(dev->pdev, 1),
                                             dev_priv->fb_available_size,
-                                            NV_DMA_ACCESS_RO,
-                                            NV_DMA_TARGET_PCI, &pushbuf);
+                                            NV_MEM_ACCESS_RO,
+                                            NV_MEM_TARGET_PCI, &pushbuf);
                chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
        }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index d76d2c09049d34f4a753be121cf07d2348fa6cb0..a52b1da32031a795f24a9e7377dfd81f1fc495ca 100644
@@ -886,12 +886,14 @@ extern int nouveau_gpuobj_new_fake(struct drm_device *, u32 pinst, u64 vinst,
 extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
                                  uint64_t offset, uint64_t size, int access,
                                  int target, struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
-                                      uint64_t offset, uint64_t size,
-                                      int access, struct nouveau_gpuobj **,
-                                      uint32_t *o_ret);
 extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
                                 struct nouveau_gpuobj **);
+extern int nv50_gpuobj_dma_new(struct nouveau_channel *, int class, u64 base,
+                              u64 size, int target, int access, u32 type,
+                              u32 comp, struct nouveau_gpuobj **pobj);
+extern void nv50_gpuobj_dma_init(struct nouveau_gpuobj *, u32 offset,
+                                int class, u64 base, u64 size, int target,
+                                int access, u32 type, u32 comp);
 extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
                                     struct drm_file *);
 extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
@@ -1545,6 +1547,22 @@ nv_match_device(struct drm_device *dev, unsigned device,
                dev->pdev->subsystem_device == sub_device;
 }
 
+/* memory type/access flags, do not match hardware values */
+#define NV_MEM_ACCESS_RO 1
+#define NV_MEM_ACCESS_WO 2
+#define NV_MEM_ACCESS_RW (NV_MEM_ACCESS_RO | NV_MEM_ACCESS_WO)
+#define NV_MEM_ACCESS_VM 4
+
+#define NV_MEM_TARGET_VRAM        0
+#define NV_MEM_TARGET_PCI         1
+#define NV_MEM_TARGET_PCI_NOSNOOP 2
+#define NV_MEM_TARGET_VM          3
+#define NV_MEM_TARGET_GART        4
+
+#define NV_MEM_TYPE_VM 0x7f
+#define NV_MEM_COMP_VM 0x03
+
+/* NV_SW object class */
 #define NV_SW                                                        0x0000506e
 #define NV_SW_DMA_SEMAPHORE                                          0x00000060
 #define NV_SW_SEMAPHORE_OFFSET                                       0x00000064
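
Note on the new defines (an observation, not something the patch itself relies on): unlike the removed NV_DMA_ACCESS_* values in nouveau_reg.h, where RW was 0 and the value was shifted straight into the ctxdma word, the NV_MEM_ACCESS_* flags form a bitmask with NV_MEM_ACCESS_RW == (RO | WO), so write permission can be tested directly:

    /* illustrative only; the patch uses a switch on 'access' instead */
    if (access & NV_MEM_ACCESS_WO)
            flags2 |= 0x00000002;   /* PTE write-enable, as in the code below */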
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 91aa6c54cc9654196b39096b452b3855eb75074b..2579fc69d18233d4725f284e2f79e52239500990 100644
@@ -459,8 +459,8 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             mem->start << PAGE_SHIFT,
                                             mem->size << PAGE_SHIFT,
-                                            NV_DMA_ACCESS_RW,
-                                            NV_DMA_TARGET_VIDMEM, &obj);
+                                            NV_MEM_ACCESS_RW,
+                                            NV_MEM_TARGET_VRAM, &obj);
                if (ret)
                        return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c
index 2c5a1f66f7f0cd0813dd8c4ee72395607239efab..a050b7b69782494fb35cb13e15ca673529bdc607 100644
@@ -99,7 +99,6 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
                       int size, uint32_t *b_offset)
 {
        struct drm_device *dev = chan->dev;
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *nobj = NULL;
        struct drm_mm_node *mem;
        uint32_t offset;
@@ -113,31 +112,15 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
                return -ENOMEM;
        }
 
-       offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
-       if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
-               target = NV_DMA_TARGET_VIDMEM;
-       } else
-       if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) {
-               if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
-                   dev_priv->card_type < NV_50) {
-                       ret = nouveau_sgdma_get_page(dev, offset, &offset);
-                       if (ret)
-                               return ret;
-                       target = NV_DMA_TARGET_PCI;
-               } else {
-                       target = NV_DMA_TARGET_AGP;
-                       if (dev_priv->card_type >= NV_50)
-                               offset += dev_priv->vm_gart_base;
-               }
-       } else {
-               NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
-                        chan->notifier_bo->bo.mem.mem_type);
-               return -EINVAL;
-       }
+       if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
+               target = NV_MEM_TARGET_VRAM;
+       else
+               target = NV_MEM_TARGET_GART;
+       offset  = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
        offset += mem->start;
 
        ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
-                                    mem->size, NV_DMA_ACCESS_RW, target,
+                                    mem->size, NV_MEM_ACCESS_RW, target,
                                     &nobj);
        if (ret) {
                drm_mm_put_block(mem);
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index e8c74de905ecab2888592fea79f26ca658f8f5b1..924653c30783c01594f2fbfb32df7163e7a94ed2 100644
@@ -404,113 +404,157 @@ nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
    The method below creates a DMA object in instance RAM and returns a handle
    to it that can be used to set up context objects.
 */
-int
-nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
-                      uint64_t offset, uint64_t size, int access,
-                      int target, struct nouveau_gpuobj **gpuobj)
+
+void
+nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
+                    u64 base, u64 size, int target, int access,
+                    u32 type, u32 comp)
 {
-       struct drm_device *dev = chan->dev;
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
-       struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
-       int ret;
+       struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
+       struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
+       u32 flags0;
 
-       NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
-                chan->id, class, offset, size);
-       NV_DEBUG(dev, "access=%d target=%d\n", access, target);
+       flags0  = (comp << 29) | (type << 22) | class;
+       flags0 |= 0x00100000;
+
+       switch (access) {
+       case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
+       case NV_MEM_ACCESS_RW:
+       case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
+       default:
+               break;
+       }
 
        switch (target) {
-       case NV_DMA_TARGET_AGP:
-               offset += dev_priv->gart_info.aper_base;
+       case NV_MEM_TARGET_VRAM:
+               flags0 |= 0x00010000;
+               break;
+       case NV_MEM_TARGET_PCI:
+               flags0 |= 0x00020000;
+               break;
+       case NV_MEM_TARGET_PCI_NOSNOOP:
+               flags0 |= 0x00030000;
                break;
+       case NV_MEM_TARGET_GART:
+               base += dev_priv->vm_gart_base;
        default:
+               flags0 &= ~0x00100000;
                break;
        }
 
-       ret = nouveau_gpuobj_new(dev, chan,
-                                nouveau_gpuobj_class_instmem_size(dev, class),
-                                16, NVOBJ_FLAG_ZERO_ALLOC |
-                                NVOBJ_FLAG_ZERO_FREE, gpuobj);
-       if (ret) {
-               NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
-               return ret;
-       }
+       /* convert to base + limit */
+       size = (base + size) - 1;
 
-       if (dev_priv->card_type < NV_50) {
-               uint32_t frame, adjust, pte_flags = 0;
-
-               if (access != NV_DMA_ACCESS_RO)
-                       pte_flags |= (1<<1);
-               adjust = offset &  0x00000fff;
-               frame  = offset & ~0x00000fff;
-
-               nv_wo32(*gpuobj,  0, ((1<<12) | (1<<13) | (adjust << 20) |
-                                     (access << 14) | (target << 16) |
-                                     class));
-               nv_wo32(*gpuobj,  4, size - 1);
-               nv_wo32(*gpuobj,  8, frame | pte_flags);
-               nv_wo32(*gpuobj, 12, frame | pte_flags);
-       } else {
-               uint64_t limit = offset + size - 1;
-               uint32_t flags0, flags5;
+       nv_wo32(obj, offset + 0x00, flags0);
+       nv_wo32(obj, offset + 0x04, lower_32_bits(size));
+       nv_wo32(obj, offset + 0x08, lower_32_bits(base));
+       nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
+                                   upper_32_bits(base));
+       nv_wo32(obj, offset + 0x10, 0x00000000);
+       nv_wo32(obj, offset + 0x14, 0x00000000);
 
-               if (target == NV_DMA_TARGET_VIDMEM) {
-                       flags0 = 0x00190000;
-                       flags5 = 0x00010000;
-               } else {
-                       flags0 = 0x7fc00000;
-                       flags5 = 0x00080000;
-               }
+       pinstmem->flush(obj->dev);
+}
 
-               nv_wo32(*gpuobj,  0, flags0 | class);
-               nv_wo32(*gpuobj,  4, lower_32_bits(limit));
-               nv_wo32(*gpuobj,  8, lower_32_bits(offset));
-               nv_wo32(*gpuobj, 12, ((upper_32_bits(limit) & 0xff) << 24) |
-                                     (upper_32_bits(offset) & 0xff));
-               nv_wo32(*gpuobj, 20, flags5);
-       }
+int
+nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
+                   int target, int access, u32 type, u32 comp,
+                   struct nouveau_gpuobj **pobj)
+{
+       struct drm_device *dev = chan->dev;
+       int ret;
 
-       instmem->flush(dev);
+       ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_ALLOC |
+                                NVOBJ_FLAG_ZERO_FREE, pobj);
+       if (ret)
+               return ret;
 
-       (*gpuobj)->engine = NVOBJ_ENGINE_SW;
-       (*gpuobj)->class  = class;
+       nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
+                            access, type, comp);
        return 0;
 }
 
 int
-nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
-                           uint64_t offset, uint64_t size, int access,
-                           struct nouveau_gpuobj **gpuobj,
-                           uint32_t *o_ret)
+nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
+                      u64 size, int access, int target,
+                      struct nouveau_gpuobj **pobj)
 {
+       struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct drm_device *dev = chan->dev;
-       struct drm_nouveau_private *dev_priv = dev->dev_private;
+       struct nouveau_gpuobj *obj;
+       u32 page_addr, flags0, flags2;
        int ret;
 
-       if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
-           (dev_priv->card_type >= NV_50 &&
-            dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
-               ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
-                                            offset + dev_priv->vm_gart_base,
-                                            size, access, NV_DMA_TARGET_AGP,
-                                            gpuobj);
-               if (o_ret)
-                       *o_ret = 0;
-       } else
-       if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
-               nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, gpuobj);
-               if (offset & ~0xffffffffULL) {
-                       NV_ERROR(dev, "obj offset exceeds 32-bits\n");
-                       return -EINVAL;
+       if (dev_priv->card_type >= NV_50) {
+               u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
+               u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;
+
+               return nv50_gpuobj_dma_new(chan, class, base, size,
+                                          target, access, type, comp, pobj);
+       }
+
+       if (target == NV_MEM_TARGET_GART) {
+               if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+                       target = NV_MEM_TARGET_PCI_NOSNOOP;
+                       base  += dev_priv->gart_info.aper_base;
+               } else
+               if (base != 0) {
+                       ret = nouveau_sgdma_get_page(dev, base, &page_addr);
+                       if (ret)
+                               return ret;
+
+                       target = NV_MEM_TARGET_PCI;
+                       base   = page_addr;
+               } else {
+                       nouveau_gpuobj_ref(dev_priv->gart_info.sg_ctxdma, pobj);
+                       return 0;
                }
-               if (o_ret)
-                       *o_ret = (uint32_t)offset;
-               ret = (*gpuobj != NULL) ? 0 : -EINVAL;
-       } else {
-               NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
-               return -EINVAL;
        }
 
-       return ret;
+       flags0  = class;
+       flags0 |= 0x00003000; /* PT present, PT linear */
+       flags2  = 0;
+
+       switch (target) {
+       case NV_MEM_TARGET_PCI:
+               flags0 |= 0x00020000;
+               break;
+       case NV_MEM_TARGET_PCI_NOSNOOP:
+               flags0 |= 0x00030000;
+               break;
+       default:
+               break;
+       }
+
+       switch (access) {
+       case NV_MEM_ACCESS_RO:
+               flags0 |= 0x00004000;
+               break;
+       case NV_MEM_ACCESS_WO:
+               flags0 |= 0x00008000;
+       default:
+               flags2 |= 0x00000002;
+               break;
+       }
+
+       flags0 |= (base & 0x00000fff) << 20;
+       flags2 |= (base & 0xfffff000);
+
+       ret = nouveau_gpuobj_new(dev, chan, (dev_priv->card_type >= NV_40) ?
+                                32 : 16, 16, NVOBJ_FLAG_ZERO_ALLOC |
+                                NVOBJ_FLAG_ZERO_FREE, &obj);
+       if (ret)
+               return ret;
+
+       nv_wo32(obj, 0x00, flags0);
+       nv_wo32(obj, 0x04, size - 1);
+       nv_wo32(obj, 0x08, flags2);
+       nv_wo32(obj, 0x0c, flags2);
+
+       obj->engine = NVOBJ_ENGINE_SW;
+       obj->class  = class;
+       *pobj = obj;
+       return 0;
 }
 
 /* Context objects in the instance RAM have the following structure.
@@ -806,8 +850,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
        if (dev_priv->card_type >= NV_50) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, dev_priv->vm_end,
-                                            NV_DMA_ACCESS_RW,
-                                            NV_DMA_TARGET_AGP, &vram);
+                                            NV_MEM_ACCESS_RW,
+                                            NV_MEM_TARGET_VM, &vram);
                if (ret) {
                        NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
                        return ret;
@@ -815,8 +859,8 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
        } else {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, dev_priv->fb_available_size,
-                                            NV_DMA_ACCESS_RW,
-                                            NV_DMA_TARGET_VIDMEM, &vram);
+                                            NV_MEM_ACCESS_RW,
+                                            NV_MEM_TARGET_VRAM, &vram);
                if (ret) {
                        NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
                        return ret;
@@ -834,20 +878,13 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
        if (dev_priv->card_type >= NV_50) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, dev_priv->vm_end,
-                                            NV_DMA_ACCESS_RW,
-                                            NV_DMA_TARGET_AGP, &tt);
-               if (ret) {
-                       NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
-                       return ret;
-               }
-       } else
-       if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
-               ret = nouveau_gpuobj_gart_dma_new(chan, 0,
-                                                 dev_priv->gart_info.aper_size,
-                                                 NV_DMA_ACCESS_RW, &tt, NULL);
+                                            NV_MEM_ACCESS_RW,
+                                            NV_MEM_TARGET_VM, &tt);
        } else {
-               NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
-               ret = -EINVAL;
+               ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+                                            0, dev_priv->gart_info.aper_size,
+                                            NV_MEM_ACCESS_RW,
+                                            NV_MEM_TARGET_GART, &tt);
        }
 
        if (ret) {
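
A quick sanity check on the rewritten pre-NV50 path (my own trace of the code above, not part of the patch): for the VRAM ctxdma created by nouveau_card_init_channel(), i.e. class NV_CLASS_DMA_IN_MEMORY (0x003d), base 0, NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM, the new code writes the same words as the code it replaces:

    flags0  = 0x003d;       /* class */
    flags0 |= 0x00003000;   /* PT present, PT linear */
    /* NV_MEM_TARGET_VRAM and NV_MEM_ACCESS_RW both take the 'default'
     * cases, so no further flags0 bits; RW sets the PTE write bit. */
    flags2  = 0x00000002;

    nv_wo32(obj, 0x00, 0x0000303d);  /* == (1<<12)|(1<<13)|class before */
    nv_wo32(obj, 0x04, size - 1);
    nv_wo32(obj, 0x08, 0x00000002);  /* == frame | pte_flags before     */
    nv_wo32(obj, 0x0c, 0x00000002);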
diff --git a/drivers/gpu/drm/nouveau/nouveau_reg.h b/drivers/gpu/drm/nouveau/nouveau_reg.h
index df3a87e792f2d1d1a5a3317c482c50dbacacc0d3..04e8fb7952692ccfde1524439d12e51a7cd88f64 100644
 #    define NV40_RAMHT_CONTEXT_ENGINE_SHIFT                20
 #    define NV40_RAMHT_CONTEXT_INSTANCE_SHIFT              0
 
-/* DMA object defines */
-#define NV_DMA_ACCESS_RW 0
-#define NV_DMA_ACCESS_RO 1
-#define NV_DMA_ACCESS_WO 2
-#define NV_DMA_TARGET_VIDMEM 0
-#define NV_DMA_TARGET_PCI    2
-#define NV_DMA_TARGET_AGP    3
-/* The following is not a real value used by the card, it's changed by
- * nouveau_object_dma_create */
-#define NV_DMA_TARGET_PCI_NONLINEAR 8
-
 /* Some object classes we care about in the drm */
 #define NV_CLASS_DMA_FROM_MEMORY                           0x00000002
 #define NV_CLASS_DMA_TO_MEMORY                             0x00000003
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 54af7608d45c4dd9edce2cef04534406df54667e..db32644f61140b22c7e8eb38179726c1fc5b5830 100644
@@ -247,14 +247,11 @@ nouveau_sgdma_init(struct drm_device *dev)
                 */
                gpuobj->cinst = gpuobj->pinst;
 
-               /* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
-                * confirmed to work on c51.  Perhaps means NV_DMA_TARGET_PCIE
-                * on those cards? */
                nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
                                   (1 << 12) /* PT present */ |
                                   (0 << 13) /* PT *not* linear */ |
-                                  (NV_DMA_ACCESS_RW  << 14) |
-                                  (NV_DMA_TARGET_PCI << 16));
+                                  (0 << 14) /* RW */ |
+                                  (2 << 16) /* PCI */);
                nv_wo32(gpuobj, 4, aper_size - 1);
                for (i = 2; i < 2 + (aper_size >> 12); i++)
                        nv_wo32(gpuobj, i * 4, 0x00000000);
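
For reference (my reading, matching the defines removed from nouveau_reg.h in this patch): the raw values written above are the old macro values, so the generated SGDMA ctxdma is unchanged by the rename:

    nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
                       (1 << 12)   /* PT present */ |
                       (0 << 13)   /* PT *not* linear */ |
                       (0 << 14)   /* was NV_DMA_ACCESS_RW  == 0 */ |
                       (2 << 16)   /* was NV_DMA_TARGET_PCI == 2 */);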
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 35b28406caf6b50dd33f8dc14ae745b0ff320fff..e779e93204538129ca10656909ef3f165fd501ad 100644
@@ -536,7 +536,7 @@ nouveau_card_init_channel(struct drm_device *dev)
 
        ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
                                     0, dev_priv->vram_size,
-                                    NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
+                                    NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM,
                                     &gpuobj);
        if (ret)
                goto out_err;
@@ -546,9 +546,10 @@ nouveau_card_init_channel(struct drm_device *dev)
        if (ret)
                goto out_err;
 
-       ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
-                                         dev_priv->gart_info.aper_size,
-                                         NV_DMA_ACCESS_RW, &gpuobj, NULL);
+       ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
+                                    0, dev_priv->gart_info.aper_size,
+                                    NV_MEM_ACCESS_RW, NV_MEM_TARGET_GART,
+                                    &gpuobj);
        if (ret)
                goto out_err;