drm/nouveau/pm: several fixes for nvc0 memory timings
drivers/gpu/drm/nouveau/nouveau_mem.c
/*
 * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
 * Copyright 2005 Stephane Marchesin
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Skeggs <bskeggs@redhat.com>
 *    Roy Spliet <r.spliet@student.tudelft.nl>
 */

#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"

#include "nouveau_drv.h"
#include "nouveau_pm.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"

/*
 * NV10-NV40 tiling helpers
 */

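/*
 * Updating a tile region requires a quiesced FIFO: reassignment and cache
 * pulls are disabled, the card is idled, PFB and every engine with tiling
 * state are reprogrammed, and the FIFO is then re-enabled.
 */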
static void
nv10_mem_update_tile_region(struct drm_device *dev,
                            struct nouveau_tile_reg *tile, uint32_t addr,
                            uint32_t size, uint32_t pitch, uint32_t flags)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
        int i = tile - dev_priv->tile.reg, j;
        unsigned long save;

        nouveau_fence_unref(&tile->fence);

        if (tile->pitch)
                pfb->free_tile_region(dev, i);

        if (pitch)
                pfb->init_tile_region(dev, i, addr, size, pitch, flags);

        spin_lock_irqsave(&dev_priv->context_switch_lock, save);
        pfifo->reassign(dev, false);
        pfifo->cache_pull(dev, false);

        nouveau_wait_for_idle(dev);

        pfb->set_tile_region(dev, i);
        for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
                if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
                        dev_priv->eng[j]->set_tile_region(dev, i);
        }

        pfifo->cache_pull(dev, true);
        pfifo->reassign(dev, true);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
}

static struct nouveau_tile_reg *
nv10_mem_get_tile_region(struct drm_device *dev, int i)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

        spin_lock(&dev_priv->tile.lock);

        if (!tile->used &&
            (!tile->fence || nouveau_fence_signalled(tile->fence)))
                tile->used = true;
        else
                tile = NULL;

        spin_unlock(&dev_priv->tile.lock);
        return tile;
}

void
nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
                         struct nouveau_fence *fence)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (tile) {
                spin_lock(&dev_priv->tile.lock);
                if (fence) {
                        /* Mark it as pending. */
                        tile->fence = fence;
                        nouveau_fence_ref(fence);
                }

                tile->used = false;
                spin_unlock(&dev_priv->tile.lock);
        }
}

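/*
 * Scan all tile regions, claiming the first free one (when a pitch is
 * requested) and tearing down any other free-but-still-configured regions
 * encountered along the way.
 */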
struct nouveau_tile_reg *
nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
                    uint32_t pitch, uint32_t flags)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
        struct nouveau_tile_reg *tile, *found = NULL;
        int i;

        for (i = 0; i < pfb->num_tiles; i++) {
                tile = nv10_mem_get_tile_region(dev, i);

                if (pitch && !found) {
                        found = tile;
                        continue;

                } else if (tile && tile->pitch) {
                        /* Kill an unused tile region. */
                        nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
                }

                nv10_mem_put_tile_region(dev, tile, NULL);
        }

        if (found)
                nv10_mem_update_tile_region(dev, found, addr, size,
                                            pitch, flags);
        return found;
}

/*
 * Cleanup everything
 */
void
nouveau_mem_vram_fini(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        ttm_bo_device_release(&dev_priv->ttm.bdev);

        nouveau_ttm_global_release(dev_priv);

        if (dev_priv->fb_mtrr >= 0) {
                drm_mtrr_del(dev_priv->fb_mtrr,
                             pci_resource_start(dev->pdev, 1),
                             pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
                dev_priv->fb_mtrr = -1;
        }
}

void
nouveau_mem_gart_fini(struct drm_device *dev)
{
        nouveau_sgdma_takedown(dev);

        if (drm_core_has_AGP(dev) && dev->agp) {
                struct drm_agp_mem *entry, *tempe;

                /* Remove AGP resources, but leave dev->agp intact until
                 * drv_cleanup is called. */
                list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
                        if (entry->bound)
                                drm_unbind_agp(entry->memory);
                        drm_free_agp(entry->memory, entry->pages);
                        kfree(entry);
                }
                INIT_LIST_HEAD(&dev->agp->memory);

                if (dev->agp->acquired)
                        drm_agp_release(dev);

                dev->agp->acquired = 0;
                dev->agp->enabled = 0;
        }
}

bool
nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
{
        if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
                return true;

        return false;
}

#if __OS_HAS_AGP
static unsigned long
get_agp_mode(struct drm_device *dev, unsigned long mode)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        /*
         * FW seems to be broken on nv18, it makes the card lock up
         * randomly.
         */
        if (dev_priv->chipset == 0x18)
                mode &= ~PCI_AGP_COMMAND_FW;

        /*
         * AGP mode set in the command line.
         */
        if (nouveau_agpmode > 0) {
                bool agpv3 = mode & 0x8;
                int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;

                mode = (mode & ~0x7) | (rate & 0x7);
        }

        return mode;
}
#endif

int
nouveau_mem_reset_agp(struct drm_device *dev)
{
#if __OS_HAS_AGP
        uint32_t saved_pci_nv_1, pmc_enable;
        int ret;

        /* First of all, disable fast writes, otherwise if it's
         * already enabled in the AGP bridge and we disable the card's
         * AGP controller we might be locking ourselves out of it. */
        if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) |
             dev->agp->mode) & PCI_AGP_COMMAND_FW) {
                struct drm_agp_info info;
                struct drm_agp_mode mode;

                ret = drm_agp_info(dev, &info);
                if (ret)
                        return ret;

                mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW;
                ret = drm_agp_enable(dev, mode);
                if (ret)
                        return ret;
        }

        saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);

        /* clear busmaster bit */
        nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);
        /* disable AGP */
        nv_wr32(dev, NV04_PBUS_PCI_NV_19, 0);

        /* power cycle pgraph, if enabled */
        pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
        if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
                nv_wr32(dev, NV03_PMC_ENABLE,
                        pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
                nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
                        NV_PMC_ENABLE_PGRAPH);
        }

        /* and restore (gives effect of resetting AGP) */
        nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
#endif

        return 0;
}

int
nouveau_mem_init_agp(struct drm_device *dev)
{
#if __OS_HAS_AGP
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_agp_info info;
        struct drm_agp_mode mode;
        int ret;

        if (!dev->agp->acquired) {
                ret = drm_agp_acquire(dev);
                if (ret) {
                        NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);
                        return ret;
                }
        }

        nouveau_mem_reset_agp(dev);

        ret = drm_agp_info(dev, &info);
        if (ret) {
                NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);
                return ret;
        }

        /* see agp.h for the AGPSTAT_* modes available */
        mode.mode = get_agp_mode(dev, info.mode);
        ret = drm_agp_enable(dev, mode);
        if (ret) {
                NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
                return ret;
        }

        dev_priv->gart_info.type      = NOUVEAU_GART_AGP;
        dev_priv->gart_info.aper_base = info.aperture_base;
        dev_priv->gart_info.aper_size = info.aperture_size;
#endif
        return 0;
}

static const struct vram_types {
        int value;
        const char *name;
} vram_type_map[] = {
        { NV_MEM_TYPE_STOLEN , "stolen system memory" },
        { NV_MEM_TYPE_SGRAM  , "SGRAM" },
        { NV_MEM_TYPE_SDRAM  , "SDRAM" },
        { NV_MEM_TYPE_DDR1   , "DDR1" },
        { NV_MEM_TYPE_DDR2   , "DDR2" },
        { NV_MEM_TYPE_DDR3   , "DDR3" },
        { NV_MEM_TYPE_GDDR2  , "GDDR2" },
        { NV_MEM_TYPE_GDDR3  , "GDDR3" },
        { NV_MEM_TYPE_GDDR4  , "GDDR4" },
        { NV_MEM_TYPE_GDDR5  , "GDDR5" },
        { NV_MEM_TYPE_UNKNOWN, "unknown type" }
};

int
nouveau_mem_vram_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
        const struct vram_types *vram_type;
        int ret, dma_bits;

        dma_bits = 32;
        if (dev_priv->card_type >= NV_50) {
                if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
                        dma_bits = 40;
        } else
        if (0 && pci_is_pcie(dev->pdev) && /* XXX: intentionally disabled */
            dev_priv->chipset > 0x40 &&
            dev_priv->chipset != 0x45) {
                if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
                        dma_bits = 39;
        }

        ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
        if (ret)
                return ret;
        ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
        if (ret) {
                /* Reset to default value. */
                pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
        }

        ret = nouveau_ttm_global_init(dev_priv);
        if (ret)
                return ret;

        ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
                                 dev_priv->ttm.bo_global_ref.ref.object,
                                 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
                                 dma_bits <= 32);
        if (ret) {
                NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
                return ret;
        }

        /* honour a "nouveau_vram_type" override if one was given, otherwise
         * just look up the name of the detected type for the message below */
        vram_type = vram_type_map;
        while (vram_type->value != NV_MEM_TYPE_UNKNOWN) {
                if (nouveau_vram_type) {
                        if (!strcasecmp(nouveau_vram_type, vram_type->name)) {
                                dev_priv->vram_type = vram_type->value;
                                break;
                        }
                } else {
                        if (vram_type->value == dev_priv->vram_type)
                                break;
                }
                vram_type++;
        }

        NV_INFO(dev, "Detected %dMiB VRAM (%s)\n",
                (int)(dev_priv->vram_size >> 20), vram_type->name);
        if (dev_priv->vram_sys_base) {
                NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
                        dev_priv->vram_sys_base);
        }

        dev_priv->fb_available_size = dev_priv->vram_size;
        dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
        if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
                dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
        dev_priv->fb_mappable_pages >>= PAGE_SHIFT;

        dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
        dev_priv->fb_aper_free = dev_priv->fb_available_size;

        /* mappable vram */
        ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
                             dev_priv->fb_available_size >> PAGE_SHIFT);
        if (ret) {
                NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
                return ret;
        }

        if (dev_priv->card_type < NV_50) {
                ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
                                     0, 0, &dev_priv->vga_ram);
                if (ret == 0)
                        ret = nouveau_bo_pin(dev_priv->vga_ram,
                                             TTM_PL_FLAG_VRAM);

                if (ret) {
                        NV_WARN(dev, "failed to reserve VGA memory\n");
                        nouveau_bo_ref(NULL, &dev_priv->vga_ram);
                }
        }

        dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
                                         pci_resource_len(dev->pdev, 1),
                                         DRM_MTRR_WC);
        return 0;
}

int
nouveau_mem_gart_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
        int ret;

        dev_priv->gart_info.type = NOUVEAU_GART_NONE;

#if !defined(__powerpc__) && !defined(__ia64__)
        if (drm_pci_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
                ret = nouveau_mem_init_agp(dev);
                if (ret)
                        NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
        }
#endif

        if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
                ret = nouveau_sgdma_init(dev);
                if (ret) {
                        NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
                        return ret;
                }
        }

        NV_INFO(dev, "%d MiB GART (aperture)\n",
                (int)(dev_priv->gart_info.aper_size >> 20));
        dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;

        ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
                             dev_priv->gart_info.aper_size >> PAGE_SHIFT);
        if (ret) {
                NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
                return ret;
        }

        return 0;
}

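/*
 * Memory timing calculation methods.  Each *_mem_timing_calc() translates
 * a BIOS timing table entry into values for the chipset's PFB timing
 * registers (0x100220+ before NVC0, 0x10f290+ on NVC0); see
 * nouveau_mem_timing_read() below for the register layout being mirrored.
 */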
static int
nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
                     struct nouveau_pm_tbl_entry *e, u8 len,
                     struct nouveau_pm_memtiming *boot,
                     struct nouveau_pm_memtiming *t)
{
        t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);

        /* XXX: I don't trust the -1's and +1's... they must come
         *      from somewhere! */
        t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
                    1 << 16 |
                    (e->tWTR + 2 + (t->tCWL - 1)) << 8 |
                    (e->tCL + 2 - (t->tCWL - 1));

        t->reg[2] = 0x20200000 |
                    ((t->tCWL - 1) << 24 |
                     e->tRRD << 16 |
                     e->tRCDWR << 8 |
                     e->tRCDRD);

        NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", t->id,
                 t->reg[0], t->reg[1], t->reg[2]);
        return 0;
}

static int
nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
                     struct nouveau_pm_tbl_entry *e, u8 len,
                     struct nouveau_pm_memtiming *boot,
                     struct nouveau_pm_memtiming *t)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct bit_entry P;
        uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;

        if (bit_table(dev, 'P', &P))
                return -EINVAL;

        /* longer table entries carry extra fields; each case deliberately
         * falls through to pick up the fields of the shorter entries */
        switch (min(len, (u8) 22)) {
        case 22:
                unk21 = e->tUNK_21;
                /* fall through */
        case 21:
                unk20 = e->tUNK_20;
                /* fall through */
        case 20:
                if (e->tCWL > 0)
                        t->tCWL = e->tCWL;
                /* fall through */
        case 19:
                unk18 = e->tUNK_18;
                break;
        }

        t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);

        t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
                    max(unk18, (u8) 1) << 16 |
                    (e->tWTR + 2 + (t->tCWL - 1)) << 8;

        t->reg[2] = ((t->tCWL - 1) << 24 |
                    e->tRRD << 16 |
                    e->tRCDWR << 8 |
                    e->tRCDRD);

        t->reg[4] = e->tUNK_13 << 8 | e->tUNK_13;

        t->reg[5] = (e->tRFC << 24 | max(e->tRCDRD, e->tRCDWR) << 16 | e->tRP);

        t->reg[8] = boot->reg[8] & 0xffffff00;

        if (P.version == 1) {
                t->reg[1] |= (e->tCL + 2 - (t->tCWL - 1));

                t->reg[3] = (0x14 + e->tCL) << 24 |
                            0x16 << 16 |
                            (e->tCL - 1) << 8 |
                            (e->tCL - 1);

                t->reg[4] |= boot->reg[4] & 0xffff0000;

                t->reg[6] = (0x33 - t->tCWL) << 16 |
                            t->tCWL << 8 |
                            (0x2e + e->tCL - t->tCWL);

                t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;

                /* XXX: P.version == 1 only has DDR2 and GDDR3? */
                if (dev_priv->vram_type == NV_MEM_TYPE_DDR2) {
                        t->reg[5] |= (e->tCL + 3) << 8;
                        t->reg[6] |= (t->tCWL - 2) << 8;
                        t->reg[8] |= (e->tCL - 4);
                } else {
                        t->reg[5] |= (e->tCL + 2) << 8;
                        t->reg[6] |= t->tCWL << 8;
                        t->reg[8] |= (e->tCL - 2);
                }
        } else {
                t->reg[1] |= (5 + e->tCL - (t->tCWL));

                /* XXX: 0xb? 0x30? */
                t->reg[3] = (0x30 + e->tCL) << 24 |
                            (boot->reg[3] & 0x00ff0000) |
                            (0xb + e->tCL) << 8 |
                            (e->tCL - 1);

                t->reg[4] |= (unk20 << 24 | unk21 << 16);

                /* XXX: +6? */
                t->reg[5] |= (t->tCWL + 6) << 8;

                t->reg[6] = (0x5a + e->tCL) << 16 |
                            (6 - e->tCL + t->tCWL) << 8 |
                            (0x50 + e->tCL - t->tCWL);

                tmp7_3 = (boot->reg[7] & 0xff000000) >> 24;
                t->reg[7] = (tmp7_3 << 24) |
                            ((tmp7_3 - 6 + e->tCL) << 16) |
                            0x202;
        }

        NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
                 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
        NV_DEBUG(dev, "         230: %08x %08x %08x %08x\n",
                 t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
        NV_DEBUG(dev, "         240: %08x\n", t->reg[8]);
        return 0;
}

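/*
 * NVC0 packs the timings into fewer registers (the 0x10f290 group); any
 * fields not derived from the table entry are carried over from the
 * boot-time register values.
 */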
static int
nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
                     struct nouveau_pm_tbl_entry *e, u8 len,
                     struct nouveau_pm_memtiming *boot,
                     struct nouveau_pm_memtiming *t)
{
        if (e->tCWL > 0)
                t->tCWL = e->tCWL;

        t->reg[0] = (e->tRP << 24 | (e->tRAS & 0x7f) << 17 |
                     e->tRFC << 8 | e->tRC);

        t->reg[1] = (boot->reg[1] & 0xff000000) |
                    (e->tRCDWR & 0x0f) << 20 |
                    (e->tRCDRD & 0x0f) << 14 |
                    (t->tCWL << 7) |
                    (e->tCL & 0x0f);

        t->reg[2] = (boot->reg[2] & 0xff0000ff) |
                    e->tWR << 16 | e->tWTR << 8;

        t->reg[3] = (e->tUNK_20 & 0x1f) << 9 |
                    (e->tUNK_21 & 0xf) << 5 |
                    (e->tUNK_13 & 0x1f);

        t->reg[4] = (boot->reg[4] & 0xfff00fff) |
                    (e->tRRD & 0x1f) << 15;

        NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
                 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
        NV_DEBUG(dev, "         2a0: %08x\n", t->reg[4]);
        return 0;
}

/*
 * MR generation methods
 */

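/*
 * Each nouveau_mem_*_mr() routine derives the DRAM mode-register (MR)
 * values for one RAM type from a timing table entry, range-checking
 * tCL/tWR and preserving the boot-time bits it doesn't own.
 */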
static int
nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
                    struct nouveau_pm_tbl_entry *e, u8 len,
                    struct nouveau_pm_memtiming *boot,
                    struct nouveau_pm_memtiming *t)
{
        t->drive_strength = 0;
        if (len < 15)
                t->odt = boot->odt;
        else
                t->odt = e->RAM_FT1 & 0x07;

        if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
                NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
                return -ERANGE;
        }

        if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
                NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
                return -ERANGE;
        }

        if (t->odt > 3) {
                NV_WARN(dev, "(%u) Invalid odt value, assuming disabled: %x",
                        t->id, t->odt);
                t->odt = 0;
        }

        t->mr[0] = (boot->mr[0] & 0x100f) |
                   (e->tCL) << 4 |
                   (e->tWR - 1) << 9;
        t->mr[1] = (boot->mr[1] & 0x101fbb) |
                   (t->odt & 0x1) << 2 |
                   (t->odt & 0x2) << 5;

        NV_DEBUG(dev, "(%u) MR: %08x", t->id, t->mr[0]);
        return 0;
}

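/* Maps tWR in memory clocks to the DDR3 MR0 write-recovery field encoding. */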
uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
        0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};

static int
nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
                    struct nouveau_pm_tbl_entry *e, u8 len,
                    struct nouveau_pm_memtiming *boot,
                    struct nouveau_pm_memtiming *t)
{
        u8 cl = e->tCL - 4;

        t->drive_strength = 0;
        if (len < 15)
                t->odt = boot->odt;
        else
                t->odt = e->RAM_FT1 & 0x07;

        if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
                NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
                return -ERANGE;
        }

        if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
                NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
                return -ERANGE;
        }

        if (e->tCWL < 5) {
                NV_WARN(dev, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
                return -ERANGE;
        }

        t->mr[0] = (boot->mr[0] & 0x180b) |
                   /* CAS */
                   (cl & 0x7) << 4 |
                   (cl & 0x8) >> 1 |
                   (nv_mem_wr_lut_ddr3[e->tWR]) << 9;
        t->mr[1] = (boot->mr[1] & 0x101dbb) |
                   (t->odt & 0x1) << 2 |
                   (t->odt & 0x2) << 5 |
                   (t->odt & 0x4) << 7;
        t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;

        NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
        return 0;
}

uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
        0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
        0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};

static int
nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
                     struct nouveau_pm_tbl_entry *e, u8 len,
                     struct nouveau_pm_memtiming *boot,
                     struct nouveau_pm_memtiming *t)
{
        if (len < 15) {
                t->drive_strength = boot->drive_strength;
                t->odt = boot->odt;
        } else {
                t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
                t->odt = e->RAM_FT1 & 0x07;
        }

        if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
                NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
                return -ERANGE;
        }

        if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
                NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
                return -ERANGE;
        }

        if (t->odt > 3) {
                NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
                        t->id, t->odt);
                t->odt = 0;
        }

        t->mr[0] = (boot->mr[0] & 0xe0b) |
                   /* CAS */
                   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x7) << 4) |
                   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x8) >> 2);
        t->mr[1] = (boot->mr[1] & 0x100f40) | t->drive_strength |
                   (t->odt << 2) |
                   (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;
        t->mr[2] = boot->mr[2];

        NV_DEBUG(dev, "(%u) MR: %08x %08x %08x", t->id,
                      t->mr[0], t->mr[1], t->mr[2]);
        return 0;
}

static int
nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
                     struct nouveau_pm_tbl_entry *e, u8 len,
                     struct nouveau_pm_memtiming *boot,
                     struct nouveau_pm_memtiming *t)
{
        if (len < 15) {
                t->drive_strength = boot->drive_strength;
                t->odt = boot->odt;
        } else {
                t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
                t->odt = e->RAM_FT1 & 0x03;
        }

        if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
                NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
                return -ERANGE;
        }

        if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
                NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
                return -ERANGE;
        }

        if (t->odt > 3) {
                NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
                        t->id, t->odt);
                t->odt = 0;
        }

        t->mr[0] = (boot->mr[0] & 0x007) |
                   ((e->tCL - 5) << 3) |
                   ((e->tWR - 4) << 8);
        t->mr[1] = (boot->mr[1] & 0x1007f0) |
                   t->drive_strength |
                   (t->odt << 2);

        NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
        return 0;
}

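/*
 * Top-level timing calculation: dispatch on chipset for the timing
 * registers, then on RAM type for the mode registers, and finally apply
 * the DLL-off flag from the RAMCFG table, if one exists for this clock.
 */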
int
nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
                        struct nouveau_pm_memtiming *t)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
        struct nouveau_pm_memtiming *boot = &pm->boot.timing;
        struct nouveau_pm_tbl_entry *e;
        u8 ver, len, *ptr, *ramcfg;
        int ret;

        ptr = nouveau_perf_timing(dev, freq, &ver, &len);
        if (!ptr || ptr[0] == 0x00) {
                *t = *boot;
                return 0;
        }
        e = (struct nouveau_pm_tbl_entry *)ptr;

        t->tCWL = boot->tCWL;

        switch (dev_priv->card_type) {
        case NV_40:
                ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t);
                break;
        case NV_50:
                ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
                break;
        case NV_C0:
                ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
                break;
        default:
                ret = -ENODEV;
                break;
        }

        /* multiplying by !ret skips the MR calculation below (falls to the
         * default case) if the timing calculation above failed */
        switch (dev_priv->vram_type * !ret) {
        case NV_MEM_TYPE_GDDR3:
                ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t);
                break;
        case NV_MEM_TYPE_GDDR5:
                ret = nouveau_mem_gddr5_mr(dev, freq, e, len, boot, t);
                break;
        case NV_MEM_TYPE_DDR2:
                ret = nouveau_mem_ddr2_mr(dev, freq, e, len, boot, t);
                break;
        case NV_MEM_TYPE_DDR3:
                ret = nouveau_mem_ddr3_mr(dev, freq, e, len, boot, t);
                break;
        default:
                ret = -EINVAL;
                break;
        }

        ramcfg = nouveau_perf_ramcfg(dev, freq, &ver, &len);
        if (ramcfg) {
                int dll_off;

                if (ver == 0x00)
                        dll_off = !!(ramcfg[3] & 0x04);
                else
                        dll_off = !!(ramcfg[2] & 0x40);

                switch (dev_priv->vram_type) {
                case NV_MEM_TYPE_GDDR3:
                        t->mr[1] &= ~0x00000040;
                        t->mr[1] |=  0x00000040 * dll_off;
                        break;
                default:
                        t->mr[1] &= ~0x00000001;
                        t->mr[1] |=  0x00000001 * dll_off;
                        break;
                }
        }

        return ret;
}

void
nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        u32 timing_base, timing_regs, mr_base;
        int i;

        if (dev_priv->card_type >= 0xC0) {
                timing_base = 0x10f290;
                mr_base = 0x10f300;
        } else {
                timing_base = 0x100220;
                mr_base = 0x1002c0;
        }

        t->id = -1;

        switch (dev_priv->card_type) {
        case NV_50:
                timing_regs = 9;
                break;
        case NV_C0:
        case NV_D0:
                timing_regs = 5;
                break;
        case NV_30:
        case NV_40:
                timing_regs = 3;
                break;
        default:
                return;
        }
        for (i = 0; i < timing_regs; i++)
                t->reg[i] = nv_rd32(dev, timing_base + (0x04 * i));

        t->tCWL = 0;
        if (dev_priv->card_type < NV_C0) {
                t->tCWL = ((nv_rd32(dev, 0x100228) & 0x0f000000) >> 24) + 1;
        } else if (dev_priv->card_type <= NV_D0) {
                t->tCWL = ((nv_rd32(dev, 0x10f294) & 0x00000f80) >> 7);
        }

        t->mr[0] = nv_rd32(dev, mr_base);
        t->mr[1] = nv_rd32(dev, mr_base + 0x04);
        t->mr[2] = nv_rd32(dev, mr_base + 0x20);
        t->mr[3] = nv_rd32(dev, mr_base + 0x24);

        t->odt = 0;
        t->drive_strength = 0;

        switch (dev_priv->vram_type) {
        case NV_MEM_TYPE_DDR3:
                t->odt |= (t->mr[1] & 0x200) >> 7;
                /* fall through */
        case NV_MEM_TYPE_DDR2:
                t->odt |= (t->mr[1] & 0x04) >> 2 |
                          (t->mr[1] & 0x40) >> 5;
                break;
        case NV_MEM_TYPE_GDDR3:
        case NV_MEM_TYPE_GDDR5:
                t->drive_strength = t->mr[1] & 0x03;
                t->odt = (t->mr[1] & 0x0c) >> 2;
                break;
        default:
                break;
        }
}

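/*
 * Perform a memory reclock using the caller-provided primitives, roughly
 * following the standard DRAM procedure: disable the DLL if requested,
 * precharge and enter self-refresh, switch the input clock, exit
 * self-refresh, rewrite the mode registers and PFB timings, then reset
 * the DLL.  tCKSRE/tCKSRX/tXS are currently left at zero; tMRD and tDLLK
 * are conservative per-RAM-type constants.
 */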
int
nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
                 struct nouveau_pm_level *perflvl)
{
        struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
        struct nouveau_pm_memtiming *info = &perflvl->timing;
        u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0;
        u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] };
        u32 mr1_dlloff;

        switch (dev_priv->vram_type) {
        case NV_MEM_TYPE_DDR2:
                tDLLK = 2000;
                mr1_dlloff = 0x00000001;
                break;
        case NV_MEM_TYPE_DDR3:
                tDLLK = 12000;
                mr1_dlloff = 0x00000001;
                break;
        case NV_MEM_TYPE_GDDR3:
                tDLLK = 40000;
                mr1_dlloff = 0x00000040;
                break;
        default:
                NV_ERROR(exec->dev, "cannot reclock unsupported memtype\n");
                return -ENODEV;
        }

        /* fetch current MRs */
        switch (dev_priv->vram_type) {
        case NV_MEM_TYPE_GDDR3:
        case NV_MEM_TYPE_DDR3:
                mr[2] = exec->mrg(exec, 2);
                /* fall through */
        default:
                mr[1] = exec->mrg(exec, 1);
                mr[0] = exec->mrg(exec, 0);
                break;
        }

        /* DLL 'on' -> DLL 'off' mode, disable before entering self-refresh */
        if (!(mr[1] & mr1_dlloff) && (info->mr[1] & mr1_dlloff)) {
                exec->precharge(exec);
                exec->mrs (exec, 1, mr[1] | mr1_dlloff);
                exec->wait(exec, tMRD);
        }

        /* enter self-refresh mode */
        exec->precharge(exec);
        exec->refresh(exec);
        exec->refresh(exec);
        exec->refresh_auto(exec, false);
        exec->refresh_self(exec, true);
        exec->wait(exec, tCKSRE);

        /* modify input clock frequency */
        exec->clock_set(exec);

        /* exit self-refresh mode */
        exec->wait(exec, tCKSRX);
        exec->precharge(exec);
        exec->refresh_self(exec, false);
        exec->refresh_auto(exec, true);
        exec->wait(exec, tXS);

        /* update MRs */
        if (mr[2] != info->mr[2]) {
                exec->mrs (exec, 2, info->mr[2]);
                exec->wait(exec, tMRD);
        }

        if (mr[1] != info->mr[1]) {
                /* need to keep DLL off until later, at least on GDDR3 */
                exec->mrs (exec, 1, info->mr[1] | (mr[1] & mr1_dlloff));
                exec->wait(exec, tMRD);
        }

        if (mr[0] != info->mr[0]) {
                exec->mrs (exec, 0, info->mr[0]);
                exec->wait(exec, tMRD);
        }

        /* update PFB timing registers */
        exec->timing_set(exec);

        /* DLL (enable + ) reset */
        if (!(info->mr[1] & mr1_dlloff)) {
                if (mr[1] & mr1_dlloff) {
                        exec->mrs (exec, 1, info->mr[1]);
                        exec->wait(exec, tMRD);
                }
                exec->mrs (exec, 0, info->mr[0] | 0x00000100);
                exec->wait(exec, tMRD);
                exec->mrs (exec, 0, info->mr[0] | 0x00000000);
                exec->wait(exec, tMRD);
                exec->wait(exec, tDLLK);
                if (dev_priv->vram_type == NV_MEM_TYPE_GDDR3)
                        exec->precharge(exec);
        }

        return 0;
}

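/*
 * Fall back to the VBIOS 'M' table to determine the RAM type, indexed by
 * the RAMCFG strap (register 0x101000, bits 5:2).
 */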
int
nouveau_mem_vbios_type(struct drm_device *dev)
{
        struct bit_entry M;
        u8 ramcfg = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2;

        /* only trust M.data if the table lookup actually succeeded */
        if (!bit_table(dev, 'M', &M) && M.version == 2 && M.length >= 5) {
                u8 *table = ROMPTR(dev, M.data[3]);
                if (table && table[0] == 0x10 && ramcfg < table[3]) {
                        u8 *entry = table + table[1] + (ramcfg * table[2]);
                        switch (entry[0] & 0x0f) {
                        case 0: return NV_MEM_TYPE_DDR2;
                        case 1: return NV_MEM_TYPE_DDR3;
                        case 2: return NV_MEM_TYPE_GDDR3;
                        case 3: return NV_MEM_TYPE_GDDR5;
                        default:
                                break;
                        }
                }
        }
        return NV_MEM_TYPE_UNKNOWN;
}

static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        /* nothing to do */
        return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
        /* nothing to do */
        return 0;
}

static inline void
nouveau_mem_node_cleanup(struct nouveau_mem *node)
{
        if (node->vma[0].node) {
                nouveau_vm_unmap(&node->vma[0]);
                nouveau_vm_put(&node->vma[0]);
        }

        if (node->vma[1].node) {
                nouveau_vm_unmap(&node->vma[1]);
                nouveau_vm_put(&node->vma[1]);
        }
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
        struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
        struct drm_device *dev = dev_priv->dev;

        nouveau_mem_node_cleanup(mem->mm_node);
        vram->put(dev, (struct nouveau_mem **)&mem->mm_node);
}

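/*
 * Allocate VRAM for a buffer object through the per-chipset VRAM engine.
 * A non-zero size_nc (here one BO page) allows the allocation to be split
 * into non-contiguous chunks of at least that size.
 */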
static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         struct ttm_placement *placement,
                         struct ttm_mem_reg *mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
        struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_mem *node;
        u32 size_nc = 0;
        int ret;

        if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
                size_nc = 1 << nvbo->page_shift;

        ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
                        mem->page_alignment << PAGE_SHIFT, size_nc,
                        (nvbo->tile_flags >> 8) & 0x3ff, &node);
        if (ret) {
                mem->mm_node = NULL;
                return (ret == -ENOSPC) ? 0 : ret;
        }

        node->page_shift = nvbo->page_shift;

        mem->mm_node = node;
        mem->start   = node->offset >> PAGE_SHIFT;
        return 0;
}

void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
        struct nouveau_mm *mm = man->priv;
        struct nouveau_mm_node *r;
        u32 total = 0, free = 0;

        mutex_lock(&mm->mutex);
        list_for_each_entry(r, &mm->nodes, nl_entry) {
                printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
                       prefix, r->type, ((u64)r->offset << 12),
                       (((u64)r->offset + r->length) << 12));

                total += r->length;
                if (!r->type)
                        free += r->length;
        }
        mutex_unlock(&mm->mutex);

        printk(KERN_DEBUG "%s  total: 0x%010llx free: 0x%010llx\n",
               prefix, (u64)total << 12, (u64)free << 12);
        printk(KERN_DEBUG "%s  block: 0x%08x\n",
               prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
        nouveau_vram_manager_init,
        nouveau_vram_manager_fini,
        nouveau_vram_manager_new,
        nouveau_vram_manager_del,
        nouveau_vram_manager_debug
};

static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
        return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
        return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
                         struct ttm_mem_reg *mem)
{
        nouveau_mem_node_cleanup(mem->mm_node);
        kfree(mem->mm_node);
        mem->mm_node = NULL;
}

static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
                         struct ttm_buffer_object *bo,
                         struct ttm_placement *placement,
                         struct ttm_mem_reg *mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_mem *node;

        if (unlikely((mem->num_pages << PAGE_SHIFT) >=
                     dev_priv->gart_info.aper_size))
                return -ENOMEM;

        node = kzalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
                return -ENOMEM;
        node->page_shift = 12;

        mem->mm_node = node;
        mem->start   = 0;
        return 0;
}

void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
        nouveau_gart_manager_init,
        nouveau_gart_manager_fini,
        nouveau_gart_manager_new,
        nouveau_gart_manager_del,
        nouveau_gart_manager_debug
};