/*
 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
 * Copyright 2005 Stephane Marchesin
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Skeggs <bskeggs@redhat.com>
 *    Roy Spliet <r.spliet@student.tudelft.nl>
 */
#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"

#include "nouveau_drv.h"
#include "nouveau_pm.h"
#include "nouveau_mm.h"
#include "nouveau_vm.h"
/*
 * NV10-NV40 tiling helpers
 */
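/* Reprogram one tile region and tell every engine about it.  The sequence
 * below pauses PFIFO (reassign and cache_pull off) and waits for idle before
 * touching PFB and per-engine tile state, then resumes the FIFO, so the
 * update cannot race a channel switch. */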
nv10_mem_update_tile_region(struct drm_device *dev,
			    struct nouveau_tile_reg *tile, uint32_t addr,
			    uint32_t size, uint32_t pitch, uint32_t flags)
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	int i = tile - dev_priv->tile.reg, j;

	nouveau_fence_unref(&tile->fence);

	pfb->free_tile_region(dev, i);

	pfb->init_tile_region(dev, i, addr, size, pitch, flags);

	spin_lock_irqsave(&dev_priv->context_switch_lock, save);
	pfifo->reassign(dev, false);
	pfifo->cache_pull(dev, false);

	nouveau_wait_for_idle(dev);

	pfb->set_tile_region(dev, i);
	for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
		if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
			dev_priv->eng[j]->set_tile_region(dev, i);
	}

	pfifo->cache_pull(dev, true);
	pfifo->reassign(dev, true);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
static struct nouveau_tile_reg *
nv10_mem_get_tile_region(struct drm_device *dev, int i)
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	spin_lock(&dev_priv->tile.lock);

	    (!tile->fence || nouveau_fence_signalled(tile->fence)))

	spin_unlock(&dev_priv->tile.lock);
nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
			 struct nouveau_fence *fence)
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	spin_lock(&dev_priv->tile.lock);

	/* Mark it as pending. */
	nouveau_fence_ref(fence);

	spin_unlock(&dev_priv->tile.lock);
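/* Find a tile region for the <addr, size, pitch, flags> request: scan the
 * PFB tile regions, claim the first suitable one when a tiled (non-zero
 * pitch) mapping is wanted, and tear down regions that are no longer in use
 * along the way. */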
struct nouveau_tile_reg *
nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
		    uint32_t pitch, uint32_t flags)
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct nouveau_tile_reg *tile, *found = NULL;

	for (i = 0; i < pfb->num_tiles; i++) {
		tile = nv10_mem_get_tile_region(dev, i);

		if (pitch && !found) {

		} else if (tile && tile->pitch) {
			/* Kill an unused tile region. */
			nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);

		nv10_mem_put_tile_region(dev, tile, NULL);

		nv10_mem_update_tile_region(dev, found, addr, size,
nouveau_mem_vram_fini(struct drm_device *dev)
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	ttm_bo_device_release(&dev_priv->ttm.bdev);

	nouveau_ttm_global_release(dev_priv);

	if (dev_priv->fb_mtrr >= 0) {
		drm_mtrr_del(dev_priv->fb_mtrr,
			     pci_resource_start(dev->pdev, 1),
			     pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
		dev_priv->fb_mtrr = -1;
nouveau_mem_gart_fini(struct drm_device *dev)
	nouveau_sgdma_takedown(dev);

	if (drm_core_has_AGP(dev) && dev->agp) {
		struct drm_agp_mem *entry, *tempe;

		/* Remove AGP resources, but leave dev->agp
		   intact until drv_cleanup is called. */
		list_for_each_entry_safe(entry, tempe, &dev->agp->memory, head) {
			drm_unbind_agp(entry->memory);
			drm_free_agp(entry->memory, entry->pages);

		INIT_LIST_HEAD(&dev->agp->memory);

		if (dev->agp->acquired)
			drm_agp_release(dev);

		dev->agp->acquired = 0;
		dev->agp->enabled = 0;
nouveau_mem_flags_valid(struct drm_device *dev, u32 tile_flags)
	if (!(tile_flags & NOUVEAU_GEM_TILE_LAYOUT_MASK))
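/* Sanitise the AGP mode advertised by the bridge: fast writes are masked off
 * on nv18, and a nouveau_agpmode= override replaces the rate bits (divided by
 * four when the bridge reports AGP 3.0, i.e. bit 3 set). */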
get_agp_mode(struct drm_device *dev, unsigned long mode)
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*
	 * FW seems to be broken on nv18, it makes the card lock up.
	 */
	if (dev_priv->chipset == 0x18)
		mode &= ~PCI_AGP_COMMAND_FW;

	/*
	 * AGP mode set in the command line.
	 */
	if (nouveau_agpmode > 0) {
		bool agpv3 = mode & 0x8;
		int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;

		mode = (mode & ~0x7) | (rate & 0x7);
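/* Soft-reset the card's AGP interface.  Fast writes are turned off first via
 * drm_agp_enable() so we cannot lock ourselves out, bus mastering and the AGP
 * enable register are cleared, PGRAPH is power-cycled through PMC, and the
 * saved PCI config word is written back to finish the reset. */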
nouveau_mem_reset_agp(struct drm_device *dev)
	uint32_t saved_pci_nv_1, pmc_enable;

	/* First of all, disable fast writes, otherwise if it's
	 * already enabled in the AGP bridge and we disable the card's
	 * AGP controller we might be locking ourselves out of it. */
	if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) |
	     dev->agp->mode) & PCI_AGP_COMMAND_FW) {
		struct drm_agp_info info;
		struct drm_agp_mode mode;

		ret = drm_agp_info(dev, &info);

		mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW;
		ret = drm_agp_enable(dev, mode);

	saved_pci_nv_1 = nv_rd32(dev, NV04_PBUS_PCI_NV_1);

	/* clear busmaster bit */
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1 & ~0x4);

	nv_wr32(dev, NV04_PBUS_PCI_NV_19, 0);

	/* power cycle pgraph, if enabled */
	pmc_enable = nv_rd32(dev, NV03_PMC_ENABLE);
	if (pmc_enable & NV_PMC_ENABLE_PGRAPH) {
		nv_wr32(dev, NV03_PMC_ENABLE,
			pmc_enable & ~NV_PMC_ENABLE_PGRAPH);
		nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
			NV_PMC_ENABLE_PGRAPH);

	/* and restore (gives effect of resetting AGP) */
	nv_wr32(dev, NV04_PBUS_PCI_NV_1, saved_pci_nv_1);
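/* Bring the AGP aperture up for use as the GART: acquire the device if
 * needed, reset it, query the bridge, enable the sanitised mode and record
 * the aperture base/size in gart_info. */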
nouveau_mem_init_agp(struct drm_device *dev)
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_agp_info info;
	struct drm_agp_mode mode;

	if (!dev->agp->acquired) {
		ret = drm_agp_acquire(dev);
			NV_ERROR(dev, "Unable to acquire AGP: %d\n", ret);

	nouveau_mem_reset_agp(dev);

	ret = drm_agp_info(dev, &info);
		NV_ERROR(dev, "Unable to get AGP info: %d\n", ret);

	/* see agp.h for the AGPSTAT_* modes available */
	mode.mode = get_agp_mode(dev, info.mode);
	ret = drm_agp_enable(dev, mode);
		NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);

	dev_priv->gart_info.type = NOUVEAU_GART_AGP;
	dev_priv->gart_info.aper_base = info.aperture_base;
	dev_priv->gart_info.aper_size = info.aperture_size;
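/* Printable names for the detected VRAM type.  NV_MEM_TYPE_UNKNOWN terminates
 * the table; the names are also matched (case-insensitively) against the
 * nouveau_vram_type= module parameter below. */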
static const struct vram_types {
	int value;
	const char *name;
} vram_type_map[] = {
	{ NV_MEM_TYPE_STOLEN , "stolen system memory" },
	{ NV_MEM_TYPE_SGRAM  , "SGRAM" },
	{ NV_MEM_TYPE_SDRAM  , "SDRAM" },
	{ NV_MEM_TYPE_DDR1   , "DDR1" },
	{ NV_MEM_TYPE_DDR2   , "DDR2" },
	{ NV_MEM_TYPE_DDR3   , "DDR3" },
	{ NV_MEM_TYPE_GDDR2  , "GDDR2" },
	{ NV_MEM_TYPE_GDDR3  , "GDDR3" },
	{ NV_MEM_TYPE_GDDR4  , "GDDR4" },
	{ NV_MEM_TYPE_GDDR5  , "GDDR5" },
	{ NV_MEM_TYPE_UNKNOWN, "unknown type" }
};
nouveau_mem_vram_init(struct drm_device *dev)
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	const struct vram_types *vram_type;

	if (dev_priv->card_type >= NV_50) {
		if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
	if (0 && pci_is_pcie(dev->pdev) &&
	    dev_priv->chipset > 0x40 &&
	    dev_priv->chipset != 0x45) {
		if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))

	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));

	ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
		/* Reset to default value. */
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));

	ret = nouveau_ttm_global_init(dev_priv);

	ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
				 dev_priv->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
				 dma_bits <= 32 ? true : false);
		NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);

	vram_type = vram_type_map;
	while (vram_type->value != NV_MEM_TYPE_UNKNOWN) {
		if (nouveau_vram_type) {
			if (!strcasecmp(nouveau_vram_type, vram_type->name))
			dev_priv->vram_type = vram_type->value;
			if (vram_type->value == dev_priv->vram_type)

	NV_INFO(dev, "Detected %dMiB VRAM (%s)\n",
		(int)(dev_priv->vram_size >> 20), vram_type->name);
	if (dev_priv->vram_sys_base) {
		NV_INFO(dev, "Stolen system memory at: 0x%010llx\n",
			dev_priv->vram_sys_base);

	dev_priv->fb_available_size = dev_priv->vram_size;
	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
	if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
		dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;

	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
	dev_priv->fb_aper_free = dev_priv->fb_available_size;

	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     dev_priv->fb_available_size >> PAGE_SHIFT);
		NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
				     0, 0, NULL, &dev_priv->vga_ram);
		ret = nouveau_bo_pin(dev_priv->vga_ram,
			NV_WARN(dev, "failed to reserve VGA memory\n");
			nouveau_bo_ref(NULL, &dev_priv->vga_ram);

	dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
					 pci_resource_len(dev->pdev, 1),
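/* GART bring-up: try AGP first on AGP-capable hardware (unless nouveau_agpmode
 * disables it), fall back to SGDMA for PCI/PCIe, then register the resulting
 * aperture with TTM as the TT domain. */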
nouveau_mem_gart_init(struct drm_device *dev)
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;

	dev_priv->gart_info.type = NOUVEAU_GART_NONE;

#if !defined(__powerpc__) && !defined(__ia64__)
	if (drm_pci_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
		ret = nouveau_mem_init_agp(dev);
			NV_ERROR(dev, "Error initialising AGP: %d\n", ret);

	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
		ret = nouveau_sgdma_init(dev);
			NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);

	NV_INFO(dev, "%d MiB GART (aperture)\n",
		(int)(dev_priv->gart_info.aper_size >> 20));
	dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;

	ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
			     dev_priv->gart_info.aper_size >> PAGE_SHIFT);
		NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
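/* Timing calculation for NV40-family PFB: pack the VBIOS timing entry
 * (tRP/tRAS/tRFC/tRC and friends) into the three timing registers reported
 * as the "220" triplet in the debug output, using the boot-time CAS write
 * latency (tCWL) as the reference point. */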
nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
	t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);

	/* XXX: I don't trust the -1's and +1's... they must come
	 *      from somewhere! */
	t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
		    (e->tWTR + 2 + (t->tCWL - 1)) << 8 |
		    (e->tCL + 2 - (t->tCWL - 1));

	t->reg[2] = 0x20200000 |
		    ((t->tCWL - 1) << 24 |

	NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", t->id,
		 t->reg[0], t->reg[1], t->reg[2]);
nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;

	if (bit_table(dev, 'P', &P))

	switch (min(len, (u8) 22)) {

	t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);

	t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
		    max(unk18, (u8) 1) << 16 |
		    (e->tWTR + 2 + (t->tCWL - 1)) << 8;

	t->reg[2] = ((t->tCWL - 1) << 24 |

	t->reg[4] = e->tUNK_13 << 8 | e->tUNK_13;

	t->reg[5] = (e->tRFC << 24 | max(e->tRCDRD, e->tRCDWR) << 16 | e->tRP);

	t->reg[8] = boot->reg[8] & 0xffffff00;

	if (P.version == 1) {
		t->reg[1] |= (e->tCL + 2 - (t->tCWL - 1));

		t->reg[3] = (0x14 + e->tCL) << 24 |

		t->reg[4] |= boot->reg[4] & 0xffff0000;

		t->reg[6] = (0x33 - t->tCWL) << 16 |
			    (0x2e + e->tCL - t->tCWL);

		t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;

		/* XXX: P.version == 1 only has DDR2 and GDDR3? */
		if (dev_priv->vram_type == NV_MEM_TYPE_DDR2) {
			t->reg[5] |= (e->tCL + 3) << 8;
			t->reg[6] |= (t->tCWL - 2) << 8;
			t->reg[8] |= (e->tCL - 4);
			t->reg[5] |= (e->tCL + 2) << 8;
			t->reg[6] |= t->tCWL << 8;
			t->reg[8] |= (e->tCL - 2);

		t->reg[1] |= (5 + e->tCL - (t->tCWL));

		/* XXX: 0xb? 0x30? */
		t->reg[3] = (0x30 + e->tCL) << 24 |
			    (boot->reg[3] & 0x00ff0000) |
			    (0xb + e->tCL) << 8 |

		t->reg[4] |= (unk20 << 24 | unk21 << 16);

		t->reg[5] |= (t->tCWL + 6) << 8;

		t->reg[6] = (0x5a + e->tCL) << 16 |
			    (6 - e->tCL + t->tCWL) << 8 |
			    (0x50 + e->tCL - t->tCWL);

		tmp7_3 = (boot->reg[7] & 0xff000000) >> 24;
		t->reg[7] = (tmp7_3 << 24) |
			    ((tmp7_3 - 6 + e->tCL) << 16) |

	NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
	NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n",
		 t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
	NV_DEBUG(dev, " 240: %08x\n", t->reg[8]);
nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
	t->reg[0] = (e->tRP << 24 | (e->tRAS & 0x7f) << 17 |
		     e->tRFC << 8 | e->tRC);

	t->reg[1] = (boot->reg[1] & 0xff000000) |
		    (e->tRCDWR & 0x0f) << 20 |
		    (e->tRCDRD & 0x0f) << 14 |

	t->reg[2] = (boot->reg[2] & 0xff0000ff) |
		    e->tWR << 16 | e->tWTR << 8;

	t->reg[3] = (e->tUNK_20 & 0x1f) << 9 |
		    (e->tUNK_21 & 0xf) << 5 |

	t->reg[4] = (boot->reg[4] & 0xfff00fff) |
		    (e->tRRD & 0x1f) << 15;

	NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
	NV_DEBUG(dev, " 2a0: %08x\n", t->reg[4]);
/*
 * MR generation methods
 */
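/* Each helper below validates the VBIOS timing entry against the limits of
 * its DRAM type (tCL/tWR lookup tables, ODT and drive-strength fields taken
 * from RAM_FT1) and splices the new values into the boot-time mode-register
 * contents so that unrelated MR bits are preserved. */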
nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
		    struct nouveau_pm_tbl_entry *e, u8 len,
		    struct nouveau_pm_memtiming *boot,
		    struct nouveau_pm_memtiming *t)
	t->drive_strength = 0;

	t->odt = e->RAM_FT1 & 0x07;

	if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);

	if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);

		NV_WARN(dev, "(%u) Invalid odt value, assuming disabled: %x",

	t->mr[0] = (boot->mr[0] & 0x100f) |

	t->mr[1] = (boot->mr[1] & 0x101fbb) |
		   (t->odt & 0x1) << 2 |

	NV_DEBUG(dev, "(%u) MR: %08x", t->id, t->mr[0]);
uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
	0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};

nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
		    struct nouveau_pm_tbl_entry *e, u8 len,
		    struct nouveau_pm_memtiming *boot,
		    struct nouveau_pm_memtiming *t)
	t->drive_strength = 0;

	t->odt = e->RAM_FT1 & 0x07;

	if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);

	if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);

		NV_WARN(dev, "(%u) Invalid tCWL: %u", t->id, e->tCWL);

	t->mr[0] = (boot->mr[0] & 0x180b) |
		   (nv_mem_wr_lut_ddr3[e->tWR]) << 9;
	t->mr[1] = (boot->mr[1] & 0x101dbb) |
		   (t->odt & 0x1) << 2 |
		   (t->odt & 0x2) << 5 |
	t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;

	NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
	0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
	0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};

nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
	t->drive_strength = boot->drive_strength;

	t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
	t->odt = e->RAM_FT1 & 0x07;

	if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);

	if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);

		NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",

	t->mr[0] = (boot->mr[0] & 0xe0b) |
		   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x7) << 4) |
		   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x8) >> 2);
	t->mr[1] = (boot->mr[1] & 0x100f40) | t->drive_strength |
		   (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;
	t->mr[2] = boot->mr[2];

	NV_DEBUG(dev, "(%u) MR: %08x %08x %08x", t->id,
		 t->mr[0], t->mr[1], t->mr[2]);
nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
	t->drive_strength = boot->drive_strength;

	t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
	t->odt = e->RAM_FT1 & 0x03;

	if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);

	if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);

		NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",

	t->mr[0] = (boot->mr[0] & 0x007) |
		   ((e->tCL - 5) << 3) |
	t->mr[1] = (boot->mr[1] & 0x1007f0) |

	NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
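/* Top-level entry point: look up the timing table entry for the requested
 * frequency, dispatch to the per-generation register packer, generate mode
 * register values for the detected DRAM type, and finally fold in the DLL
 * disable bit requested by the ramcfg table. */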
nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
			struct nouveau_pm_memtiming *t)
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	struct nouveau_pm_memtiming *boot = &pm->boot.timing;
	struct nouveau_pm_tbl_entry *e;
	u8 ver, len, *ptr, *ramcfg;

	ptr = nouveau_perf_timing(dev, freq, &ver, &len);
	if (!ptr || ptr[0] == 0x00) {

	e = (struct nouveau_pm_tbl_entry *)ptr;

	t->tCWL = boot->tCWL;

	switch (dev_priv->card_type) {
		ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t);
		ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
		ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
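	/* Multiplying by !ret forces the default case when the per-generation
	 * timing calculation above has already failed. */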
	switch (dev_priv->vram_type * !ret) {
	case NV_MEM_TYPE_GDDR3:
		ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t);
	case NV_MEM_TYPE_GDDR5:
		ret = nouveau_mem_gddr5_mr(dev, freq, e, len, boot, t);
	case NV_MEM_TYPE_DDR2:
		ret = nouveau_mem_ddr2_mr(dev, freq, e, len, boot, t);
	case NV_MEM_TYPE_DDR3:
		ret = nouveau_mem_ddr3_mr(dev, freq, e, len, boot, t);

	ramcfg = nouveau_perf_ramcfg(dev, freq, &ver, &len);
		dll_off = !!(ramcfg[3] & 0x04);
		dll_off = !!(ramcfg[2] & 0x40);

		switch (dev_priv->vram_type) {
		case NV_MEM_TYPE_GDDR3:
			t->mr[1] &= ~0x00000040;
			t->mr[1] |= 0x00000040 * dll_off;
			t->mr[1] &= ~0x00000001;
			t->mr[1] |= 0x00000001 * dll_off;
nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 timing_base, timing_regs, mr_base;

	if (dev_priv->card_type >= 0xC0) {
		timing_base = 0x10f290;
		timing_base = 0x100220;

	switch (dev_priv->card_type) {

	for (i = 0; i < timing_regs; i++)
		t->reg[i] = nv_rd32(dev, timing_base + (0x04 * i));

	if (dev_priv->card_type < NV_C0) {
		t->tCWL = ((nv_rd32(dev, 0x100228) & 0x0f000000) >> 24) + 1;
	} else if (dev_priv->card_type <= NV_D0) {
		t->tCWL = ((nv_rd32(dev, 0x10f294) & 0x00000f80) >> 7);

	t->mr[0] = nv_rd32(dev, mr_base);
	t->mr[1] = nv_rd32(dev, mr_base + 0x04);
	t->mr[2] = nv_rd32(dev, mr_base + 0x20);
	t->mr[3] = nv_rd32(dev, mr_base + 0x24);

	t->drive_strength = 0;

	switch (dev_priv->vram_type) {
	case NV_MEM_TYPE_DDR3:
		t->odt |= (t->mr[1] & 0x200) >> 7;
	case NV_MEM_TYPE_DDR2:
		t->odt |= (t->mr[1] & 0x04) >> 2 |
			  (t->mr[1] & 0x40) >> 5;
	case NV_MEM_TYPE_GDDR3:
	case NV_MEM_TYPE_GDDR5:
		t->drive_strength = t->mr[1] & 0x03;
		t->odt = (t->mr[1] & 0x0c) >> 2;
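/* Memory reclock sequence, driven entirely through the exec callbacks:
 * disable the DLL if the new settings want it off, put the DRAM into
 * self-refresh, switch the input clock, leave self-refresh, rewrite the mode
 * registers and PFB timings, and finally (re)enable and reset the DLL when
 * the new settings run with it on.  The tMRD/tCKSRE/tCKSRX/tXS/tDLLK values
 * are handed straight to exec->wait(). */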
nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
		 struct nouveau_pm_level *perflvl)
	struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
	struct nouveau_pm_memtiming *info = &perflvl->timing;
	u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0;
	u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] };

	switch (dev_priv->vram_type) {
	case NV_MEM_TYPE_DDR2:
		mr1_dlloff = 0x00000001;
	case NV_MEM_TYPE_DDR3:
		mr1_dlloff = 0x00000001;
	case NV_MEM_TYPE_GDDR3:
		mr1_dlloff = 0x00000040;
		NV_ERROR(exec->dev, "cannot reclock unsupported memtype\n");

	/* fetch current MRs */
	switch (dev_priv->vram_type) {
	case NV_MEM_TYPE_GDDR3:
	case NV_MEM_TYPE_DDR3:
		mr[2] = exec->mrg(exec, 2);

	mr[1] = exec->mrg(exec, 1);
	mr[0] = exec->mrg(exec, 0);

	/* DLL 'on' -> DLL 'off' mode, disable before entering self-refresh */
	if (!(mr[1] & mr1_dlloff) && (info->mr[1] & mr1_dlloff)) {
		exec->precharge(exec);
		exec->mrs (exec, 1, mr[1] | mr1_dlloff);
		exec->wait(exec, tMRD);

	/* enter self-refresh mode */
	exec->precharge(exec);
	exec->refresh(exec);
	exec->refresh(exec);
	exec->refresh_auto(exec, false);
	exec->refresh_self(exec, true);
	exec->wait(exec, tCKSRE);

	/* modify input clock frequency */
	exec->clock_set(exec);

	/* exit self-refresh mode */
	exec->wait(exec, tCKSRX);
	exec->precharge(exec);
	exec->refresh_self(exec, false);
	exec->refresh_auto(exec, true);
	exec->wait(exec, tXS);
	exec->wait(exec, tXS);

	if (mr[2] != info->mr[2]) {
		exec->mrs (exec, 2, info->mr[2]);
		exec->wait(exec, tMRD);

	if (mr[1] != info->mr[1]) {
		/* need to keep DLL off until later, at least on GDDR3 */
		exec->mrs (exec, 1, info->mr[1] | (mr[1] & mr1_dlloff));
		exec->wait(exec, tMRD);

	if (mr[0] != info->mr[0]) {
		exec->mrs (exec, 0, info->mr[0]);
		exec->wait(exec, tMRD);

	/* update PFB timing registers */
	exec->timing_set(exec);

	/* DLL (enable + ) reset */
	if (!(info->mr[1] & mr1_dlloff)) {
		if (mr[1] & mr1_dlloff) {
			exec->mrs (exec, 1, info->mr[1]);
			exec->wait(exec, tMRD);
		exec->mrs (exec, 0, info->mr[0] | 0x00000100);
		exec->wait(exec, tMRD);
		exec->mrs (exec, 0, info->mr[0] | 0x00000000);
		exec->wait(exec, tMRD);
		exec->wait(exec, tDLLK);
		if (dev_priv->vram_type == NV_MEM_TYPE_GDDR3)
			exec->precharge(exec);
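/* Work out the DRAM type from the VBIOS: index the BIT 'M' table's memory
 * map with the strap value read from 0x101000 and translate the low nibble
 * of the entry; anything unrecognised is reported as NV_MEM_TYPE_UNKNOWN. */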
nouveau_mem_vbios_type(struct drm_device *dev)
	u8 ramcfg = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2;
	if (!bit_table(dev, 'M', &M) && M.version == 2 && M.length >= 5) {
		u8 *table = ROMPTR(dev, M.data[3]);
		if (table && table[0] == 0x10 && ramcfg < table[3]) {
			u8 *entry = table + table[1] + (ramcfg * table[2]);
			switch (entry[0] & 0x0f) {
			case 0: return NV_MEM_TYPE_DDR2;
			case 1: return NV_MEM_TYPE_DDR3;
			case 2: return NV_MEM_TYPE_GDDR3;
			case 3: return NV_MEM_TYPE_GDDR5;

	return NV_MEM_TYPE_UNKNOWN;
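/* TTM memory-manager backends.  The VRAM manager allocates through the
 * nouveau_vram_engine so the tile_flags-derived attributes are honoured;
 * the GART manager only hands out a nouveau_mem node per buffer, with the
 * actual placement presumably resolved when the buffer is bound. */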
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)

nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)

nouveau_mem_node_cleanup(struct nouveau_mem *node)
	if (node->vma[0].node) {
		nouveau_vm_unmap(&node->vma[0]);
		nouveau_vm_put(&node->vma[0]);

	if (node->vma[1].node) {
		nouveau_vm_unmap(&node->vma[1]);
		nouveau_vm_put(&node->vma[1]);
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
	struct drm_device *dev = dev_priv->dev;

	nouveau_mem_node_cleanup(mem->mm_node);
	vram->put(dev, (struct nouveau_mem **)&mem->mm_node);

nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_mem *node;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = vram->get(dev, mem->num_pages << PAGE_SHIFT,
			mem->page_alignment << PAGE_SHIFT, size_nc,
			(nvbo->tile_flags >> 8) & 0x3ff, &node);
		mem->mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start = node->offset >> PAGE_SHIFT;
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
	struct nouveau_mm *mm = man->priv;
	struct nouveau_mm_node *r;
	u32 total = 0, free = 0;

	mutex_lock(&mm->mutex);
	list_for_each_entry(r, &mm->nodes, nl_entry) {
		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
		       prefix, r->type, ((u64)r->offset << 12),
		       (((u64)r->offset + r->length) << 12));

	mutex_unlock(&mm->mutex);

	printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
	       prefix, (u64)total << 12, (u64)free << 12);
	printk(KERN_DEBUG "%s block: 0x%08x\n",
	       prefix, mm->block_size << 12);
const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	nouveau_vram_manager_init,
	nouveau_vram_manager_fini,
	nouveau_vram_manager_new,
	nouveau_vram_manager_del,
	nouveau_vram_manager_debug
};
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)

nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)

nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
	nouveau_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;

nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_mem *node;

	if (unlikely((mem->num_pages << PAGE_SHIFT) >=
		     dev_priv->gart_info.aper_size))

	node = kzalloc(sizeof(*node), GFP_KERNEL);

	node->page_shift = 12;

	mem->mm_node = node;

nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	nouveau_gart_manager_init,
	nouveau_gart_manager_fini,
	nouveau_gart_manager_new,
	nouveau_gart_manager_del,
	nouveau_gart_manager_debug
};