/*
 * Copyright © 2010 Daniel Vetter
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"
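/*
 * A note on the address encoding below: gen6+ GTT entries are only 32 bits
 * wide, so a 40-bit physical address cannot be stored directly.
 * GEN6_GTT_ADDR_ENCODE() keeps address bits 31:12 in place and folds
 * address bits 39:32 into entry bits 11:4.  As a worked example, a page at
 * physical address 0x1_2345_6000 encodes as 0x23456010 (bit 32 ends up in
 * bit 4) before the valid and cache-control bits below are ORed in.
 */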
#define GEN6_GTT_ADDR_ENCODE(addr)	((addr) | (((addr) >> 28) & 0xff0))

#define GEN6_PDE_VALID			(1 << 0)
/* gen6+ has bit 11-4 for physical addr bit 39-32 */
#define GEN6_PDE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)

#define GEN6_PTE_VALID			(1 << 0)
#define GEN6_PTE_UNCACHED		(1 << 1)
#define HSW_PTE_UNCACHED		(0)
#define GEN6_PTE_CACHE_LLC		(2 << 1)
#define GEN6_PTE_CACHE_LLC_MLC		(3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr)	GEN6_GTT_ADDR_ENCODE(addr)
static gen6_gtt_pte_t gen6_pte_encode(struct drm_device *dev,
				      dma_addr_t addr,
				      enum i915_cache_level level)
{
	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	switch (level) {
	case I915_CACHE_LLC_MLC:
		/* Haswell doesn't set L3 this way */
		if (IS_HASWELL(dev))
			pte |= GEN6_PTE_CACHE_LLC;
		else
			pte |= GEN6_PTE_CACHE_LLC_MLC;
		break;
	case I915_CACHE_LLC:
		pte |= GEN6_PTE_CACHE_LLC;
		break;
	case I915_CACHE_NONE:
		if (IS_HASWELL(dev))
			pte |= HSW_PTE_UNCACHED;
		else
			pte |= GEN6_PTE_UNCACHED;
		break;
	default:
		BUG();
	}

	return pte;
}
#define BYT_PTE_WRITEABLE		(1 << 1)
#define BYT_PTE_SNOOPED_BY_CPU_CACHES	(1 << 2)
static gen6_gtt_pte_t byt_pte_encode(struct drm_device *dev,
				     dma_addr_t addr,
				     enum i915_cache_level level)
{
	gen6_gtt_pte_t pte = GEN6_PTE_VALID;
	pte |= GEN6_PTE_ADDR_ENCODE(addr);

	/* Mark the page as writeable. Other platforms don't have a
	 * setting for read-only/writable, so this matches that behavior.
	 */
	pte |= BYT_PTE_WRITEABLE;

	if (level != I915_CACHE_NONE)
		pte |= BYT_PTE_SNOOPED_BY_CPU_CACHES;

	return pte;
}
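/*
 * Rough summary of what gen6_ppgtt_enable() below does: write one valid PDE
 * per allocated page table into the slice of the GSM reserved at
 * ppgtt->pd_offset, set the chicken bits that control PPGTT behaviour
 * (GAC_ECO_BITS, GAB_CTL, GAM_ECOCHK, GFX_MODE), and finally point each ring
 * at the page directory through PP_DIR_DCLV/PP_DIR_BASE.
 */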
static int gen6_ppgtt_enable(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	uint32_t pd_offset;
	struct intel_ring_buffer *ring;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
	gen6_gtt_pte_t __iomem *pd_addr;
	uint32_t pd_entry;
	int i;

	/* Write a valid PDE for each page table into our slice of the GSM. */
	pd_addr = (gen6_gtt_pte_t __iomem*)dev_priv->gtt.gsm +
		ppgtt->pd_offset / sizeof(gen6_gtt_pte_t);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = ppgtt->pt_dma_addr[i];
		pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
		pd_entry |= GEN6_PDE_VALID;

		writel(pd_entry, pd_addr + i);
	}
	readl(pd_addr);

	pd_offset = ppgtt->pd_offset;
	pd_offset /= 64; /* in cachelines, */
	pd_offset <<= 16;

	if (INTEL_INFO(dev)->gen == 6) {
		uint32_t ecochk, gab_ctl, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_SNB_BIT |
					 ECOBITS_PPGTT_CACHE64B);

		gab_ctl = I915_READ(GAB_CTL);
		I915_WRITE(GAB_CTL, gab_ctl | GAB_CTL_CONT_AFTER_PAGEFAULT);

		ecochk = I915_READ(GAM_ECOCHK);
		I915_WRITE(GAM_ECOCHK, ecochk | ECOCHK_SNB_BIT |
				       ECOCHK_PPGTT_CACHE64B);
		I915_WRITE(GFX_MODE, _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
	} else if (INTEL_INFO(dev)->gen >= 7) {
		uint32_t ecochk, ecobits;

		ecobits = I915_READ(GAC_ECO_BITS);
		I915_WRITE(GAC_ECO_BITS, ecobits | ECOBITS_PPGTT_CACHE64B);

		ecochk = I915_READ(GAM_ECOCHK);
		if (IS_HASWELL(dev)) {
			ecochk |= ECOCHK_PPGTT_WB_HSW;
		} else {
			ecochk |= ECOCHK_PPGTT_LLC_IVB;
			ecochk &= ~ECOCHK_PPGTT_GFDT_IVB;
		}
		I915_WRITE(GAM_ECOCHK, ecochk);
		/* GFX_MODE is per-ring on gen7+ */
	}

	for_each_ring(ring, dev_priv, i) {
		if (INTEL_INFO(dev)->gen >= 7)
			I915_WRITE(RING_MODE_GEN7(ring),
				   _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));

		I915_WRITE(RING_PP_DIR_DCLV(ring), PP_DIR_DCLV_2G);
		I915_WRITE(RING_PP_DIR_BASE(ring), pd_offset);
	}

	return 0;
}
/* PPGTT support for Sandybridge/Gen6 and later */
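/*
 * The gen6 PPGTT is a two-level structure: a page directory of
 * I915_PPGTT_PD_ENTRIES PDEs, each pointing at a page table holding
 * I915_PPGTT_PT_ENTRIES PTEs.  The helpers below therefore split a linear
 * entry index into a page table number and an offset within that table;
 * with 1024 PTEs per table, entry 1536 would be PTE 512 of page table 1,
 * for example.
 */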
static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
				   unsigned first_entry,
				   unsigned num_entries)
{
	gen6_gtt_pte_t *pt_vaddr, scratch_pte;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	unsigned last_pte, i;

	scratch_pte = ppgtt->pte_encode(ppgtt->dev,
					ppgtt->scratch_page_dma_addr,
					I915_CACHE_LLC);

	while (num_entries) {
		last_pte = first_pte + num_entries;
		if (last_pte > I915_PPGTT_PT_ENTRIES)
			last_pte = I915_PPGTT_PT_ENTRIES;

		pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);

		for (i = first_pte; i < last_pte; i++)
			pt_vaddr[i] = scratch_pte;

		kunmap_atomic(pt_vaddr);

		num_entries -= last_pte - first_pte;
		first_pte = 0;
		act_pt++;
	}
}
static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
				      struct sg_table *pages,
				      unsigned first_entry,
				      enum i915_cache_level cache_level)
{
	gen6_gtt_pte_t *pt_vaddr;
	unsigned act_pt = first_entry / I915_PPGTT_PT_ENTRIES;
	unsigned act_pte = first_entry % I915_PPGTT_PT_ENTRIES;
	struct sg_page_iter sg_iter;

	pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
	for_each_sg_page(pages->sgl, &sg_iter, pages->nents, 0) {
		dma_addr_t page_addr;

		page_addr = sg_page_iter_dma_address(&sg_iter);
		pt_vaddr[act_pte] = ppgtt->pte_encode(ppgtt->dev, page_addr,
						      cache_level);
		if (++act_pte == I915_PPGTT_PT_ENTRIES) {
			kunmap_atomic(pt_vaddr);
			act_pt++;
			pt_vaddr = kmap_atomic(ppgtt->pt_pages[act_pt]);
			act_pte = 0;
		}
	}
	kunmap_atomic(pt_vaddr);
}
static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
	int i;

	if (ppgtt->pt_dma_addr) {
		for (i = 0; i < ppgtt->num_pd_entries; i++)
			pci_unmap_page(ppgtt->dev->pdev,
				       ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}

	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++)
		__free_page(ppgtt->pt_pages[i]);
	kfree(ppgtt->pt_pages);
	kfree(ppgtt);
}
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
	struct drm_device *dev = ppgtt->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned first_pd_entry_in_global_pt;
	int i;
	int ret = -ENOMEM;

	/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
	 * entries. For aliasing ppgtt support we just steal them at the end for
	 * now. */
	first_pd_entry_in_global_pt =
		gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES;

	if (IS_VALLEYVIEW(dev)) {
		ppgtt->pte_encode = byt_pte_encode;
	} else {
		ppgtt->pte_encode = gen6_pte_encode;
	}
	ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
	ppgtt->enable = gen6_ppgtt_enable;
	ppgtt->clear_range = gen6_ppgtt_clear_range;
	ppgtt->insert_entries = gen6_ppgtt_insert_entries;
	ppgtt->cleanup = gen6_ppgtt_cleanup;
	ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
				  GFP_KERNEL);
	if (!ppgtt->pt_pages)
		return -ENOMEM;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
		if (!ppgtt->pt_pages[i])
			goto err_pt_alloc;
	}

	ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries,
				     GFP_KERNEL);
	if (!ppgtt->pt_dma_addr)
		goto err_pt_alloc;

	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		dma_addr_t pt_addr;

		pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
				       PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
			ret = -EIO;
			goto err_pd_pin;
		}
		ppgtt->pt_dma_addr[i] = pt_addr;
	}

	ppgtt->clear_range(ppgtt, 0,
			   ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);

	/* The PD slot stolen above, expressed as a byte offset into the GSM. */
	ppgtt->pd_offset = first_pd_entry_in_global_pt * sizeof(gen6_gtt_pte_t);

	return 0;

err_pd_pin:
	if (ppgtt->pt_dma_addr) {
		for (i--; i >= 0; i--)
			pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
				       4096, PCI_DMA_BIDIRECTIONAL);
	}
err_pt_alloc:
	kfree(ppgtt->pt_dma_addr);
	for (i = 0; i < ppgtt->num_pd_entries; i++) {
		if (ppgtt->pt_pages[i])
			__free_page(ppgtt->pt_pages[i]);
	}
	kfree(ppgtt->pt_pages);

	return ret;
}
static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt;
	int ret;

	ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
	if (!ppgtt)
		return -ENOMEM;

	ppgtt->dev = dev;
	ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;

	if (INTEL_INFO(dev)->gen < 8)
		ret = gen6_ppgtt_init(ppgtt);
	else
		BUG();

	if (ret)
		kfree(ppgtt);
	else
		dev_priv->mm.aliasing_ppgtt = ppgtt;

	return ret;
}
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;

	if (!ppgtt)
		return;

	ppgtt->cleanup(ppgtt);
	dev_priv->mm.aliasing_ppgtt = NULL;
}
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
			    struct drm_i915_gem_object *obj,
			    enum i915_cache_level cache_level)
{
	ppgtt->insert_entries(ppgtt, obj->pages,
			      obj->gtt_space->start >> PAGE_SHIFT,
			      cache_level);
}
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj)
{
	ppgtt->clear_range(ppgtt,
			   obj->gtt_space->start >> PAGE_SHIFT,
			   obj->base.size >> PAGE_SHIFT);
}
extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
 * unmapping anything from the GTT when VT-d is enabled.
 */
static inline bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
	/* Query intel_iommu to see if we need the workaround. Presumably that
	 * was loaded first.
	 */
	if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
		return true;
#endif
	return false;
}
static bool do_idling(struct drm_i915_private *dev_priv)
{
	bool ret = dev_priv->mm.interruptible;

	if (unlikely(dev_priv->gtt.do_idle_maps)) {
		dev_priv->mm.interruptible = false;
		if (i915_gpu_idle(dev_priv->dev)) {
			DRM_ERROR("Couldn't idle GPU\n");
			/* Wait a bit, in hopes it avoids the hang */
			udelay(10);
		}
	}

	return ret;
}
static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
	if (unlikely(dev_priv->gtt.do_idle_maps))
		dev_priv->mm.interruptible = interruptible;
}
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;

	/* First fill our portion of the GTT with scratch pages */
	dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
				      dev_priv->gtt.total / PAGE_SIZE);

	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		i915_gem_clflush_object(obj);
		i915_gem_gtt_bind_object(obj, obj->cache_level);
	}

	i915_gem_chipset_flush(dev);
}
int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
	if (obj->has_dma_mapping)
		return 0;

	if (!dma_map_sg(&obj->base.dev->pdev->dev,
			obj->pages->sgl, obj->pages->nents,
			PCI_DMA_BIDIRECTIONAL))
		return -ENOSPC;

	return 0;
}
/*
 * Binds an object into the global gtt with the specified cache level. The object
 * will be accessible to the GPU via commands whose operands reference offsets
 * within the global GTT as well as accessible by the GPU through the GMADR
 * mapped BAR (dev_priv->mm.gtt->gtt).
 */
static void gen6_ggtt_insert_entries(struct drm_device *dev,
				     struct sg_table *st,
				     unsigned int first_entry,
				     enum i915_cache_level level)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	gen6_gtt_pte_t __iomem *gtt_entries =
		(gen6_gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
	int i = 0;
	struct sg_page_iter sg_iter;
	dma_addr_t addr;

	for_each_sg_page(st->sgl, &sg_iter, st->nents, 0) {
		addr = sg_page_iter_dma_address(&sg_iter);
		iowrite32(dev_priv->gtt.pte_encode(dev, addr, level),
			  &gtt_entries[i]);
		i++;
	}

	/* XXX: This serves as a posting read to make sure that the PTE has
	 * actually been updated. There is some concern that even though
	 * registers and PTEs are within the same BAR that they are potentially
	 * of NUMA access patterns. Therefore, even with the way we assume
	 * hardware should work, we must keep this posting read for paranoia.
	 */
	if (i != 0)
		WARN_ON(readl(&gtt_entries[i-1])
			!= dev_priv->gtt.pte_encode(dev, addr, level));

	/* This next bit makes the above posting read even more important. We
	 * want to flush the TLBs only after we're certain all the PTE updates
	 * have finished.
	 */
	I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
	POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
static void gen6_ggtt_clear_range(struct drm_device *dev,
				  unsigned int first_entry,
				  unsigned int num_entries)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	gen6_gtt_pte_t scratch_pte, __iomem *gtt_base =
		(gen6_gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
	const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
	int i;

	if (WARN(num_entries > max_entries,
		 "First entry = %d; Num entries = %d (max=%d)\n",
		 first_entry, num_entries, max_entries))
		num_entries = max_entries;

	scratch_pte = dev_priv->gtt.pte_encode(dev,
					       dev_priv->gtt.scratch_page_dma,
					       I915_CACHE_LLC);
	for (i = 0; i < num_entries; i++)
		iowrite32(scratch_pte, &gtt_base[i]);
	readl(gtt_base);
}
static void i915_ggtt_insert_entries(struct drm_device *dev,
				     struct sg_table *st,
				     unsigned int pg_start,
				     enum i915_cache_level cache_level)
{
	unsigned int flags = (cache_level == I915_CACHE_NONE) ?
		AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;

	intel_gtt_insert_sg_entries(st, pg_start, flags);
}
static void i915_ggtt_clear_range(struct drm_device *dev,
				  unsigned int first_entry,
				  unsigned int num_entries)
{
	intel_gtt_clear_range(first_entry, num_entries);
}
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
					 obj->gtt_space->start >> PAGE_SHIFT,
					 cache_level);

	obj->has_global_gtt_mapping = 1;
}
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->gtt.gtt_clear_range(obj->base.dev,
				      obj->gtt_space->start >> PAGE_SHIFT,
				      obj->base.size >> PAGE_SHIFT);

	obj->has_global_gtt_mapping = 0;
}
void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
{
	struct drm_device *dev = obj->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	bool interruptible;

	interruptible = do_idling(dev_priv);

	if (!obj->has_dma_mapping)
		dma_unmap_sg(&dev->pdev->dev,
			     obj->pages->sgl, obj->pages->nents,
			     PCI_DMA_BIDIRECTIONAL);

	undo_idling(dev_priv, interruptible);
}
static void i915_gtt_color_adjust(struct drm_mm_node *node,
				  unsigned long color,
				  unsigned long *start,
				  unsigned long *end)
{
	if (node->color != color)
		*start += 4096;

	if (!list_empty(&node->node_list)) {
		node = list_entry(node->node_list.next,
				  struct drm_mm_node,
				  node_list);
		if (node->allocated && node->color != color)
			*end -= 4096;
	}
}
void i915_gem_setup_global_gtt(struct drm_device *dev,
			       unsigned long start,
			       unsigned long mappable_end,
			       unsigned long end)
{
	/* Let GEM Manage all of the aperture.
	 *
	 * However, leave one page at the end still bound to the scratch page.
	 * There are a number of places where the hardware apparently prefetches
	 * past the end of the object, and we've seen multiple hangs with the
	 * GPU head pointer stuck in a batchbuffer bound at the last page of the
	 * aperture. One page should be enough to keep any prefetching inside
	 * of the aperture.
	 */
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_mm_node *entry;
	struct drm_i915_gem_object *obj;
	unsigned long hole_start, hole_end;

	BUG_ON(mappable_end > end);

	/* Subtract the guard page ... */
	drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
	if (!HAS_LLC(dev))
		dev_priv->mm.gtt_space.color_adjust = i915_gtt_color_adjust;

	/* Mark any preallocated objects as occupied */
	list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
		DRM_DEBUG_KMS("reserving preallocated space: %x + %zx\n",
			      obj->gtt_offset, obj->base.size);

		BUG_ON(obj->gtt_space != I915_GTT_RESERVED);
		obj->gtt_space = drm_mm_create_block(&dev_priv->mm.gtt_space,
						     obj->gtt_offset,
						     obj->base.size,
						     false);
		obj->has_global_gtt_mapping = 1;
	}

	dev_priv->gtt.start = start;
	dev_priv->gtt.total = end - start;

	/* Clear any non-preallocated blocks */
	drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
			     hole_start, hole_end) {
		DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
			      hole_start, hole_end);
		dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
					      (hole_end-hole_start) / PAGE_SIZE);
	}

	/* And finally clear the reserved guard page */
	dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
}
static bool
intel_enable_ppgtt(struct drm_device *dev)
{
	if (i915_enable_ppgtt >= 0)
		return i915_enable_ppgtt;

#ifdef CONFIG_INTEL_IOMMU
	/* Disable ppgtt on SNB if VT-d is on. */
	if (INTEL_INFO(dev)->gen == 6 && intel_iommu_gfx_mapped)
		return false;
#endif

	return true;
}
void i915_gem_init_global_gtt(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long gtt_size, mappable_size;

	gtt_size = dev_priv->gtt.total;
	mappable_size = dev_priv->gtt.mappable_end;

	if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
		int ret;

		if (INTEL_INFO(dev)->gen <= 7) {
			/* PPGTT pdes are stolen from global gtt ptes, so shrink the
			 * aperture accordingly when using aliasing ppgtt. */
			gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
		}

		i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);

		ret = i915_gem_init_aliasing_ppgtt(dev);
		if (!ret)
			return;

		DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
		drm_mm_takedown(&dev_priv->mm.gtt_space);
		gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
	}
	i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}
static int setup_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct page *page;
	dma_addr_t dma_addr;

	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
	if (page == NULL)
		return -ENOMEM;
	get_page(page);
	set_pages_uc(page, 1);

#ifdef CONFIG_INTEL_IOMMU
	dma_addr = pci_map_page(dev->pdev, page, 0, PAGE_SIZE,
				PCI_DMA_BIDIRECTIONAL);
	if (pci_dma_mapping_error(dev->pdev, dma_addr))
		return -EINVAL;
#else
	dma_addr = page_to_phys(page);
#endif
	dev_priv->gtt.scratch_page = page;
	dev_priv->gtt.scratch_page_dma = dma_addr;

	return 0;
}
static void teardown_scratch_page(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	set_pages_wb(dev_priv->gtt.scratch_page, 1);
	pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
		       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	put_page(dev_priv->gtt.scratch_page);
	__free_page(dev_priv->gtt.scratch_page);
}
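/*
 * The SNB_GMCH_CTRL config word packs both the GTT size (GGMS field) and
 * the stolen-memory size (GMS field).  The decode helpers below simply
 * shift the relevant field down and scale it: GGMS is in 1 MB units of GTT,
 * GMS is in 32 MB units of stolen memory on gen6, and gen7 uses the lookup
 * table further down.  For example, a GGMS value of 2 decodes to a 2 MB GTT.
 */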
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GGMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GGMS_MASK;
	return snb_gmch_ctl << 20;
}
static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
	snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
	return snb_gmch_ctl << 25; /* 32 MB units */
}
static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
{
	static const int stolen_decoder[] = {
		0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
	snb_gmch_ctl >>= IVB_GMCH_GMS_SHIFT;
	snb_gmch_ctl &= IVB_GMCH_GMS_MASK;
	return stolen_decoder[snb_gmch_ctl] << 20;
}
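/*
 * Worked example for the GTT sizing done in gen6_gmch_probe() below: a 2 MB
 * GTT holds 2 MB / sizeof(gen6_gtt_pte_t) = 512Ki PTEs, and at one 4 KiB
 * page per PTE that maps 2 GiB of GPU address space, which is exactly what
 * the *gtt_total computation works out to.
 */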
static int gen6_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	phys_addr_t gtt_bus_addr;
	unsigned int gtt_size;
	u16 snb_gmch_ctl;
	int ret;

	*mappable_base = pci_resource_start(dev->pdev, 2);
	*mappable_end = pci_resource_len(dev->pdev, 2);

	/* 64/512MB is the current min/max we actually know of, but this is just
	 * a coarse sanity check.
	 */
	if ((*mappable_end < (64<<20) || (*mappable_end > (512<<20)))) {
		DRM_ERROR("Unknown GMADR size (%lx)\n",
			  dev_priv->gtt.mappable_end);
		return -ENXIO;
	}

	if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
	pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
	gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);

	if (IS_GEN7(dev) && !IS_VALLEYVIEW(dev))
		*stolen = gen7_get_stolen_size(snb_gmch_ctl);
	else
		*stolen = gen6_get_stolen_size(snb_gmch_ctl);

	*gtt_total = (gtt_size / sizeof(gen6_gtt_pte_t)) << PAGE_SHIFT;

	/* For Modern GENs the PTEs and register space are split in the BAR */
	gtt_bus_addr = pci_resource_start(dev->pdev, 0) +
		(pci_resource_len(dev->pdev, 0) / 2);

	dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
	if (!dev_priv->gtt.gsm) {
		DRM_ERROR("Failed to map the gtt page table\n");
		return -ENOMEM;
	}

	ret = setup_scratch_page(dev);
	if (ret)
		DRM_ERROR("Scratch setup failed\n");

	dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
	dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;

	return ret;
}
static void gen6_gmch_remove(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	iounmap(dev_priv->gtt.gsm);
	teardown_scratch_page(dev_priv->dev);
}
static int i915_gmch_probe(struct drm_device *dev,
			   size_t *gtt_total,
			   size_t *stolen,
			   phys_addr_t *mappable_base,
			   unsigned long *mappable_end)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
	if (!ret) {
		DRM_ERROR("failed to set up gmch\n");
		return -EIO;
	}

	intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);

	dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
	dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
	dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;

	return 0;
}
static void i915_gmch_remove(struct drm_device *dev)
{
	intel_gmch_remove();
}
int i915_gem_gtt_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_gtt *gtt = &dev_priv->gtt;
	int ret;

	if (INTEL_INFO(dev)->gen <= 5) {
		dev_priv->gtt.gtt_probe = i915_gmch_probe;
		dev_priv->gtt.gtt_remove = i915_gmch_remove;
	} else {
		dev_priv->gtt.gtt_probe = gen6_gmch_probe;
		dev_priv->gtt.gtt_remove = gen6_gmch_remove;
		if (IS_VALLEYVIEW(dev)) {
			dev_priv->gtt.pte_encode = byt_pte_encode;
		} else {
			dev_priv->gtt.pte_encode = gen6_pte_encode;
		}
	}

	ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
				      &dev_priv->gtt.stolen_size,
				      &gtt->mappable_base,
				      &gtt->mappable_end);
	if (ret)
		return ret;

	/* GMADR is the PCI mmio aperture into the global GTT. */
	DRM_INFO("Memory usable by graphics device = %zdM\n",
		 dev_priv->gtt.total >> 20);
	DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
			 dev_priv->gtt.mappable_end >> 20);
	DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
			 dev_priv->gtt.stolen_size >> 20);

	return 0;
}