/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Chris Wilson <chris@chris-wilson.co.uk>
 *
 */

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"

/*
 * The BIOS typically reserves some of the system's memory for the exclusive
 * use of the integrated graphics. This memory is no longer available for
 * use by the OS and so the user finds that their system has less memory
 * available than was installed. We refer to this memory as stolen.
 *
 * The BIOS will allocate its framebuffer from the stolen memory. Our
 * goal is to try to reuse that object for our own fbcon which must always
 * be available for panics. Anything else we can reuse the stolen memory
 * for is a boon.
 */

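/* Carve a block out of the stolen-memory range manager. The drm_mm is shared
 * by all callers, so stolen_lock serialises insertions and removals. Fails
 * with -ENODEV if stolen memory was never set up (see i915_gem_init_stolen).
 */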
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment, u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
					  alignment, start, end,
					  DRM_MM_SEARCH_DEFAULT);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}

int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
				struct drm_mm_node *node, u64 size,
				unsigned alignment)
{
	return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
						    alignment, 0,
						    dev_priv->gtt.stolen_usable_size);
}

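/* Return a node to the stolen allocator; freeing the node itself is left to
 * the caller. */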
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node)
{
	mutex_lock(&dev_priv->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&dev_priv->mm.stolen_lock);
}

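/* Work out where stolen memory actually lives in physical address space and
 * claim it in the resource tree, so nothing else in the kernel can reuse the
 * same pages. Returns 0 if the base cannot be determined or the region is
 * already in use. */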
static unsigned long i915_stolen_to_physical(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct resource *r;
	u32 base;

	/* Almost universally we can find the Graphics Base of Stolen Memory
	 * at offset 0x5c in the igfx configuration space. On a few (desktop)
	 * machines this is also mirrored in the bridge device at different
	 * locations, or in the MCHBAR. On gen2, the layout is again slightly
	 * different with the Graphics Segment immediately following Top of
	 * Memory (or Top of Usable DRAM). Note it appears that TOUD is only
	 * reported by 865g, so we just use the top of memory as determined
	 * by the e820 probe.
	 *
	 * XXX However gen2 requires an unavailable symbol.
	 */
	base = 0;
	if (INTEL_INFO(dev)->gen >= 3) {
		/* Read Graphics Base of Stolen Memory directly */
		pci_read_config_dword(dev->pdev, 0x5c, &base);
		base &= ~((1<<20) - 1);
	} else { /* GEN2 */
#if 0
		/* Stolen is immediately above Top of Memory */
		base = max_low_pfn_mapped << PAGE_SHIFT;
#endif
	}

	if (base == 0)
		return 0;

	/* make sure we don't clobber the GTT if it's within stolen memory */
	if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
		struct {
			u32 start, end;
		} stolen[2] = {
			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
			{ .start = base, .end = base + dev_priv->gtt.stolen_size, },
		};
		u64 gtt_start, gtt_end;

		gtt_start = I915_READ(PGTBL_CTL);
		if (IS_GEN4(dev))
			gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
				(gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
		else
			gtt_start &= PGTBL_ADDRESS_LO_MASK;
		gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;

		if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
			stolen[0].end = gtt_start;
		if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
			stolen[1].start = gtt_end;

		/* pick the larger of the two chunks */
		if (stolen[0].end - stolen[0].start >
		    stolen[1].end - stolen[1].start) {
			base = stolen[0].start;
			dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
		} else {
			base = stolen[1].start;
			dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
		}

		if (stolen[0].start != stolen[1].start ||
		    stolen[0].end != stolen[1].end) {
			DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
				      (unsigned long long) gtt_start,
				      (unsigned long long) gtt_end - 1);
			DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
				      base, base + (u32) dev_priv->gtt.stolen_size - 1);
		}
	}

	/* Verify that nothing else uses this physical address. Stolen
	 * memory should be reserved by the BIOS and hidden from the
	 * kernel. So if the region is already marked as busy, something
	 * is seriously wrong.
	 */
	r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
				    "Graphics Stolen Memory");
	if (r == NULL) {
		/*
		 * One more attempt but this time requesting region from
		 * base + 1, as we have seen that this resolves the region
		 * conflict with the PCI Bus.
		 * This is a BIOS w/a: Some BIOS wrap stolen in the root
		 * PCI bus, but have an off-by-one error. Hence retry the
		 * reservation starting from 1 instead of 0.
		 */
		r = devm_request_mem_region(dev->dev, base + 1,
					    dev_priv->gtt.stolen_size - 1,
					    "Graphics Stolen Memory");
		/*
		 * GEN3 firmware likes to smash pci bridges into the stolen
		 * range. Apparently this works.
		 */
		if (r == NULL && !IS_GEN3(dev)) {
			DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
				  base, base + (uint32_t)dev_priv->gtt.stolen_size);
			base = 0;
		}
	}

	return base;
}

void i915_gem_cleanup_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return;

	drm_mm_takedown(&dev_priv->mm.stolen);
}

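/* The BIOS/firmware can set aside a chunk at the top of stolen memory for its
 * own use. The helpers below decode the per-generation STOLEN_RESERVED
 * registers into a base and size for that chunk so that we never hand it out
 * to GEM objects. */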
static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(IS_GM45(dev_priv) ?
				     CTG_STOLEN_RESERVED :
				     ELK_STOLEN_RESERVED);
	unsigned long stolen_top = dev_priv->mm.stolen_base +
				   dev_priv->gtt.stolen_size;

	*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;

	WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and there's
	 * nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}

static void gen6_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK) {
	case GEN6_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_512K:
		*size = 512 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	case GEN6_STOLEN_RESERVED_128K:
		*size = 128 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN6_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void gen7_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN7_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK) {
	case GEN7_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN7_STOLEN_RESERVED_256K:
		*size = 256 * 1024;
		break;
	default:
		*size = 1024 * 1024;
		MISSING_CASE(reg_val & GEN7_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void gen8_get_stolen_reserved(struct drm_i915_private *dev_priv,
				     unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	switch (reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK) {
	case GEN8_STOLEN_RESERVED_1M:
		*size = 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_2M:
		*size = 2 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_4M:
		*size = 4 * 1024 * 1024;
		break;
	case GEN8_STOLEN_RESERVED_8M:
		*size = 8 * 1024 * 1024;
		break;
	default:
		*size = 8 * 1024 * 1024;
		MISSING_CASE(reg_val & GEN8_STOLEN_RESERVED_SIZE_MASK);
	}
}

static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
				    unsigned long *base, unsigned long *size)
{
	uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
	unsigned long stolen_top;

	stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;

	*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;

	/* On these platforms, the register doesn't have a size field, so the
	 * size is the distance between the base and the top of the stolen
	 * memory. We also have the genuine case where base is zero and there's
	 * nothing reserved. */
	if (*base == 0)
		*size = 0;
	else
		*size = stolen_top - *base;
}

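/* Set up the drm_mm range manager that hands out pieces of stolen memory.
 * Returning 0 without initialising the allocator (e.g. when the IOMMU is
 * active or no stolen base could be found) is not fatal: later allocation
 * attempts simply fail with -ENODEV or NULL. */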
int i915_gem_init_stolen(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long reserved_total, reserved_base = 0, reserved_size;
	unsigned long stolen_top;

	mutex_init(&dev_priv->mm.stolen_lock);

#ifdef CONFIG_INTEL_IOMMU
	if (intel_iommu_gfx_mapped && INTEL_INFO(dev)->gen < 8) {
		DRM_INFO("DMAR active, disabling use of stolen memory\n");
		return 0;
	}
#endif

	if (dev_priv->gtt.stolen_size == 0)
		return 0;

	dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
	if (dev_priv->mm.stolen_base == 0)
		return 0;

	stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;

	switch (INTEL_INFO(dev_priv)->gen) {
	case 2:
	case 3:
		break;
	case 4:
		if (IS_G4X(dev))
			g4x_get_stolen_reserved(dev_priv, &reserved_base,
						&reserved_size);
		break;
	case 5:
		/* Assume the gen6 maximum for the older platforms. */
		reserved_size = 1024 * 1024;
		reserved_base = stolen_top - reserved_size;
		break;
	case 6:
		gen6_get_stolen_reserved(dev_priv, &reserved_base,
					 &reserved_size);
		break;
	case 7:
		gen7_get_stolen_reserved(dev_priv, &reserved_base,
					 &reserved_size);
		break;
	default:
		if (IS_BROADWELL(dev_priv) || IS_SKYLAKE(dev_priv))
			bdw_get_stolen_reserved(dev_priv, &reserved_base,
						&reserved_size);
		else
			gen8_get_stolen_reserved(dev_priv, &reserved_base,
						 &reserved_size);
		break;
	}

	/* It is possible for the reserved base to be zero, but the register
	 * field for size doesn't have a zero option. */
	if (reserved_base == 0) {
		reserved_size = 0;
		reserved_base = stolen_top;
	}

	if (reserved_base < dev_priv->mm.stolen_base ||
	    reserved_base + reserved_size > stolen_top) {
		DRM_DEBUG_KMS("Stolen reserved area [0x%08lx - 0x%08lx] outside stolen memory [0x%08lx - 0x%08lx]\n",
			      reserved_base, reserved_base + reserved_size,
			      dev_priv->mm.stolen_base, stolen_top);
		return 0;
	}

	/* It is possible for the reserved area to end before the end of stolen
	 * memory, so just consider the start. */
	reserved_total = stolen_top - reserved_base;

	DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
		      dev_priv->gtt.stolen_size >> 10,
		      (dev_priv->gtt.stolen_size - reserved_total) >> 10);

	dev_priv->gtt.stolen_usable_size = dev_priv->gtt.stolen_size -
					   reserved_total;

	/* Basic memrange allocator for stolen space */
	drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size);

	return 0;
}

static struct sg_table *
i915_pages_create_for_stolen(struct drm_device *dev,
			     u32 offset, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct sg_table *st;
	struct scatterlist *sg;

	DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
	BUG_ON(offset > dev_priv->gtt.stolen_size - size);

	/* We hide that we have no struct page backing our stolen object
	 * by wrapping the contiguous physical allocation with a fake
	 * dma mapping in a single scatterlist.
	 */

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (st == NULL)
		return NULL;

	if (sg_alloc_table(st, 1, GFP_KERNEL)) {
		kfree(st);
		return NULL;
	}

	sg = st->sgl;
	sg->offset = 0;
	sg->length = size;

	sg_dma_address(sg) = (dma_addr_t)dev_priv->mm.stolen_base + offset;
	sg_dma_len(sg) = size;

	return st;
}

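/* Stolen objects build their sg_table at creation time, so get_pages should
 * never be reached; put_pages only tears the fake table down on free. */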
static int i915_gem_object_get_pages_stolen(struct drm_i915_gem_object *obj)
{
	BUG();
	return -EINVAL;
}

static void i915_gem_object_put_pages_stolen(struct drm_i915_gem_object *obj)
{
	/* Should only be called during free */
	sg_free_table(obj->pages);
	kfree(obj->pages);
}

static void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;

	if (obj->stolen) {
		i915_gem_stolen_remove_node(dev_priv, obj->stolen);
		kfree(obj->stolen);
		obj->stolen = NULL;
	}
}

static const struct drm_i915_gem_object_ops i915_gem_object_stolen_ops = {
	.get_pages = i915_gem_object_get_pages_stolen,
	.put_pages = i915_gem_object_put_pages_stolen,
	.release = i915_gem_object_release_stolen,
};

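/* Wrap a node already reserved in the stolen drm_mm in a GEM object. The
 * object has no shmem backing; its "pages" are the fake single-entry
 * sg_table created above, pointing straight at stolen physical memory. */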
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->pages = i915_pages_create_for_stolen(dev,
						  stolen->start, stolen->size);
	if (obj->pages == NULL)
		goto cleanup;

	i915_gem_object_pin_pages(obj);
	obj->stolen = stolen;

	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}

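/* Allocate a fresh GEM object backed by stolen memory. Returns NULL rather
 * than an ERR_PTR when stolen is unavailable or the request does not fit.
 *
 * A typical caller (sketch only, not code from this file) would therefore
 * fall back to an ordinary shmem-backed object:
 *
 *	obj = i915_gem_object_create_stolen(dev, size);
 *	if (obj == NULL)
 *		obj = i915_gem_alloc_object(dev, size);
 */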
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	ret = i915_gem_stolen_insert_node(dev_priv, stolen, size, 4096);
	if (ret) {
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj)
		return obj;

	i915_gem_stolen_remove_node(dev_priv, stolen);
	kfree(stolen);
	return NULL;
}

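/* Wrap a GEM object around memory that firmware already placed in stolen
 * (such as the BIOS framebuffer), optionally reserving a fixed GGTT offset
 * so the existing scanout keeps working while the driver takes over. */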
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *ggtt = &dev_priv->gtt.base;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
			stolen_offset, gtt_offset, size);

	/* KISS and expect everything to be page-aligned */
	if (WARN_ON(size == 0) || WARN_ON(size & 4095) ||
	    WARN_ON(stolen_offset & 4095))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	mutex_unlock(&dev_priv->mm.stolen_lock);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj == NULL) {
		DRM_DEBUG_KMS("failed to allocate stolen object\n");
		i915_gem_stolen_remove_node(dev_priv, stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	vma = i915_gem_obj_lookup_or_create_vma(obj, ggtt);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	vma->node.start = gtt_offset;
	vma->node.size = size;
	if (drm_mm_initialized(&ggtt->mm)) {
		ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
		if (ret) {
			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
			goto err;
		}

		vma->bound |= GLOBAL_BIND;
		list_add_tail(&vma->mm_list, &ggtt->inactive_list);
	}

	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
	i915_gem_object_pin_pages(obj);

	return obj;

err:
	drm_gem_object_unreference(&obj->base);
	return NULL;
}