Framebuffer device support for the Tegra display controller.
config TEGRA_NVMAP
- bool "Tegra GPU memory management driver"
- select ARM_ATTRIB_ALLOCATOR
+ bool "Tegra GPU memory management driver (nvmap)"
default y
help
Say Y here to include the memory management driver for the Tegra
GPU, multimedia, and display subsystems.
config NVMAP_RECLAIM_UNPINNED_VM
- bool "Allow /dev/nvmap to reclaim unpinned I/O virtual memory"
+ bool "Virtualize IOVMM memory in nvmap"
depends on TEGRA_NVMAP && TEGRA_IOVMM
default y
help
- Say Y here to enable /dev/nvmap to reclaim I/O virtual memory after
- it has been unpinned, and re-use it for other objects. This can
+ Say Y here to enable nvmap to reclaim I/O virtual memory after
+ it has been unpinned, and re-use it for other handles. This can
allow a larger I/O virtual memory space than would normally be
supported by the hardware, at a slight cost in performance.
+config NVMAP_ALLOW_SYSMEM
+ bool "Allow physical system memory to be used by nvmap"
+ depends on TEGRA_NVMAP
+ default y
+ help
+ Say Y here to allow nvmap to use physical system memory (i.e.,
+ shared with the operating system but not translated through
+ an IOVMM device) for allocations.
+
+config NVMAP_HIGHMEM_ONLY
+ bool "Use only HIGHMEM for nvmap"
+ depends on TEGRA_NVMAP && (NVMAP_ALLOW_SYSMEM || TEGRA_IOVMM) && HIGHMEM
+ default n
+ help
+ Say Y here to restrict nvmap system memory allocations (both
+ physical system memory and IOVMM) to just HIGHMEM pages.
+
endif
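
Taken together, a board configuration that exercises the defaults above would look something like this (illustrative .config fragment, not part of the patch):

CONFIG_TEGRA_NVMAP=y
CONFIG_NVMAP_RECLAIM_UNPINNED_VM=y
CONFIG_NVMAP_ALLOW_SYSMEM=y
# CONFIG_NVMAP_HIGHMEM_ONLY is not set
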
prot = nvmap_pgprot(h, pgprot_kernel);
- if (h->heap_pgalloc && h->pgalloc.contig &&
- !PageHighMem(h->pgalloc.pages[0]))
- return page_address(h->pgalloc.pages[0]);
- else if (h->heap_pgalloc)
+ if (h->heap_pgalloc)
return vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT,
-1, prot);
h = ref->handle;
- if (h->heap_pgalloc && (!h->pgalloc.contig ||
- PageHighMem(h->pgalloc.pages[0]))) {
+ if (h->heap_pgalloc) {
vm_unmap_ram(addr, h->size >> PAGE_SHIFT);
- } else if (!h->heap_pgalloc) {
+ } else {
struct vm_struct *vm;
addr -= (h->carveout->base & ~PAGE_MASK);
vm = remove_vm_area(addr);
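
With the ARM attribute allocator gone, page-allocated handles lose the lowmem fast path through page_address(); every kernel mapping now goes through vm_map_ram(), and the unmap side mirrors it. A minimal sketch of the pairing (assuming h is a valid struct nvmap_handle with heap_pgalloc set and prot computed as above; not verbatim from the driver):

/* Map a pgalloc handle into the kernel, use it, then tear the
 * mapping down symmetrically. */
void *va = vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT, -1, prot);
if (va) {
	/* ... access the buffer through va ... */
	vm_unmap_ram(va, h->size >> PAGE_SHIFT);
}
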
#include <linux/slab.h>
#include <linux/vmalloc.h>
-#include <asm/attrib_alloc.h>
+#include <asm/cacheflush.h>
+#include <asm/outercache.h>
#include <asm/pgtable.h>
#include <mach/iovmm.h>
#include "nvmap_mru.h"
#define NVMAP_SECURE_HEAPS (NVMAP_HEAP_CARVEOUT_IRAM | NVMAP_HEAP_IOVMM)
+#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
#define GFP_NVMAP (__GFP_HIGHMEM | __GFP_NOWARN)
+#else
+#define GFP_NVMAP (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
+#endif
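
An explanatory aside, not part of the patch: on kernels of this vintage GFP_KERNEL expands to __GFP_WAIT | __GFP_IO | __GFP_FS, so the two variants differ in whether the allocation may sleep and start reclaim I/O:

/* Illustrative expansion, assuming a 2.6-era GFP_KERNEL: */
#ifdef CONFIG_NVMAP_HIGHMEM_ONLY
#define GFP_NVMAP (__GFP_HIGHMEM | __GFP_NOWARN)	/* may not sleep */
#else
#define GFP_NVMAP (__GFP_WAIT | __GFP_IO | __GFP_FS | \
		   __GFP_HIGHMEM | __GFP_NOWARN)	/* may sleep/reclaim */
#endif
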
/* handles may be arbitrarily large (16+MiB), and any handle allocated from
* the kernel (i.e., not a carveout handle) includes its array of pages. to
* preserve kmalloc space, if the array of pages exceeds PAGELIST_VMALLOC_MIN,
tegra_iovmm_free_vm(h->pgalloc.area);
for (i = 0; i < nr_page; i++)
- arm_attrib_free_page(h->pgalloc.pages[i]);
+ __free_page(h->pgalloc.pages[i]);
altfree(h->pgalloc.pages, nr_page * sizeof(struct page *));
nvmap_client_put(client);
}
+extern void __flush_dcache_page(struct address_space *, struct page *);
+
+static struct page *nvmap_alloc_pages_exact(gfp_t gfp, size_t size)
+{
+ struct page *page, *p, *e;
+ unsigned int order;
+ unsigned long base;
+
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
+ page = alloc_pages(gfp, order);
+
+ if (!page)
+ return NULL;
+
+ split_page(page, order);
+
+ e = page + (1 << order);
+ for (p = page + (size >> PAGE_SHIFT); p < e; p++)
+ __free_page(p);
+
+ e = page + (size >> PAGE_SHIFT);
+ for (p = page; p < e; p++)
+ __flush_dcache_page(page_mapping(p), p);
+
+ base = page_to_phys(page);
+ outer_flush_range(base, base + size);
+ return page;
+}
+
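
The new helper follows the same pattern as the kernel's alloc_pages_exact(): round the request up to a power-of-two order, split_page() so each page carries its own reference count, hand the tail pages back, then clean both cache levels so the device never sees stale lines. A worked example of what a caller gets back (hypothetical caller, not in the patch):

/* A 12 KiB request: get_order(12K) == 2, so 16 KiB (4 pages) are
 * allocated; split_page() makes them independent order-0 pages, the
 * 4th page goes straight back via __free_page(), and the surviving
 * 3 pages are flushed from the L1 (__flush_dcache_page) and the
 * outer cache (outer_flush_range) before being returned. */
struct page *pg = nvmap_alloc_pages_exact(GFP_NVMAP, 3 * PAGE_SIZE);
if (pg)
	pr_debug("contiguous run at PA %#lx\n",
		 (unsigned long)page_to_phys(pg));
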
static int handle_page_alloc(struct nvmap_client *client,
struct nvmap_handle *h, bool contiguous)
{
h->pgalloc.area = NULL;
if (contiguous) {
struct page *page;
- page = arm_attrib_alloc_pages_exact(GFP_NVMAP, size, prot);
+ page = nvmap_alloc_pages_exact(GFP_NVMAP, size);
+ if (!page)
+ goto fail;
for (i = 0; i < nr_page; i++)
pages[i] = nth_page(page, i);
} else {
for (i = 0; i < nr_page; i++) {
- pages[i] = arm_attrib_alloc_page(GFP_NVMAP, prot);
+ pages[i] = nvmap_alloc_pages_exact(GFP_NVMAP, PAGE_SIZE);
if (!pages[i])
goto fail;
}
fail:
while (i--)
- arm_attrib_free_page(pages[i]);
+ __free_page(pages[i]);
altfree(pages, nr_page * sizeof(*pages));
return -ENOMEM;
}
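
A subtlety in the unwind path worth noting (explanatory comment, not in the patch): freeing individual pages with __free_page() is only legal here because nvmap_alloc_pages_exact() ran split_page() on the compound allocation, so even pages from a single contiguous run have independent reference counts:

/* After split_page(page, order) in nvmap_alloc_pages_exact(), each
 * of the 2^order pages is an ordinary order-0 page, so a partial
 * unwind may free them one at a time regardless of which branch
 * (contiguous or per-page) allocated them: */
while (i--)
	__free_page(pages[i]);
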
* sub-page splinters */
static const unsigned int heap_policy_small[] = {
NVMAP_HEAP_CARVEOUT_IRAM,
+#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
NVMAP_HEAP_SYSMEM,
+#endif
NVMAP_HEAP_CARVEOUT_MASK,
NVMAP_HEAP_IOVMM,
0,
NVMAP_HEAP_CARVEOUT_IRAM,
NVMAP_HEAP_IOVMM,
NVMAP_HEAP_CARVEOUT_MASK,
+#ifdef CONFIG_NVMAP_ALLOW_SYSMEM
NVMAP_HEAP_SYSMEM,
+#endif
0,
};
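
These policy arrays are walked in order at allocation time, masked against the heaps the client requested, until one heap succeeds; the #ifdefs simply drop NVMAP_HEAP_SYSMEM from both orderings when NVMAP_ALLOW_SYSMEM is off. A sketch of the walk (assuming the second array above is the large-allocation counterpart, here called heap_policy_large, and nvmap_try_heap() is a hypothetical stand-in for the driver's per-heap allocator):

/* Sketch, not verbatim: try each policy entry until one succeeds. */
const unsigned int *policy = (size <= PAGE_SIZE) ?
			     heap_policy_small : heap_policy_large;
for (; *policy; policy++) {
	if (nvmap_try_heap(client, h, heaps & *policy))	/* hypothetical */
		return 0;
}
return -ENOMEM;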