mcr p15, 0, r0, c3, c0, 0 @ domain access register
mov32 r0, 0xff0a89a8
- mov32 r1, 0xc0e0c4e0
+ mov32 r1, 0x40e044e0
mcr p15, 0, r0, c10, c2, 0 @ PRRR
mcr p15, 0, r1, c10, c2, 1 @ NMRR
mrc p15, 0, r0, c1, c0, 0
int dirty;
struct tegra_dc *dc;
-
- unsigned long cur_handle;
};
#define TEGRA_WIN_FLAG_ENABLED (1 << 0)
void tegra_dc_enable(struct tegra_dc *dc);
void tegra_dc_disable(struct tegra_dc *dc);
-u32 tegra_dc_get_syncpt_id(struct tegra_dc *dc);
-u32 tegra_dc_incr_syncpt_max(struct tegra_dc *dc);
-void tegra_dc_incr_syncpt_min(struct tegra_dc *dc, u32 val);
-
/* tegra_dc_update_windows and tegra_dc_sync_windows do not support windows
 * with different dcs in one call
*/
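
/* For example (illustrative: win_on_dc0/win_on_dc1 are assumed window
 * pointers on two different dcs, and the array-of-windows signatures are
 * assumed from the comment above):
 *
 *	struct tegra_dc_win *head0[] = { win_on_dc0 };
 *	struct tegra_dc_win *head1[] = { win_on_dc1 };
 *
 *	tegra_dc_update_windows(head0, 1);
 *	tegra_dc_update_windows(head1, 1);
 *	tegra_dc_sync_windows(head0, 1);
 *	tegra_dc_sync_windows(head1, 1);
 */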
default n
help
Enable NVOS driver
-
-config TEGRA_NVMAP
- bool "Enable NVMAP driver"
- default n
- help
- Enable NVMAP driver
-
-config TEGRA_IOVMM
- bool "Enable IOVMM driver"
- default n
- help
- Enable IOVMM driver
obj-$(CONFIG_TEGRA_NVOS) += nvos_user.o
obj-$(CONFIG_TEGRA_NVOS) += nvos/
-
-obj-$(CONFIG_TEGRA_NVMAP) += nvmap.o
+++ /dev/null
-/*
- * arch/arm/mach-tegra/include/linux/nvmem_ioctl.h
- *
- * structure declarations for nvmem and nvmap user-space ioctls
- *
- * Copyright (c) 2009, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include <linux/ioctl.h>
-
-#if !defined(__KERNEL__)
-#define __user
-#endif
-
-#ifndef _MACH_TEGRA_NVMEM_IOCTL_H_
-#define _MACH_TEGRA_NVMEM_IOCTL_H_
-
-struct nvmem_create_handle {
- union {
- __u32 key; /* ClaimPreservedHandle */
- __u32 id; /* FromId */
- __u32 size; /* CreateHandle */
- };
- __u32 handle;
-};
-
-#define NVMEM_HEAP_SYSMEM (1ul<<31)
-#define NVMEM_HEAP_IOVMM (1ul<<30)
-
-/* common carveout heaps */
-#define NVMEM_HEAP_CARVEOUT_IRAM (1ul<<29)
-#define NVMEM_HEAP_CARVEOUT_GENERIC (1ul<<0)
-
-#define NVMEM_HEAP_CARVEOUT_MASK (NVMEM_HEAP_IOVMM - 1)
-
-#define NVMEM_HANDLE_UNCACHEABLE (0x0ul << 0)
-#define NVMEM_HANDLE_WRITE_COMBINE (0x1ul << 0)
-#define NVMEM_HANDLE_INNER_CACHEABLE (0x2ul << 0)
-#define NVMEM_HANDLE_CACHEABLE (0x3ul << 0)
-
-#define NVMEM_HANDLE_SECURE (0x1ul << 2)
-
-struct nvmem_alloc_handle {
- __u32 handle;
- __u32 heap_mask;
- __u32 flags;
- __u32 align;
-};
-
-struct nvmem_map_caller {
- __u32 handle; /* hmem */
- __u32 offset; /* offset into hmem; should be page-aligned */
- __u32 length; /* number of bytes to map */
- __u32 flags;
- unsigned long addr; /* user pointer */
-};
-
-struct nvmem_rw_handle {
- unsigned long addr; /* user pointer */
- __u32 handle; /* hmem */
- __u32 offset; /* offset into hmem */
- __u32 elem_size; /* individual atom size */
- __u32 hmem_stride; /* delta in bytes between atoms in hmem */
- __u32 user_stride; /* delta in bytes between atoms in user */
- __u32 count; /* number of atoms to copy */
-};
-
-struct nvmem_pin_handle {
- unsigned long handles; /* array of handles to pin/unpin */
- unsigned long addr; /* array of addresses to return */
- __u32 count; /* number of entries in handles */
-};
-
-struct nvmem_handle_param {
- __u32 handle;
- __u32 param;
- unsigned long result;
-};
-
-enum {
- NVMEM_HANDLE_PARAM_SIZE = 1,
- NVMEM_HANDLE_PARAM_ALIGNMENT,
- NVMEM_HANDLE_PARAM_BASE,
- NVMEM_HANDLE_PARAM_HEAP,
-};
-
-enum {
- NVMEM_CACHE_OP_WB = 0,
- NVMEM_CACHE_OP_INV,
- NVMEM_CACHE_OP_WB_INV,
-};
-
-struct nvmem_cache_op {
- unsigned long addr;
- __u32 handle;
- __u32 len;
- __s32 op;
-};
-
-#define NVMEM_IOC_MAGIC 'N'
-
-/* Creates a new memory handle. On input, the argument is the size of the new
- * handle; on return, the argument is the name of the new handle
- */
-#define NVMEM_IOC_CREATE _IOWR(NVMEM_IOC_MAGIC, 0, struct nvmem_create_handle)
-#define NVMEM_IOC_CLAIM _IOWR(NVMEM_IOC_MAGIC, 1, struct nvmem_create_handle)
-#define NVMEM_IOC_FROM_ID _IOWR(NVMEM_IOC_MAGIC, 2, struct nvmem_create_handle)
-
-/* Actually allocates memory for the specified handle */
-#define NVMEM_IOC_ALLOC _IOW (NVMEM_IOC_MAGIC, 3, struct nvmem_alloc_handle)
-
-/* Frees a memory handle, unpinning any pinned pages and unmapping any mappings
- */
-#define NVMEM_IOC_FREE _IO (NVMEM_IOC_MAGIC, 4)
-
-/* Maps the region of the specified handle into a user-provided virtual address
- * that was previously created via an mmap syscall on this fd */
-#define NVMEM_IOC_MMAP _IOWR(NVMEM_IOC_MAGIC, 5, struct nvmem_map_caller)
-
-/* Reads/writes data (possibly strided) from a user-provided buffer into the
- * hmem at the specified offset */
-#define NVMEM_IOC_WRITE _IOW (NVMEM_IOC_MAGIC, 6, struct nvmem_rw_handle)
-#define NVMEM_IOC_READ _IOW (NVMEM_IOC_MAGIC, 7, struct nvmem_rw_handle)
-
-#define NVMEM_IOC_PARAM _IOWR(NVMEM_IOC_MAGIC, 8, struct nvmem_handle_param)
-
-/* Pins a list of memory handles into IO-addressable memory (either IOVMM
- * space or physical memory, depending on the allocation), and returns the
- * address. Handles may be pinned recursively. */
-#define NVMEM_IOC_PIN_MULT _IOWR(NVMEM_IOC_MAGIC, 10, struct nvmem_pin_handle)
-#define NVMEM_IOC_UNPIN_MULT _IOW (NVMEM_IOC_MAGIC, 11, struct nvmem_pin_handle)
-
-#define NVMEM_IOC_CACHE _IOW (NVMEM_IOC_MAGIC, 12, struct nvmem_cache_op)
-
-/* Returns a global ID usable to allow a remote process to create a handle
- * reference to the same handle */
-#define NVMEM_IOC_GET_ID _IOWR(NVMEM_IOC_MAGIC, 13, struct nvmem_create_handle)
-
-#define NVMEM_IOC_MAXNR (_IOC_NR(NVMEM_IOC_GET_ID))
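-
-/* For reference, a minimal user-space sketch of the create -> alloc -> pin
- * flow defined above (the device node path is an assumption, and error
- * handling is elided):
- *
- *	int fd = open("/dev/nvmap", O_RDWR);
- *
- *	struct nvmem_create_handle create = { .size = 4096 };
- *	ioctl(fd, NVMEM_IOC_CREATE, &create);	// create.handle is filled in
- *
- *	struct nvmem_alloc_handle alloc = {
- *		.handle    = create.handle,
- *		.heap_mask = NVMEM_HEAP_CARVEOUT_GENERIC,
- *		.flags     = NVMEM_HANDLE_WRITE_COMBINE,
- *		.align     = 4096,
- *	};
- *	ioctl(fd, NVMEM_IOC_ALLOC, &alloc);
- *
- *	// with count == 1, 'handles' holds the handle itself and 'addr'
- *	// receives the pinned address on return
- *	struct nvmem_pin_handle pin = { .handles = create.handle, .count = 1 };
- *	ioctl(fd, NVMEM_IOC_PIN_MULT, &pin);
- */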
-#endif
+++ /dev/null
-/*
- * drivers/char/nvmap.c
- *
- * Memory manager for Tegra GPU memory handles
- *
- * Copyright (c) 2009-2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#define NV_DEBUG 0
-
-#include <linux/vmalloc.h>
-#include <linux/module.h>
-#include <linux/bitmap.h>
-#include <linux/wait.h>
-#include <linux/miscdevice.h>
-#include <linux/platform_device.h>
-#include <linux/mm.h>
-#include <linux/mman.h>
-#include <linux/uaccess.h>
-#include <linux/backing-dev.h>
-#include <linux/device.h>
-#include <linux/highmem.h>
-#include <linux/smp_lock.h>
-#include <linux/pagemap.h>
-#include <linux/sched.h>
-#include <linux/io.h>
-#include <linux/rbtree.h>
-#include <linux/proc_fs.h>
-#include <linux/ctype.h>
-#include <linux/nvmap.h>
-#include <asm/tlbflush.h>
-#include <linux/dma-mapping.h>
-#include <asm/cacheflush.h>
-#include <mach/iovmm.h>
-#include "nvcommon.h"
-#include "nvrm_memmgr.h"
-#include "nvbootargs.h"
-
-
-#ifndef NVMAP_BASE
-#define NVMAP_BASE 0xFEE00000
-#define NVMAP_SIZE SZ_2M
-#endif
-
-#define L_PTE_MT_INNER_WB (0x05 << 2) /* 0101 (armv6, armv7) */
-#define pgprot_inner_writeback(prot) \
- __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_INNER_WB)
-static void smp_dma_clean_range(const void *start, const void *end)
-{
- dmac_map_area(start, end - start, DMA_TO_DEVICE);
-}
-
-static void smp_dma_inv_range(const void *start, const void *end)
-{
- dmac_unmap_area(start, end - start, DMA_FROM_DEVICE);
-}
-
-static void smp_dma_flush_range(const void *start, const void *end)
-{
- dmac_flush_range(start, end);
-}
-
-static void nvmap_vma_open(struct vm_area_struct *vma);
-
-static void nvmap_vma_close(struct vm_area_struct *vma);
-
-static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-
-static int nvmap_open(struct inode *inode, struct file *filp);
-
-static int nvmap_release(struct inode *inode, struct file *file);
-
-static int nvmap_mmap(struct file *filp, struct vm_area_struct *vma);
-
-static long nvmap_ioctl(struct file *filp,
- unsigned int cmd, unsigned long arg);
-
-static int nvmap_ioctl_getid(struct file *filp, void __user *arg);
-
-static int nvmap_ioctl_get_param(struct file *filp, void __user* arg);
-
-static int nvmap_ioctl_alloc(struct file *filp, void __user *arg);
-
-static int nvmap_ioctl_free(struct file *filp, unsigned long arg);
-
-static int nvmap_ioctl_create(struct file *filp,
- unsigned int cmd, void __user *arg);
-
-static int nvmap_ioctl_pinop(struct file *filp,
- bool is_pin, void __user *arg);
-
-static int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg);
-
-static int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg);
-
-static int nvmap_ioctl_rw_handle(struct file *filp, int is_read,
- void __user* arg);
-
-static struct backing_dev_info nvmap_bdi = {
- .ra_pages = 0,
- .capabilities = (BDI_CAP_NO_ACCT_AND_WRITEBACK |
- BDI_CAP_READ_MAP | BDI_CAP_WRITE_MAP),
-};
-
-#define NVMAP_PTE_OFFSET(x) (((unsigned long)(x) - NVMAP_BASE) >> PAGE_SHIFT)
-#define NVMAP_PTE_INDEX(x) (((unsigned long)(x) - NVMAP_BASE)>>PGDIR_SHIFT)
-#define NUM_NVMAP_PTES (NVMAP_SIZE >> PGDIR_SHIFT)
-#define NVMAP_END (NVMAP_BASE + NVMAP_SIZE)
-#define NVMAP_PAGES (NVMAP_SIZE >> PAGE_SHIFT)
-
-/* private nvmap_handle flag for pinning duplicate detection */
-#define NVMEM_HANDLE_VISITED (0x1ul << 31)
-
-/* Heaps to use for kernel allocs when no heap list supplied */
-#define NVMAP_KERNEL_DEFAULT_HEAPS (NVMEM_HEAP_SYSMEM | NVMEM_HEAP_CARVEOUT_GENERIC)
-
-/* Heaps for which secure allocations are allowed */
-#define NVMAP_SECURE_HEAPS (NVMEM_HEAP_CARVEOUT_IRAM | NVMEM_HEAP_IOVMM)
-
-static pte_t *nvmap_pte[NUM_NVMAP_PTES];
-static unsigned long nvmap_ptebits[NVMAP_PAGES/BITS_PER_LONG];
-
-static DEFINE_SPINLOCK(nvmap_ptelock);
-static DECLARE_WAIT_QUEUE_HEAD(nvmap_ptefull);
-
-/* used to lock the master tree of memory handles */
-static DEFINE_SPINLOCK(nvmap_handle_lock);
-
-/* only one task may be performing pin / unpin operations at once, to
- * prevent deadlocks caused by interleaved IOVMM re-allocations */
-static DEFINE_MUTEX(nvmap_pin_lock);
-
-/* queue of tasks which are blocking on pin, for IOVMM room */
-static DECLARE_WAIT_QUEUE_HEAD(nvmap_pin_wait);
-static struct rb_root nvmap_handles = RB_ROOT;
-
-static struct tegra_iovmm_client *nvmap_vm_client = NULL;
-
-/* default heap order policy */
-static unsigned int _nvmap_heap_policy(unsigned int heaps, int numpages)
-{
- static const unsigned int multipage_order[] = {
- NVMEM_HEAP_CARVEOUT_MASK,
- NVMEM_HEAP_SYSMEM,
- NVMEM_HEAP_IOVMM,
- 0
- };
- static const unsigned int singlepage_order[] = {
- NVMEM_HEAP_SYSMEM,
- NVMEM_HEAP_CARVEOUT_MASK,
- NVMEM_HEAP_IOVMM,
- 0
- };
- const unsigned int* order;
-
- if (numpages == 1)
- order = singlepage_order;
- else
- order = multipage_order;
-
- while (*order) {
- unsigned int h = (*order & heaps);
- if (h) return h;
- order++;
- }
- return 0;
-}
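-
-/* For example, given the orders above:
- *
- *	_nvmap_heap_policy(NVMEM_HEAP_SYSMEM | NVMEM_HEAP_IOVMM, 1)
- *		returns NVMEM_HEAP_SYSMEM (singlepage_order tries sysmem first);
- *	_nvmap_heap_policy(NVMEM_HEAP_CARVEOUT_GENERIC | NVMEM_HEAP_SYSMEM, 8)
- *		returns NVMEM_HEAP_CARVEOUT_GENERIC (multipage_order tries the
- *		carveout heaps first).
- */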
-
-/* first-fit linear allocator carveout heap manager */
-struct nvmap_mem_block {
- unsigned long base;
- size_t size;
- short next; /* next absolute (address-order) block */
- short prev; /* previous absolute (address-order) block */
- short next_free;
- short prev_free;
-};
-
-struct nvmap_carveout {
- unsigned short num_blocks;
- short spare_index;
- short free_index;
- short block_index;
- spinlock_t lock;
- const char *name;
- struct nvmap_mem_block *blocks;
-};
-
-enum {
- CARVEOUT_STAT_TOTAL_SIZE,
- CARVEOUT_STAT_FREE_SIZE,
- CARVEOUT_STAT_NUM_BLOCKS,
- CARVEOUT_STAT_FREE_BLOCKS,
- CARVEOUT_STAT_LARGEST_BLOCK,
- CARVEOUT_STAT_LARGEST_FREE,
- CARVEOUT_STAT_BASE,
-};
-
-static inline pgprot_t _nvmap_flag_to_pgprot(unsigned long flag, pgprot_t base)
-{
- switch (flag) {
- case NVMEM_HANDLE_UNCACHEABLE:
- base = pgprot_noncached(base);
- break;
- case NVMEM_HANDLE_WRITE_COMBINE:
- base = pgprot_writecombine(base);
- break;
- case NVMEM_HANDLE_INNER_CACHEABLE:
- base = pgprot_inner_writeback(base);
- break;
- }
- return base;
-}
-
-static unsigned long _nvmap_carveout_blockstat(struct nvmap_carveout *co,
- int stat)
-{
- unsigned long val = 0;
- short idx;
- spin_lock(&co->lock);
-
- if (stat==CARVEOUT_STAT_BASE) {
- if (co->block_index==-1)
- val = ~0;
- else
- val = co->blocks[co->block_index].base;
- spin_unlock(&co->lock);
- return val;
- }
-
- if (stat==CARVEOUT_STAT_TOTAL_SIZE ||
- stat==CARVEOUT_STAT_NUM_BLOCKS ||
- stat==CARVEOUT_STAT_LARGEST_BLOCK)
- idx = co->block_index;
- else
- idx = co->free_index;
-
- while (idx!=-1) {
- switch (stat) {
- case CARVEOUT_STAT_TOTAL_SIZE:
- val += co->blocks[idx].size;
- idx = co->blocks[idx].next;
- break;
- case CARVEOUT_STAT_NUM_BLOCKS:
- val++;
- idx = co->blocks[idx].next;
- break;
- case CARVEOUT_STAT_LARGEST_BLOCK:
- val = max_t(unsigned long, val, co->blocks[idx].size);
- idx = co->blocks[idx].next;
- break;
- case CARVEOUT_STAT_FREE_SIZE:
- val += co->blocks[idx].size;
- idx = co->blocks[idx].next_free;
- break;
- case CARVEOUT_STAT_FREE_BLOCKS:
-			val++;
- idx = co->blocks[idx].next_free;
- break;
- case CARVEOUT_STAT_LARGEST_FREE:
- val = max_t(unsigned long, val, co->blocks[idx].size);
- idx = co->blocks[idx].next_free;
- break;
- }
- }
-
- spin_unlock(&co->lock);
- return val;
-}
-
-#define co_is_free(_co, _idx) \
- ((_co)->free_index==(_idx) || ((_co)->blocks[(_idx)].prev_free!=-1))
-
-static int _nvmap_init_carveout(struct nvmap_carveout *co,
- const char *name, unsigned long base_address, size_t len)
-{
- unsigned int num_blocks;
- struct nvmap_mem_block *blocks = NULL;
- int i;
-
- num_blocks = min_t(unsigned int, len/1024, 1024);
- blocks = vmalloc(sizeof(*blocks)*num_blocks);
-
- if (!blocks) goto fail;
- co->name = kstrdup(name, GFP_KERNEL);
- if (!co->name) goto fail;
-
- for (i=1; i<num_blocks; i++) {
- blocks[i].next = i+1;
- blocks[i].prev = i-1;
- blocks[i].next_free = -1;
- blocks[i].prev_free = -1;
- }
- blocks[i-1].next = -1;
- blocks[1].prev = -1;
-
- blocks[0].next = blocks[0].prev = -1;
- blocks[0].next_free = blocks[0].prev_free = -1;
- blocks[0].base = base_address;
- blocks[0].size = len;
- co->blocks = blocks;
- co->num_blocks = num_blocks;
- spin_lock_init(&co->lock);
- co->block_index = 0;
- co->spare_index = 1;
- co->free_index = 0;
- return 0;
-
-fail:
-	if (blocks) vfree(blocks);	/* allocated with vmalloc above */
- return -ENOMEM;
-}
-
-static int nvmap_get_spare(struct nvmap_carveout *co)
-{
- int idx;
-
- if (co->spare_index == -1)
- return -1;
-
- idx = co->spare_index;
- co->spare_index = co->blocks[idx].next;
- co->blocks[idx].next = -1;
- co->blocks[idx].prev = -1;
- co->blocks[idx].next_free = -1;
- co->blocks[idx].prev_free = -1;
- return idx;
-}
-
-#define BLOCK(_co, _idx) ((_idx)==-1 ? NULL : &(_co)->blocks[(_idx)])
-
-static void nvmap_zap_free(struct nvmap_carveout *co, int idx)
-{
- struct nvmap_mem_block *block;
-
- block = BLOCK(co, idx);
- if (block->prev_free != -1)
- BLOCK(co, block->prev_free)->next_free = block->next_free;
- else
- co->free_index = block->next_free;
-
- if (block->next_free != -1)
- BLOCK(co, block->next_free)->prev_free = block->prev_free;
-
- block->prev_free = -1;
- block->next_free = -1;
-}
-
-static void nvmap_split_block(struct nvmap_carveout *co,
- int idx, size_t start, size_t size)
-{
- if (BLOCK(co, idx)->base < start) {
- int spare_idx = nvmap_get_spare(co);
- struct nvmap_mem_block *spare = BLOCK(co, spare_idx);
- struct nvmap_mem_block *block = BLOCK(co, idx);
- if (spare) {
- spare->size = start - block->base;
- spare->base = block->base;
- block->size -= (start - block->base);
- block->base = start;
- spare->next = idx;
- spare->prev = block->prev;
- block->prev = spare_idx;
- if (spare->prev != -1)
- co->blocks[spare->prev].next = spare_idx;
- else
- co->block_index = spare_idx;
- spare->prev_free = -1;
- spare->next_free = co->free_index;
- if (co->free_index != -1)
- co->blocks[co->free_index].prev_free = spare_idx;
- co->free_index = spare_idx;
- } else {
- if (block->prev != -1) {
- spare = BLOCK(co, block->prev);
- spare->size += start - block->base;
- block->base = start;
- }
- }
- }
-
- if (BLOCK(co, idx)->size > size) {
- int spare_idx = nvmap_get_spare(co);
- struct nvmap_mem_block *spare = BLOCK(co, spare_idx);
- struct nvmap_mem_block *block = BLOCK(co, idx);
- if (spare) {
- spare->base = block->base + size;
- spare->size = block->size - size;
- block->size = size;
- spare->prev = idx;
- spare->next = block->next;
- block->next = spare_idx;
- if (spare->next != -1)
- co->blocks[spare->next].prev = spare_idx;
- spare->prev_free = -1;
- spare->next_free = co->free_index;
- if (co->free_index != -1)
- co->blocks[co->free_index].prev_free = spare_idx;
- co->free_index = spare_idx;
- }
- }
-
- nvmap_zap_free(co, idx);
-}
-
-#define next_spare next
-#define prev_spare prev
-
-#define nvmap_insert_block(_list, _co, _idx) \
- do { \
- struct nvmap_mem_block *b = BLOCK((_co), (_idx)); \
- struct nvmap_mem_block *s = BLOCK((_co), (_co)->_list##_index);\
- if (s) s->prev_##_list = (_idx); \
- b->prev_##_list = -1; \
- b->next_##_list = (_co)->_list##_index; \
- (_co)->_list##_index = (_idx); \
-	} while (0)
-
-static void nvmap_carveout_free(struct nvmap_carveout *co, int idx)
-{
- struct nvmap_mem_block *b;
-
- spin_lock(&co->lock);
-
- b = BLOCK(co, idx);
-
- if (b->next!=-1 && co_is_free(co, b->next)) {
- int zap = b->next;
- struct nvmap_mem_block *n = BLOCK(co, zap);
- b->size += n->size;
-
- b->next = n->next;
- if (n->next != -1) co->blocks[n->next].prev = idx;
-
- nvmap_zap_free(co, zap);
- nvmap_insert_block(spare, co, zap);
- }
-
- if (b->prev!=-1 && co_is_free(co, b->prev)) {
- int zap = b->prev;
- struct nvmap_mem_block *p = BLOCK(co, zap);
-
- b->base = p->base;
- b->size += p->size;
-
- b->prev = p->prev;
-
- if (p->prev != -1) co->blocks[p->prev].next = idx;
- else co->block_index = idx;
-
- nvmap_zap_free(co, zap);
- nvmap_insert_block(spare, co, zap);
- }
-
- nvmap_insert_block(free, co, idx);
- spin_unlock(&co->lock);
-}
-
-static int nvmap_carveout_alloc(struct nvmap_carveout *co,
- size_t align, size_t size)
-{
- short idx;
-
- spin_lock(&co->lock);
-
- idx = co->free_index;
-
- while (idx != -1) {
- struct nvmap_mem_block *b = BLOCK(co, idx);
- /* try to be a bit more clever about generating block-
- * droppings by comparing the results of a left-justified vs
- * right-justified block split, and choosing the
- * justification style which yields the largest remaining
- * block */
- size_t end = b->base + b->size;
- size_t ljust = (b->base + align - 1) & ~(align-1);
- size_t rjust = (end - size) & ~(align-1);
- size_t l_max, r_max;
-
- if (rjust < b->base) rjust = ljust;
- l_max = max_t(size_t, ljust - b->base, end - (ljust + size));
- r_max = max_t(size_t, rjust - b->base, end - (rjust + size));
-
- if (b->base + b->size >= ljust + size) {
- if (l_max >= r_max)
- nvmap_split_block(co, idx, ljust, size);
- else
- nvmap_split_block(co, idx, rjust, size);
- break;
- }
- idx = b->next_free;
- }
-
- spin_unlock(&co->lock);
- return idx;
-}
-
-#undef next_spare
-#undef prev_spare
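-
-/* Worked example of the split-justification heuristic used by
- * nvmap_carveout_alloc, with illustrative values: free block base = 0x1000,
- * size = 0x5000 (end = 0x6000); request size = 0x800, align = 0x2000.
- *
- *	ljust = 0x2000: leftover 0x1000 below, 0x3800 above -> l_max = 0x3800
- *	rjust = 0x4000: leftover 0x3000 below, 0x1800 above -> r_max = 0x3000
- *
- * l_max >= r_max, so the block is split left-justified at 0x2000, keeping
- * the largest single free remainder.
- */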
-
-#define NVDA_POISON (('n'<<24) | ('v'<<16) | ('d'<<8) | ('a'))
-
-struct nvmap_handle {
- struct rb_node node;
- atomic_t ref;
- atomic_t pin;
- unsigned long flags;
- size_t size;
- size_t orig_size;
- struct task_struct *owner;
- unsigned int poison;
- union {
- struct {
- struct page **pages;
- struct tegra_iovmm_area *area;
- struct list_head mru_list;
- bool contig;
- bool dirty; /* IOVMM area allocated since last pin */
- } pgalloc;
- struct {
- struct nvmap_carveout *co_heap;
- int block_idx;
- unsigned long base;
- unsigned int key; /* preserved by bootloader */
- } carveout;
- };
- bool global;
- bool secure; /* only allocated in IOVM space, zapped on unpin */
- bool heap_pgalloc;
- bool alloc;
- void *kern_map; /* used for RM memmgr backwards compat */
-};
-
-/* handle_ref objects are file-descriptor-local references to nvmap_handle
- * objects. they track the number of references and pins performed by
- * the specific caller (since nvmap_handle objects may be global), so that
- * a client which terminates without properly unwinding all handles (or
- * all nested pins) can be unwound by nvmap. */
-struct nvmap_handle_ref {
- struct nvmap_handle *h;
- struct rb_node node;
- atomic_t refs;
- atomic_t pin;
-};
-
-struct nvmap_file_priv {
- struct rb_root handle_refs;
- atomic_t iovm_commit;
- size_t iovm_limit;
- spinlock_t ref_lock;
- bool su;
-};
-
-struct nvmap_carveout_node {
- struct device dev;
- struct list_head heap_list;
- unsigned int heap_bit;
- struct nvmap_carveout carveout;
-};
-
-/* the master structure for all nvmap-managed carveouts and all handle_ref
- * objects allocated inside the kernel. heaps are sorted by their heap_bit
- * (highest heap_bit first) so that carveout allocation will be first
- * attempted by the heap with the highest heap_bit set in the allocation's
- * heap mask */
-static struct {
- struct nvmap_file_priv init_data;
- struct rw_semaphore list_sem;
- struct list_head heaps;
-} nvmap_context;
-
-static struct vm_operations_struct nvmap_vma_ops = {
- .open = nvmap_vma_open,
- .close = nvmap_vma_close,
- .fault = nvmap_vma_fault,
-};
-
-const struct file_operations nvmap_fops = {
- .owner = THIS_MODULE,
- .open = nvmap_open,
- .release = nvmap_release,
- .unlocked_ioctl = nvmap_ioctl,
- .mmap = nvmap_mmap
-};
-
-const struct file_operations knvmap_fops = {
- .owner = THIS_MODULE,
- .open = nvmap_open,
- .release = nvmap_release,
- .unlocked_ioctl = nvmap_ioctl,
- .mmap = nvmap_mmap
-};
-
-struct nvmap_vma_priv {
- struct nvmap_handle *h;
- size_t offs;
- atomic_t ref;
-};
-
-static struct proc_dir_entry *nvmap_procfs_root;
-static struct proc_dir_entry *nvmap_procfs_proc;
-
-static void _nvmap_handle_free(struct nvmap_handle *h);
-
-#define NVMAP_CARVEOUT_ATTR_RO(_name) \
- struct device_attribute nvmap_heap_attr_##_name = \
- __ATTR(_name, S_IRUGO, _nvmap_sysfs_show_heap_##_name, NULL)
-
-#define NVMAP_CARVEOUT_ATTR_WO(_name, _mode) \
- struct device_attribute nvmap_heap_attr_##_name = \
- __ATTR(_name, _mode, NULL, _nvmap_sysfs_set_heap_##_name)
-
-
-static ssize_t _nvmap_sysfs_show_heap_usage(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct nvmap_carveout_node *c = container_of(d,
- struct nvmap_carveout_node, dev);
- return sprintf(buf, "%08x\n", c->heap_bit);
-}
-
-static ssize_t _nvmap_sysfs_show_heap_name(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct nvmap_carveout_node *c = container_of(d,
- struct nvmap_carveout_node, dev);
- return sprintf(buf, "%s\n", c->carveout.name);
-}
-
-static ssize_t _nvmap_sysfs_show_heap_base(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct nvmap_carveout_node *c = container_of(d,
- struct nvmap_carveout_node, dev);
- return sprintf(buf, "%08lx\n",
- _nvmap_carveout_blockstat(&c->carveout, CARVEOUT_STAT_BASE));
-}
-
-static ssize_t _nvmap_sysfs_show_heap_free_size(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct nvmap_carveout_node *c = container_of(d,
- struct nvmap_carveout_node, dev);
- return sprintf(buf, "%lu\n",
- _nvmap_carveout_blockstat(&c->carveout,
- CARVEOUT_STAT_FREE_SIZE));
-}
-
-static ssize_t _nvmap_sysfs_show_heap_free_count(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct nvmap_carveout_node *c = container_of(d,
- struct nvmap_carveout_node, dev);
- return sprintf(buf, "%lu\n",
- _nvmap_carveout_blockstat(&c->carveout,
- CARVEOUT_STAT_FREE_BLOCKS));
-}
-
-static ssize_t _nvmap_sysfs_show_heap_free_max(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct nvmap_carveout_node *c = container_of(d,
- struct nvmap_carveout_node, dev);
- return sprintf(buf, "%lu\n",
- _nvmap_carveout_blockstat(&c->carveout,
- CARVEOUT_STAT_LARGEST_FREE));
-}
-
-static ssize_t _nvmap_sysfs_show_heap_total_count(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct nvmap_carveout_node *c = container_of(d,
- struct nvmap_carveout_node, dev);
- return sprintf(buf, "%lu\n",
- _nvmap_carveout_blockstat(&c->carveout,
- CARVEOUT_STAT_NUM_BLOCKS));
-}
-
-static ssize_t _nvmap_sysfs_show_heap_total_max(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct nvmap_carveout_node *c = container_of(d,
- struct nvmap_carveout_node, dev);
- return sprintf(buf, "%lu\n",
- _nvmap_carveout_blockstat(&c->carveout,
- CARVEOUT_STAT_LARGEST_BLOCK));
-}
-
-static ssize_t _nvmap_sysfs_show_heap_total_size(struct device *d,
- struct device_attribute *attr, char *buf)
-{
- struct nvmap_carveout_node *c = container_of(d,
- struct nvmap_carveout_node, dev);
- return sprintf(buf, "%lu\n",
- _nvmap_carveout_blockstat(&c->carveout,
- CARVEOUT_STAT_TOTAL_SIZE));
-}
-
-static int nvmap_split_carveout_heap(struct nvmap_carveout *co, size_t size,
- const char *name, unsigned int new_bitmask);
-
-static ssize_t _nvmap_sysfs_set_heap_split(struct device *d,
- struct device_attribute *attr, const char * buf, size_t count)
-{
- struct nvmap_carveout_node *c = container_of(d,
- struct nvmap_carveout_node, dev);
- char *tmp, *local = kzalloc(count+1, GFP_KERNEL);
- char *sizestr = NULL, *bitmaskstr = NULL, *name = NULL;
- char **format[] = { &sizestr, &bitmaskstr, &name };
- char ***f_iter = format;
- unsigned int i;
- unsigned long size, bitmask;
- int err;
-
- if (!local) {
- pr_err("%s: unable to read string\n", __func__);
- return -ENOMEM;
- }
-
- memcpy(local, buf, count);
- tmp = local;
- for (i=0, **f_iter = local; i<count &&
- (f_iter - format)<ARRAY_SIZE(format)-1; i++) {
- if (local[i]==',') {
- local[i] = '\0';
- f_iter++;
- **f_iter = &local[i+1];
- }
- }
-
- if (!sizestr || !bitmaskstr || !name) {
- pr_err("%s: format error\n", __func__);
- kfree(tmp);
- return -EINVAL;
- }
-
-	for (local = name; *local && !isspace(*local); local++);
-
- if (local==name) {
- pr_err("%s: invalid name %s\n", __func__, name);
- kfree(tmp);
- return -EINVAL;
- }
-
- *local=0;
-
- size = memparse(sizestr, &sizestr);
- if (!size) {
- kfree(tmp);
- return -EINVAL;
- }
-
- if (strict_strtoul(bitmaskstr, 0, &bitmask)==-EINVAL) {
- kfree(tmp);
- return -EINVAL;
- }
-
- err = nvmap_split_carveout_heap(&c->carveout, size, name, bitmask);
-
- if (err) pr_err("%s: failed to create split heap %s\n", __func__, name);
- kfree(tmp);
- return err ? err : count;
-}
-
-static NVMAP_CARVEOUT_ATTR_RO(usage);
-static NVMAP_CARVEOUT_ATTR_RO(name);
-static NVMAP_CARVEOUT_ATTR_RO(base);
-static NVMAP_CARVEOUT_ATTR_RO(free_size);
-static NVMAP_CARVEOUT_ATTR_RO(free_count);
-static NVMAP_CARVEOUT_ATTR_RO(free_max);
-static NVMAP_CARVEOUT_ATTR_RO(total_size);
-static NVMAP_CARVEOUT_ATTR_RO(total_count);
-static NVMAP_CARVEOUT_ATTR_RO(total_max);
-static NVMAP_CARVEOUT_ATTR_WO(split, (S_IWUSR | S_IWGRP));
-
-static struct attribute *nvmap_heap_default_attrs[] = {
- &nvmap_heap_attr_usage.attr,
- &nvmap_heap_attr_name.attr,
- &nvmap_heap_attr_split.attr,
- &nvmap_heap_attr_base.attr,
- &nvmap_heap_attr_total_size.attr,
- &nvmap_heap_attr_free_size.attr,
- &nvmap_heap_attr_total_count.attr,
- &nvmap_heap_attr_free_count.attr,
- &nvmap_heap_attr_total_max.attr,
- &nvmap_heap_attr_free_max.attr,
- NULL
-};
-
-static struct attribute_group nvmap_heap_defattr_group = {
- .attrs = nvmap_heap_default_attrs
-};
-
-static struct device *__nvmap_heap_parent_dev(void);
-#define _nvmap_heap_parent_dev __nvmap_heap_parent_dev()
-
-/* unpinned I/O VMM areas may be reclaimed by nvmap to make room for
- * new surfaces. unpinned surfaces are stored in segregated linked-lists
- * sorted in most-recently-unpinned order (i.e., head insertion, head
- * removal) */
-#ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
-static DEFINE_SPINLOCK(nvmap_mru_vma_lock);
-static const size_t nvmap_mru_cutoff[] = {
- 262144, 393216, 786432, 1048576, 1572864
-};
-
-static struct list_head nvmap_mru_vma_lists[ARRAY_SIZE(nvmap_mru_cutoff)];
-
-static inline struct list_head *_nvmap_list(size_t size)
-{
- unsigned int i;
-
- for (i=0; i<ARRAY_SIZE(nvmap_mru_cutoff); i++)
- if (size <= nvmap_mru_cutoff[i]) return &nvmap_mru_vma_lists[i];
-
- return &nvmap_mru_vma_lists[ARRAY_SIZE(nvmap_mru_cutoff)-1];
-}
-#endif
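-
-/* For example, with the cutoffs above a 300 KiB (307200 B) area lands in
- * the 384 KiB list (index 1), and anything larger than 1.5 MiB shares the
- * last list (index 4).
- */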
-
-static inline struct nvmap_handle *_nvmap_handle_get(struct nvmap_handle *h)
-{
- if (unlikely(h->poison!=NVDA_POISON)) {
- pr_err("%s: %s getting poisoned handle\n", __func__,
- current->group_leader->comm);
- return NULL;
- } else if (unlikely(atomic_inc_return(&h->ref)<=1)) {
- pr_err("%s: %s getting a freed handle\n",
- __func__, current->group_leader->comm);
- return NULL;
- }
- return h;
-}
-
-static inline void _nvmap_handle_put(struct nvmap_handle *h)
-{
- int cnt = atomic_dec_return(&h->ref);
-
- if (unlikely(cnt<0)) {
- pr_err("%s: %s put to negative references\n",
- __func__, current->comm);
- dump_stack();
- } else if (!cnt) _nvmap_handle_free(h);
-}
-
-static struct nvmap_handle *_nvmap_claim_preserved(
- struct task_struct *new_owner, unsigned long key)
-{
- struct rb_node *n;
- struct nvmap_handle *b = NULL;
-
- if (!key) return NULL;
-
- spin_lock(&nvmap_handle_lock);
- n = rb_first(&nvmap_handles);
-
- while (n) {
- b = rb_entry(n, struct nvmap_handle, node);
- if (b->alloc && !b->heap_pgalloc && b->carveout.key == key) {
- b->carveout.key = 0;
- b->owner = new_owner;
- break;
- }
- b = NULL;
- n = rb_next(n);
- }
-
- spin_unlock(&nvmap_handle_lock);
- return b;
-}
-
-static struct nvmap_handle *_nvmap_validate_get(unsigned long handle, bool su)
-{
- struct nvmap_handle *b = NULL;
-
-#ifdef CONFIG_DEVNVMAP_PARANOID
- struct rb_node *n;
-
- spin_lock(&nvmap_handle_lock);
-
- n = nvmap_handles.rb_node;
-
- while (n) {
- b = rb_entry(n, struct nvmap_handle, node);
- if ((unsigned long)b == handle) {
- if (su || b->global || b->owner==current->group_leader)
- b = _nvmap_handle_get(b);
- else
- b = NULL;
- spin_unlock(&nvmap_handle_lock);
- return b;
- }
- if (handle > (unsigned long)b) n = n->rb_right;
- else n = n->rb_left;
- }
- spin_unlock(&nvmap_handle_lock);
- return NULL;
-#else
- if (!handle) return NULL;
- b = _nvmap_handle_get((struct nvmap_handle *)handle);
- return b;
-#endif
-}
-
-/* nvmap_mru_vma_lock should be acquired by the caller before calling this */
-static inline void _nvmap_insert_mru_vma(struct nvmap_handle *h)
-{
-#ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
- list_add(&h->pgalloc.mru_list, _nvmap_list(h->pgalloc.area->iovm_length));
-#endif
-}
-
-static void _nvmap_remove_mru_vma(struct nvmap_handle *h)
-{
-#ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
- spin_lock(&nvmap_mru_vma_lock);
- if (!list_empty(&h->pgalloc.mru_list))
- list_del(&h->pgalloc.mru_list);
- spin_unlock(&nvmap_mru_vma_lock);
- INIT_LIST_HEAD(&h->pgalloc.mru_list);
-#endif
-}
-
-static struct tegra_iovmm_area *_nvmap_get_vm(struct nvmap_handle *h)
-{
-#ifndef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
- BUG_ON(!h->pgalloc.area);
- BUG_ON(h->size > h->pgalloc.area->iovm_length);
- BUG_ON((h->size | h->pgalloc.area->iovm_length) & ~PAGE_MASK);
- return h->pgalloc.area;
-#else
- struct list_head *mru;
- struct nvmap_handle *evict = NULL;
- struct tegra_iovmm_area *vm = NULL;
- unsigned int i, idx;
-
- if (h->pgalloc.area) {
- spin_lock(&nvmap_mru_vma_lock);
- BUG_ON(list_empty(&h->pgalloc.mru_list));
- list_del(&h->pgalloc.mru_list);
- INIT_LIST_HEAD(&h->pgalloc.mru_list);
- spin_unlock(&nvmap_mru_vma_lock);
- return h->pgalloc.area;
- }
-
- vm = tegra_iovmm_create_vm(nvmap_vm_client, NULL, h->size,
- _nvmap_flag_to_pgprot(h->flags, pgprot_kernel));
-
- if (vm) {
- INIT_LIST_HEAD(&h->pgalloc.mru_list);
- return vm;
- }
- /* attempt to re-use the most recently unpinned IOVMM area in the
- * same size bin as the current handle. If that fails, iteratively
- * evict handles (starting from the current bin) until an allocation
- * succeeds or no more areas can be evicted */
-
- spin_lock(&nvmap_mru_vma_lock);
- mru = _nvmap_list(h->size);
- if (!list_empty(mru))
- evict = list_first_entry(mru, struct nvmap_handle,
- pgalloc.mru_list);
- if (evict && evict->pgalloc.area->iovm_length >= h->size) {
- list_del(&evict->pgalloc.mru_list);
- vm = evict->pgalloc.area;
- evict->pgalloc.area = NULL;
- INIT_LIST_HEAD(&evict->pgalloc.mru_list);
- spin_unlock(&nvmap_mru_vma_lock);
- return vm;
- }
-
- idx = mru - nvmap_mru_vma_lists;
-
- for (i=0; i<ARRAY_SIZE(nvmap_mru_vma_lists) && !vm; i++, idx++) {
- if (idx >= ARRAY_SIZE(nvmap_mru_vma_lists))
- idx -= ARRAY_SIZE(nvmap_mru_vma_lists);
- mru = &nvmap_mru_vma_lists[idx];
- while (!list_empty(mru) && !vm) {
- evict = list_first_entry(mru, struct nvmap_handle,
- pgalloc.mru_list);
-
- BUG_ON(atomic_add_return(0, &evict->pin)!=0);
- BUG_ON(!evict->pgalloc.area);
- list_del(&evict->pgalloc.mru_list);
- INIT_LIST_HEAD(&evict->pgalloc.mru_list);
- spin_unlock(&nvmap_mru_vma_lock);
- tegra_iovmm_free_vm(evict->pgalloc.area);
- evict->pgalloc.area = NULL;
- vm = tegra_iovmm_create_vm(nvmap_vm_client,
- NULL, h->size,
- _nvmap_flag_to_pgprot(h->flags, pgprot_kernel));
- spin_lock(&nvmap_mru_vma_lock);
- }
- }
- spin_unlock(&nvmap_mru_vma_lock);
- return vm;
-#endif
-}
-
-static int _nvmap_do_cache_maint(struct nvmap_handle *h,
- unsigned long start, unsigned long end, unsigned long op, bool get);
-
-void _nvmap_handle_free(struct nvmap_handle *h)
-{
- int e;
- spin_lock(&nvmap_handle_lock);
-
- /* if 2 contexts call _get and _put simultaneously, the reference
- * count may drop to 0 and then increase to 1 before the handle
- * can be freed. */
- if (atomic_add_return(0, &h->ref)>0) {
- spin_unlock(&nvmap_handle_lock);
- return;
- }
- smp_rmb();
- BUG_ON(atomic_read(&h->ref)<0);
- BUG_ON(atomic_read(&h->pin)!=0);
-
- rb_erase(&h->node, &nvmap_handles);
-
- spin_unlock(&nvmap_handle_lock);
-
- if (h->owner) put_task_struct(h->owner);
-
- /* remove when NvRmMemMgr compatibility is eliminated */
- if (h->kern_map) {
- BUG_ON(!h->alloc);
- if (h->heap_pgalloc)
- vm_unmap_ram(h->kern_map, h->size>>PAGE_SHIFT);
- else {
- unsigned long addr = (unsigned long)h->kern_map;
-			addr &= PAGE_MASK;	/* page-align for iounmap */
- iounmap((void *)addr);
- }
- }
-
- /* ensure that no stale data remains in the cache for this handle */
- e = _nvmap_do_cache_maint(h, 0, h->size, NVMEM_CACHE_OP_WB_INV, false);
-
- if (h->alloc && !h->heap_pgalloc)
- nvmap_carveout_free(h->carveout.co_heap, h->carveout.block_idx);
- else if (h->alloc) {
- unsigned int i;
- BUG_ON(h->size & ~PAGE_MASK);
- BUG_ON(!h->pgalloc.pages);
- _nvmap_remove_mru_vma(h);
- if (h->pgalloc.area) tegra_iovmm_free_vm(h->pgalloc.area);
- for (i=0; i<h->size>>PAGE_SHIFT; i++) {
- ClearPageReserved(h->pgalloc.pages[i]);
- __free_page(h->pgalloc.pages[i]);
- }
- if ((h->size>>PAGE_SHIFT)*sizeof(struct page*)>=PAGE_SIZE)
- vfree(h->pgalloc.pages);
- else
- kfree(h->pgalloc.pages);
- }
- h->poison = 0xa5a5a5a5;
- kfree(h);
-}
-
-#define nvmap_gfp (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
-
-/* map the backing pages for a heap_pgalloc handle into its IOVMM area */
-static void _nvmap_handle_iovmm_map(struct nvmap_handle *h)
-{
- tegra_iovmm_addr_t va;
- unsigned long i;
-
- BUG_ON(!h->heap_pgalloc || !h->pgalloc.area);
- BUG_ON(h->size & ~PAGE_MASK);
- WARN_ON(!h->pgalloc.dirty);
-
- for (va = h->pgalloc.area->iovm_start, i=0;
- va < (h->pgalloc.area->iovm_start + h->size);
- i++, va+=PAGE_SIZE) {
- BUG_ON(!pfn_valid(page_to_pfn(h->pgalloc.pages[i])));
- tegra_iovmm_vm_insert_pfn(h->pgalloc.area, va,
- page_to_pfn(h->pgalloc.pages[i]));
- }
- h->pgalloc.dirty = false;
-}
-
-static int nvmap_pagealloc(struct nvmap_handle *h, bool contiguous)
-{
- unsigned int i = 0, cnt = (h->size + PAGE_SIZE - 1) >> PAGE_SHIFT;
- struct page **pages;
-
- if (cnt*sizeof(*pages)>=PAGE_SIZE)
- pages = vmalloc(cnt*sizeof(*pages));
- else
- pages = kzalloc(sizeof(*pages)*cnt, GFP_KERNEL);
-
- if (!pages) return -ENOMEM;
-
- if (contiguous) {
- size_t order = get_order(h->size);
- struct page *compound_page;
- compound_page = alloc_pages(nvmap_gfp, order);
- if (!compound_page) goto fail;
- split_page(compound_page, order);
- for (i=0; i<cnt; i++)
- pages[i] = nth_page(compound_page, i);
- for (; i<(1<<order); i++)
- __free_page(nth_page(compound_page, i));
- } else {
- for (i=0; i<cnt; i++) {
- pages[i] = alloc_page(nvmap_gfp);
- if (!pages[i]) {
- pr_err("failed to allocate %u pages after %u entries\n",
- cnt, i);
- goto fail;
- }
- }
- }
-
- h->pgalloc.area = NULL;
-#ifndef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
- if (!contiguous) {
- h->pgalloc.area = tegra_iovmm_create_vm(nvmap_vm_client,
- NULL, cnt << PAGE_SHIFT,
- _nvmap_flag_to_pgprot(h->flags, pgprot_kernel));
- if (!h->pgalloc.area) goto fail;
- h->pgalloc.dirty = true;
- }
-#endif
-
- for (i=0; i<cnt; i++) {
- void *km;
- SetPageReserved(pages[i]);
- km = kmap(pages[i]);
- if (km) __cpuc_flush_dcache_area(km, PAGE_SIZE);
- outer_flush_range(page_to_phys(pages[i]),
- page_to_phys(pages[i])+PAGE_SIZE);
- kunmap(pages[i]);
- }
-
- h->size = cnt<<PAGE_SHIFT;
- h->pgalloc.pages = pages;
- h->pgalloc.contig = contiguous;
- INIT_LIST_HEAD(&h->pgalloc.mru_list);
- return 0;
-
-fail:
- while (i--) __free_page(pages[i]);
- if (pages && (cnt*sizeof(*pages)>=PAGE_SIZE)) vfree(pages);
- else if (pages) kfree(pages);
- return -ENOMEM;
-}
-
-static struct nvmap_handle *_nvmap_handle_create(
- struct task_struct *owner, size_t size)
-{
- struct nvmap_handle *h = kzalloc(sizeof(*h), GFP_KERNEL);
- struct nvmap_handle *b;
- struct rb_node **p;
- struct rb_node *parent = NULL;
-
- if (!h) return NULL;
- atomic_set(&h->ref, 1);
- atomic_set(&h->pin, 0);
- h->owner = owner;
- h->size = h->orig_size = size;
- h->flags = NVMEM_HANDLE_WRITE_COMBINE;
- h->poison = NVDA_POISON;
-
- spin_lock(&nvmap_handle_lock);
- p = &nvmap_handles.rb_node;
- while (*p) {
- parent = *p;
- b = rb_entry(parent, struct nvmap_handle, node);
- if (h > b) p = &parent->rb_right;
- else p = &parent->rb_left;
- }
- rb_link_node(&h->node, parent, p);
- rb_insert_color(&h->node, &nvmap_handles);
- spin_unlock(&nvmap_handle_lock);
- if (owner) get_task_struct(owner);
- return h;
-}
-
-/* nvmap pte manager */
-
-static void _nvmap_set_pte_at(unsigned long addr, unsigned long pfn,
- pgprot_t prot)
-{
- u32 off;
- int idx;
- pte_t *pte;
-
- BUG_ON(!addr);
- idx = NVMAP_PTE_INDEX(addr);
- off = NVMAP_PTE_OFFSET(addr) & (PTRS_PER_PTE-1);
-
- pte = nvmap_pte[idx] + off;
- set_pte_ext(pte, pfn_pte(pfn, prot), 0);
- flush_tlb_kernel_page(addr);
-}
-
-static int _nvmap_map_pte(unsigned long pfn, pgprot_t prot, void **vaddr)
-{
- static unsigned int last_bit = 0;
- unsigned long bit;
- unsigned long addr;
- unsigned long flags;
-
- spin_lock_irqsave(&nvmap_ptelock, flags);
-
- bit = find_next_zero_bit(nvmap_ptebits, NVMAP_PAGES, last_bit);
- if (bit==NVMAP_PAGES) {
- bit = find_first_zero_bit(nvmap_ptebits, last_bit);
- if (bit == last_bit) bit = NVMAP_PAGES;
- }
-
- if (bit==NVMAP_PAGES) {
- spin_unlock_irqrestore(&nvmap_ptelock, flags);
- return -ENOMEM;
- }
-
- last_bit = bit;
- set_bit(bit, nvmap_ptebits);
- spin_unlock_irqrestore(&nvmap_ptelock, flags);
-
- addr = NVMAP_BASE + bit*PAGE_SIZE;
-
- _nvmap_set_pte_at(addr, pfn, prot);
- *vaddr = (void *)addr;
- return 0;
-}
-
-static int nvmap_map_pte(unsigned long pfn, pgprot_t prot, void **addr)
-{
- int ret;
- ret = wait_event_interruptible(nvmap_ptefull,
- !_nvmap_map_pte(pfn, prot, addr));
-
- if (ret==-ERESTARTSYS) return -EINTR;
- return ret;
-}
-
-static void nvmap_unmap_pte(void *addr)
-{
- unsigned long bit = NVMAP_PTE_OFFSET(addr);
- unsigned long flags;
-
- /* the ptes aren't cleared in this function, since the address isn't
- * re-used until it is allocated again by nvmap_map_pte. */
- BUG_ON(bit >= NVMAP_PAGES);
- spin_lock_irqsave(&nvmap_ptelock, flags);
- clear_bit(bit, nvmap_ptebits);
- spin_unlock_irqrestore(&nvmap_ptelock, flags);
- wake_up(&nvmap_ptefull);
-}
-
-/* to ensure that the backing store for the VMA isn't freed while a fork'd
- * reference still exists, nvmap_vma_open increments the reference count on
- * the handle, and nvmap_vma_close decrements it. alternatively, we could
- * disallow copying of the vma, or behave like pmem and zap the pages. FIXME.
-*/
-static void nvmap_vma_open(struct vm_area_struct *vma)
-{
- struct nvmap_vma_priv *priv;
-
- priv = vma->vm_private_data;
-
- BUG_ON(!priv);
-
- atomic_inc(&priv->ref);
-}
-
-static void nvmap_vma_close(struct vm_area_struct *vma)
-{
- struct nvmap_vma_priv *priv = vma->vm_private_data;
-
- if (priv && !atomic_dec_return(&priv->ref)) {
- if (priv->h) _nvmap_handle_put(priv->h);
- kfree(priv);
- }
- vma->vm_private_data = NULL;
-}
-
-static int nvmap_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct nvmap_vma_priv *priv;
- unsigned long offs;
-
- offs = (unsigned long)(vmf->virtual_address - vma->vm_start);
- priv = vma->vm_private_data;
- if (!priv || !priv->h || !priv->h->alloc)
- return VM_FAULT_SIGBUS;
-
- offs += priv->offs;
- /* if the VMA was split for some reason, vm_pgoff will be the VMA's
- * offset from the original VMA */
- offs += (vma->vm_pgoff << PAGE_SHIFT);
-
- if (offs >= priv->h->size)
- return VM_FAULT_SIGBUS;
-
- if (!priv->h->heap_pgalloc) {
- unsigned long pfn;
- BUG_ON(priv->h->carveout.base & ~PAGE_MASK);
- pfn = ((priv->h->carveout.base + offs) >> PAGE_SHIFT);
- vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
- return VM_FAULT_NOPAGE;
- } else {
- struct page *page;
- offs >>= PAGE_SHIFT;
- page = priv->h->pgalloc.pages[offs];
- if (page) get_page(page);
- vmf->page = page;
- return (page) ? 0 : VM_FAULT_SIGBUS;
- }
-}
-
-static long nvmap_ioctl(struct file *filp,
- unsigned int cmd, unsigned long arg)
-{
- int err = 0;
- void __user *uarg = (void __user *)arg;
-
- if (_IOC_TYPE(cmd) != NVMEM_IOC_MAGIC)
- return -ENOTTY;
-
- if (_IOC_NR(cmd) > NVMEM_IOC_MAXNR)
- return -ENOTTY;
-
- if (_IOC_DIR(cmd) & _IOC_READ)
- err = !access_ok(VERIFY_WRITE, uarg, _IOC_SIZE(cmd));
- if (_IOC_DIR(cmd) & _IOC_WRITE)
- err = !access_ok(VERIFY_READ, uarg, _IOC_SIZE(cmd));
-
- if (err)
- return -EFAULT;
-
- switch (cmd) {
- case NVMEM_IOC_CREATE:
- case NVMEM_IOC_CLAIM:
- case NVMEM_IOC_FROM_ID:
- err = nvmap_ioctl_create(filp, cmd, uarg);
- break;
-
- case NVMEM_IOC_GET_ID:
- err = nvmap_ioctl_getid(filp, uarg);
- break;
-
- case NVMEM_IOC_PARAM:
- err = nvmap_ioctl_get_param(filp, uarg);
- break;
-
- case NVMEM_IOC_UNPIN_MULT:
- case NVMEM_IOC_PIN_MULT:
- err = nvmap_ioctl_pinop(filp, cmd==NVMEM_IOC_PIN_MULT, uarg);
- break;
-
- case NVMEM_IOC_ALLOC:
- err = nvmap_ioctl_alloc(filp, uarg);
- break;
-
- case NVMEM_IOC_FREE:
- err = nvmap_ioctl_free(filp, arg);
- break;
-
- case NVMEM_IOC_MMAP:
- err = nvmap_map_into_caller_ptr(filp, uarg);
- break;
-
- case NVMEM_IOC_WRITE:
- case NVMEM_IOC_READ:
- err = nvmap_ioctl_rw_handle(filp, cmd==NVMEM_IOC_READ, uarg);
- break;
-
- case NVMEM_IOC_CACHE:
- err = nvmap_ioctl_cache_maint(filp, uarg);
- break;
-
- default:
- return -ENOTTY;
- }
- return err;
-}
-
-/* must be called with the ref_lock held - given a user-space handle ID
- * ref, validate that the handle_ref object may be used by the caller */
-struct nvmap_handle_ref *_nvmap_ref_lookup_locked(
- struct nvmap_file_priv *priv, unsigned long ref)
-{
- struct rb_node *n = priv->handle_refs.rb_node;
- struct nvmap_handle *h = (struct nvmap_handle *)ref;
-
- if (unlikely(h->poison != NVDA_POISON)) {
- pr_err("%s: handle is poisoned\n", __func__);
- return NULL;
- }
-
- while (n) {
- struct nvmap_handle_ref *r;
- r = rb_entry(n, struct nvmap_handle_ref, node);
- if ((unsigned long)r->h == ref) return r;
- else if (ref > (unsigned long)r->h) n = n->rb_right;
- else n = n->rb_left;
- }
-
- return NULL;
-}
-
-/* must be called inside nvmap_pin_lock, to ensure that an entire stream
- * of pins will complete without competition from a second stream. returns
- * 0 if the pin was successful, -ENOMEM on failure */
-static int _nvmap_handle_pin_locked(struct nvmap_handle *h)
-{
- struct tegra_iovmm_area *area;
- BUG_ON(!h->alloc);
-
- h = _nvmap_handle_get(h);
- if (!h) return -ENOMEM;
-
- if (atomic_inc_return(&h->pin)==1) {
- if (h->heap_pgalloc && !h->pgalloc.contig) {
- area = _nvmap_get_vm(h);
- if (!area) {
- /* no race here, inside the pin mutex */
- atomic_dec(&h->pin);
- _nvmap_handle_put(h);
- return -ENOMEM;
- }
- if (area != h->pgalloc.area)
- h->pgalloc.dirty = true;
- h->pgalloc.area = area;
- }
- }
- return 0;
-}
-
-/* doesn't need to be called inside nvmap_pin_lock, since this will only
- * expand the available VM area */
-static int _nvmap_handle_unpin(struct nvmap_handle *h)
-{
- int ret = 0;
-
- if (atomic_add_return(0, &h->pin)==0) {
- pr_err("%s: %s attempting to unpin an unpinned handle\n",
- __func__, current->comm);
- dump_stack();
- return 0;
- }
-
- BUG_ON(!h->alloc);
-#ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
- spin_lock(&nvmap_mru_vma_lock);
-#endif
- if (!atomic_dec_return(&h->pin)) {
- if (h->heap_pgalloc && h->pgalloc.area) {
- /* if a secure handle is clean (i.e., mapped into
-			 * IOVMM), it needs to be zapped on unpin. */
- if (h->secure && !h->pgalloc.dirty) {
- tegra_iovmm_zap_vm(h->pgalloc.area);
- h->pgalloc.dirty = true;
- }
- _nvmap_insert_mru_vma(h);
- ret=1;
- }
- }
-#ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
- spin_unlock(&nvmap_mru_vma_lock);
-#endif
- _nvmap_handle_put(h);
- return ret;
-}
-
-/* pin a list of handles, mapping IOVMM areas if needed. may sleep, if
- * a handle's IOVMM area has been reclaimed and insufficient IOVMM space
- * is available to complete the list pin. no intervening pin operations
- * will interrupt this, and no validation is performed on the handles
- * that are provided. */
-static int _nvmap_handle_pin_fast(unsigned int nr, struct nvmap_handle **h)
-{
- unsigned int i;
- int ret = 0;
-
- mutex_lock(&nvmap_pin_lock);
- for (i=0; i<nr && !ret; i++) {
- ret = wait_event_interruptible(nvmap_pin_wait,
- !_nvmap_handle_pin_locked(h[i]));
- }
- mutex_unlock(&nvmap_pin_lock);
-
- if (ret) {
- int do_wake = 0;
- while (i--) do_wake |= _nvmap_handle_unpin(h[i]);
- if (do_wake) wake_up(&nvmap_pin_wait);
- return -EINTR;
- } else {
- for (i=0; i<nr; i++)
- if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty)
- _nvmap_handle_iovmm_map(h[i]);
- }
-
- return 0;
-}
-
-static int _nvmap_do_global_unpin(unsigned long ref)
-{
- struct nvmap_handle *h;
- int w;
-
- h = _nvmap_validate_get(ref, true);
- if (unlikely(!h)) {
- pr_err("%s: %s attempting to unpin non-existent handle\n",
- __func__, current->group_leader->comm);
- return 0;
- }
-
-	pr_err("%s: %s unpinning %s's %zuB %s handle without local context\n",
- __func__, current->group_leader->comm,
- (h->owner) ? h->owner->comm : "kernel", h->orig_size,
- (h->heap_pgalloc && !h->pgalloc.contig) ? "iovmm" :
- (h->heap_pgalloc) ? "sysmem" : "carveout");
-
- w = _nvmap_handle_unpin(h);
- _nvmap_handle_put(h);
- return w;
-}
-
-static void _nvmap_do_unpin(struct nvmap_file_priv *priv,
- unsigned int nr, unsigned long *refs)
-{
- struct nvmap_handle_ref *r;
- unsigned int i;
- int do_wake = 0;
-
- spin_lock(&priv->ref_lock);
- for (i=0; i<nr; i++) {
- if (!refs[i]) continue;
- r = _nvmap_ref_lookup_locked(priv, refs[i]);
- if (unlikely(!r)) {
- if (priv->su)
- do_wake |= _nvmap_do_global_unpin(refs[i]);
- else
- pr_err("%s: %s unpinning invalid handle\n",
- __func__, current->comm);
- } else if (unlikely(!atomic_add_unless(&r->pin, -1, 0)))
- pr_err("%s: %s unpinning unpinned handle\n",
- __func__, current->comm);
- else
- do_wake |= _nvmap_handle_unpin(r->h);
- }
- spin_unlock(&priv->ref_lock);
- if (do_wake) wake_up(&nvmap_pin_wait);
-}
-
-/* pins a list of handle_ref objects; same conditions apply as to
- * _nvmap_handle_pin, but also bumps the pin count of each handle_ref. */
-static int _nvmap_do_pin(struct nvmap_file_priv *priv,
- unsigned int nr, unsigned long *refs)
-{
- int ret = 0;
- unsigned int i;
- struct nvmap_handle **h = (struct nvmap_handle **)refs;
- struct nvmap_handle_ref *r;
-
- /* to optimize for the common case (client provided valid handle
- * references and the pin succeeds), increment the handle_ref pin
- * count during validation. in error cases, the tree will need to
- * be re-walked, since the handle_ref is discarded so that an
- * allocation isn't required. if a handle_ref is not found,
- * locally validate that the caller has permission to pin the handle;
- * handle_refs are not created in this case, so it is possible that
- * if the caller crashes after pinning a global handle, the handle
- * will be permanently leaked. */
- spin_lock(&priv->ref_lock);
- for (i=0; i<nr && !ret; i++) {
- r = _nvmap_ref_lookup_locked(priv, refs[i]);
- if (r) atomic_inc(&r->pin);
- else {
- if ((h[i]->poison != NVDA_POISON) ||
- (!(priv->su || h[i]->global ||
- current->group_leader == h[i]->owner)))
- ret = -EPERM;
- else {
-				pr_err("%s: %s pinning %s's %zuB handle without "
- "local context\n", __func__,
- current->group_leader->comm,
- h[i]->owner->comm, h[i]->orig_size);
- }
- }
- }
-
- while (ret && i--) {
- r = _nvmap_ref_lookup_locked(priv, refs[i]);
- if (r) atomic_dec(&r->pin);
- }
- spin_unlock(&priv->ref_lock);
-
- if (ret) return ret;
-
- mutex_lock(&nvmap_pin_lock);
- for (i=0; i<nr && !ret; i++) {
- ret = wait_event_interruptible(nvmap_pin_wait,
- !_nvmap_handle_pin_locked(h[i]));
- }
- mutex_unlock(&nvmap_pin_lock);
-
- if (ret) {
- int do_wake = 0;
- spin_lock(&priv->ref_lock);
- while (i--) {
- r = _nvmap_ref_lookup_locked(priv, refs[i]);
-			/* the ref may be absent (su/global pin); unpin via
-			 * the handle pointer itself */
-			do_wake |= _nvmap_handle_unpin(h[i]);
- if (r) atomic_dec(&r->pin);
- }
- spin_unlock(&priv->ref_lock);
- if (do_wake) wake_up(&nvmap_pin_wait);
- return -EINTR;
- } else {
- for (i=0; i<nr; i++) {
- if (h[i]->heap_pgalloc && h[i]->pgalloc.dirty)
- _nvmap_handle_iovmm_map(h[i]);
- }
- }
-
- return 0;
-}
-
-static int nvmap_ioctl_pinop(struct file *filp,
- bool is_pin, void __user *arg)
-{
- struct nvmem_pin_handle op;
- struct nvmap_handle *h;
- unsigned long on_stack[16];
- unsigned long *refs;
- unsigned long __user *output;
- unsigned int i;
- int err;
-
-	err = copy_from_user(&op, arg, sizeof(op)) ? -EFAULT : 0;
-	if (err) return err;
-
- if (!op.count) return -EINVAL;
-
- if (op.count > 1) {
-		size_t bytes = op.count * sizeof(unsigned long);
- if (!access_ok(VERIFY_READ, (void *)op.handles, bytes))
- return -EPERM;
- if (is_pin && op.addr &&
- !access_ok(VERIFY_WRITE, (void *)op.addr, bytes))
- return -EPERM;
-
- if (op.count <= ARRAY_SIZE(on_stack)) refs = on_stack;
- else refs = kzalloc(bytes, GFP_KERNEL);
-
- if (!refs) return -ENOMEM;
-		if (copy_from_user(refs, (void *)op.handles, bytes)) {
-			err = -EFAULT;
-			goto out;
-		}
- } else {
- refs = on_stack;
- on_stack[0] = (unsigned long)op.handles;
- }
-
- if (is_pin)
- err = _nvmap_do_pin(filp->private_data, op.count, refs);
- else
- _nvmap_do_unpin(filp->private_data, op.count, refs);
-
- /* skip the output stage on unpin */
- if (err || !is_pin) goto out;
-
- /* it is guaranteed that if _nvmap_do_pin returns 0 that
- * all of the handle_ref objects are valid, so dereferencing directly
- * here is safe */
- if (op.count > 1)
- output = (unsigned long __user *)op.addr;
- else {
- struct nvmem_pin_handle __user *tmp = arg;
- output = (unsigned long __user *)&(tmp->addr);
- }
-
- if (!output) goto out;
-
- for (i=0; i<op.count; i++) {
- unsigned long addr;
- h = (struct nvmap_handle *)refs[i];
- if (h->heap_pgalloc && h->pgalloc.contig)
- addr = page_to_phys(h->pgalloc.pages[0]);
- else if (h->heap_pgalloc)
- addr = h->pgalloc.area->iovm_start;
- else
- addr = h->carveout.base;
-
- __put_user(addr, &output[i]);
- }
-
-out:
- if (refs != on_stack) kfree(refs);
- return err;
-}
-
-static int nvmap_release(struct inode *inode, struct file *filp)
-{
- struct nvmap_file_priv *priv = filp->private_data;
- struct rb_node *n;
- struct nvmap_handle_ref *r;
- int refs;
- int do_wake = 0;
- int pins;
-
- if (!priv) return 0;
-
- while ((n = rb_first(&priv->handle_refs))) {
- r = rb_entry(n, struct nvmap_handle_ref, node);
- rb_erase(&r->node, &priv->handle_refs);
- smp_rmb();
- pins = atomic_read(&r->pin);
- atomic_set(&r->pin, 0);
- while (pins--) do_wake |= _nvmap_handle_unpin(r->h);
- refs = atomic_read(&r->refs);
- if (r->h->alloc && r->h->heap_pgalloc && !r->h->pgalloc.contig)
- atomic_sub(r->h->size, &priv->iovm_commit);
- while (refs--) _nvmap_handle_put(r->h);
- kfree(r);
- }
- if (do_wake) wake_up(&nvmap_pin_wait);
- kfree(priv);
- return 0;
-}
-
-static int nvmap_open(struct inode *inode, struct file *filp)
-{
- /* eliminate read, write and llseek support on this node */
- struct nvmap_file_priv *priv;
- int ret;
-
- /* nvmap doesn't track total number of pinned references, so its
- * IOVMM client is always locked. */
- if (!nvmap_vm_client) {
- mutex_lock(&nvmap_pin_lock);
- if (!nvmap_vm_client) {
- nvmap_vm_client = tegra_iovmm_alloc_client("gpu", NULL);
- if (nvmap_vm_client)
- tegra_iovmm_client_lock(nvmap_vm_client);
- }
- mutex_unlock(&nvmap_pin_lock);
- }
-
- ret = nonseekable_open(inode, filp);
- if (unlikely(ret))
- return ret;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) return -ENOMEM;
- priv->handle_refs = RB_ROOT;
- priv->su = (filp->f_op == &knvmap_fops);
-
- atomic_set(&priv->iovm_commit, 0);
-
- if (nvmap_vm_client)
- priv->iovm_limit = tegra_iovmm_get_vm_size(nvmap_vm_client);
-#ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
- /* to prevent fragmentation-caused deadlocks, derate the size of
- * the IOVM space to 75% */
- priv->iovm_limit >>= 2;
- priv->iovm_limit *= 3;
-#endif
-
- spin_lock_init(&priv->ref_lock);
-
- filp->f_mapping->backing_dev_info = &nvmap_bdi;
-
- filp->private_data = priv;
- return 0;
-}
-
-static int nvmap_ioctl_getid(struct file *filp, void __user *arg)
-{
- struct nvmem_create_handle op;
- struct nvmap_handle *h = NULL;
-
-	if (copy_from_user(&op, arg, sizeof(op)))
-		return -EFAULT;
-
- if (!op.handle) return -EINVAL;
-
- h = _nvmap_validate_get((unsigned long)op.handle,
- filp->f_op==&knvmap_fops);
-
- if (h) {
- op.id = (__u32)h;
- /* when the owner of a handle gets its ID, this is treated
- * as a granting of the handle for use by other processes.
- * however, the super-user is not capable of promoting a
- * handle to global status if it was created in another
- * process. */
- if (current->group_leader == h->owner) h->global = true;
-
- /* getid is not supposed to result in a ref count increase */
- _nvmap_handle_put(h);
-
-		return copy_to_user(arg, &op, sizeof(op)) ? -EFAULT : 0;
- }
- return -EPERM;
-}
-
-static int _nvmap_do_alloc(struct nvmap_file_priv *priv,
- unsigned long href, unsigned int heap_mask, size_t align,
- unsigned int flags)
-{
- struct nvmap_handle_ref *r;
- struct nvmap_handle *h;
- int numpages;
-
- align = max_t(size_t, align, L1_CACHE_BYTES);
-
- if (!href) return -EINVAL;
-
- spin_lock(&priv->ref_lock);
- r = _nvmap_ref_lookup_locked(priv, href);
- spin_unlock(&priv->ref_lock);
-
- if (!r) return -EPERM;
-
- h = r->h;
- if (h->alloc) return 0;
-
- numpages = ((h->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
- h->secure = (flags & NVMEM_HANDLE_SECURE);
- h->flags = (flags & 0x3);
-
- BUG_ON(!numpages);
-
- /* secure allocations can only be served from secure heaps */
- if (h->secure) {
- heap_mask &= NVMAP_SECURE_HEAPS;
- if (!heap_mask) return -EINVAL;
- }
- /* can't do greater than page size alignment with page alloc */
- if (align > PAGE_SIZE)
- heap_mask &= NVMEM_HEAP_CARVEOUT_MASK;
-
- while (heap_mask && !h->alloc) {
- unsigned int heap_type = _nvmap_heap_policy(heap_mask, numpages);
-
- if (heap_type & NVMEM_HEAP_CARVEOUT_MASK) {
- struct nvmap_carveout_node *n;
-
- down_read(&nvmap_context.list_sem);
- list_for_each_entry(n, &nvmap_context.heaps, heap_list) {
- if (heap_type & n->heap_bit) {
- struct nvmap_carveout* co = &n->carveout;
- int idx = nvmap_carveout_alloc(co, align, h->size);
- if (idx != -1) {
- h->carveout.co_heap = co;
- h->carveout.block_idx = idx;
- spin_lock(&co->lock);
- h->carveout.base = co->blocks[idx].base;
- spin_unlock(&co->lock);
- h->heap_pgalloc = false;
- h->alloc = true;
- break;
- }
- }
- }
- up_read(&nvmap_context.list_sem);
- }
- else if (heap_type & NVMEM_HEAP_IOVMM) {
- int ret;
-
- BUG_ON(align > PAGE_SIZE);
-
- /* increment the committed IOVM space prior to
- * allocation, to avoid race conditions with other
- * threads simultaneously allocating. this is
- * conservative, but guaranteed to work */
- if (atomic_add_return(numpages << PAGE_SHIFT, &priv->iovm_commit)
- < priv->iovm_limit) {
- ret = nvmap_pagealloc(h, false);
- }
- else ret = -ENOMEM;
-
- if (ret) {
- atomic_sub(numpages << PAGE_SHIFT, &priv->iovm_commit);
- }
- else {
- BUG_ON(h->pgalloc.contig);
- h->heap_pgalloc = true;
- h->alloc = true;
- }
- }
- else if (heap_type & NVMEM_HEAP_SYSMEM) {
- if (nvmap_pagealloc(h, true) == 0) {
- BUG_ON(!h->pgalloc.contig);
- h->heap_pgalloc = true;
- h->alloc = true;
- }
- }
- else break;
-
- heap_mask &= ~heap_type;
- }
-
- return (h->alloc ? 0 : -ENOMEM);
-}
-
-static int nvmap_ioctl_alloc(struct file *filp, void __user *arg)
-{
- struct nvmem_alloc_handle op;
- struct nvmap_file_priv *priv = filp->private_data;
- int err;
-
- err = copy_from_user(&op, arg, sizeof(op));
- if (err) return err;
-
- if (op.align & (op.align-1)) return -EINVAL;
-
- /* user-space handles are aligned to page boundaries, to prevent
- * data leakage. */
- op.align = max_t(size_t, op.align, PAGE_SIZE);
-
- return _nvmap_do_alloc(priv, op.handle, op.heap_mask, op.align, op.flags);
-}
-
-static int _nvmap_do_free(struct nvmap_file_priv *priv, unsigned long href)
-{
- struct nvmap_handle_ref *r;
- struct nvmap_handle *h;
- int do_wake = 0;
-
- if (!href) return 0;
-
- spin_lock(&priv->ref_lock);
- r = _nvmap_ref_lookup_locked(priv, href);
-
- if (!r) {
- spin_unlock(&priv->ref_lock);
- pr_err("%s attempting to free unrealized handle\n",
- current->group_leader->comm);
- return -EPERM;
- }
-
- h = r->h;
-
- smp_rmb();
- if (!atomic_dec_return(&r->refs)) {
- int pins = atomic_read(&r->pin);
- rb_erase(&r->node, &priv->handle_refs);
- spin_unlock(&priv->ref_lock);
- if (pins) pr_err("%s: %s freeing %s's pinned %s %s %uB handle\n",
- __func__, current->comm,
- (r->h->owner) ? r->h->owner->comm : "kernel",
- (r->h->global) ? "global" : "private",
- (r->h->alloc && r->h->heap_pgalloc) ? "page-alloc" :
- (r->h->alloc) ? "carveout" : "unallocated",
- r->h->orig_size);
- while (pins--) do_wake |= _nvmap_handle_unpin(r->h);
- kfree(r);
- if (h->alloc && h->heap_pgalloc && !h->pgalloc.contig)
- atomic_sub(h->size, &priv->iovm_commit);
- if (do_wake) wake_up(&nvmap_pin_wait);
- } else
- spin_unlock(&priv->ref_lock);
-
- BUG_ON(!atomic_read(&h->ref));
- _nvmap_handle_put(h);
- return 0;
-}
-
-static int nvmap_ioctl_free(struct file *filp, unsigned long arg)
-{
- return _nvmap_do_free(filp->private_data, arg);
-}
-
-/* given a size, pre-existing handle ID, or a preserved handle key, create
- * a handle and a reference to the handle in the per-context data */
-static int _nvmap_do_create(struct nvmap_file_priv *priv,
- unsigned int cmd, unsigned long key, bool su,
- struct nvmap_handle_ref **ref)
-{
- struct nvmap_handle_ref *r = NULL;
- struct nvmap_handle *h = NULL;
- struct rb_node **p, *parent = NULL;
-
- if (cmd == NVMEM_IOC_FROM_ID) {
- /* only ugly corner case to handle with from ID:
- *
- * normally, if the handle that is being duplicated is IOVMM-
- * backed, the handle should fail to duplicate if duping it
- * would over-commit IOVMM space. however, if the handle is
- * already duplicated in the client process (or the client
- * is duplicating a handle it created originally), IOVMM space
- * should not be doubly-reserved.
- */
- h = _nvmap_validate_get(key, priv->su);
-
- if (!h) {
- pr_err("%s: %s duplicate handle failed\n", __func__,
- current->group_leader->comm);
- return -EPERM;
- }
-
- if (!h->alloc) {
- pr_err("%s: attempting to clone unallocated "
- "handle\n", __func__);
- _nvmap_handle_put(h);
- h = NULL;
- return -EINVAL;
- }
-
- spin_lock(&priv->ref_lock);
- r = _nvmap_ref_lookup_locked(priv, (unsigned long)h);
- spin_unlock(&priv->ref_lock);
- if (r) {
- /* if the client does something strange, like calling CreateFromId
- * when it was the original creator, avoid creating two handle refs
- * for the same handle */
- atomic_inc(&r->refs);
- *ref = r;
- return 0;
- }
-
- /* verify that adding this handle to the process' access list
- * won't exceed the IOVM limit */
- /* TODO: [ahatala 2010-04-20] let the kernel over-commit for now */
- if (h->heap_pgalloc && !h->pgalloc.contig && !su) {
- int oc = atomic_add_return(h->size, &priv->iovm_commit);
- if (oc > priv->iovm_limit) {
- atomic_sub(h->size, &priv->iovm_commit);
- _nvmap_handle_put(h);
- h = NULL;
- pr_err("%s: %s duplicating handle would "
- "over-commit iovmm space (%dB / %dB)\n",
- __func__, current->group_leader->comm,
- oc, priv->iovm_limit);
- return -ENOMEM;
- }
- }
- } else if (cmd == NVMEM_IOC_CREATE) {
- h = _nvmap_handle_create(current->group_leader, key);
- if (!h) return -ENOMEM;
- } else {
- h = _nvmap_claim_preserved(current->group_leader, key);
- if (!h) return -EINVAL;
- }
-
- BUG_ON(!h);
-
- r = kzalloc(sizeof(*r), GFP_KERNEL);
- if (!r) {
- if (h) _nvmap_handle_put(h);
- return -ENOMEM;
- }
-
- atomic_set(&r->refs, 1);
- r->h = h;
- atomic_set(&r->pin, 0);
-
- spin_lock(&priv->ref_lock);
- p = &priv->handle_refs.rb_node;
- while (*p) {
- struct nvmap_handle_ref *l;
- parent = *p;
- l = rb_entry(parent, struct nvmap_handle_ref, node);
- if (r->h > l->h) p = &parent->rb_right;
- else p = &parent->rb_left;
- }
- rb_link_node(&r->node, parent, p);
- rb_insert_color(&r->node, &priv->handle_refs);
-
- spin_unlock(&priv->ref_lock);
- *ref = r;
- return 0;
-}
-
-static int nvmap_ioctl_create(struct file *filp,
- unsigned int cmd, void __user *arg)
-{
- struct nvmem_create_handle op;
- struct nvmap_handle_ref *r = NULL;
- struct nvmap_file_priv *priv = filp->private_data;
- unsigned long key;
- int err = 0;
-
- err = copy_from_user(&op, arg, sizeof(op));
- if (err) return err;
-
- if (!priv) return -ENODEV;
-
- /* user-space-created handles are expanded to be page-aligned,
- * so that mmap() will not accidentally leak a different allocation */
- if (cmd==NVMEM_IOC_CREATE)
- key = (op.size + PAGE_SIZE - 1) & ~(PAGE_SIZE-1);
- else if (cmd==NVMEM_IOC_CLAIM)
- key = op.key;
- else if (cmd==NVMEM_IOC_FROM_ID)
- key = op.id;
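-	/* e.g. with 4KB pages, a CREATE request for 5000 bytes is keyed
-	 * as 8192, so the mmap()able size is a whole number of pages */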
-
- err = _nvmap_do_create(priv, cmd, key, (filp->f_op==&knvmap_fops), &r);
-
- if (!err) {
- op.handle = (uintptr_t)r->h;
- /* since the size is spoofed to a page-multiple above,
- * clobber the orig_size field back to the requested value for
- * debugging. */
- if (cmd == NVMEM_IOC_CREATE) r->h->orig_size = op.size;
- err = copy_to_user(arg, &op, sizeof(op));
- if (err) _nvmap_do_free(priv, op.handle);
- }
-
- return err;
-}
-
-static int nvmap_map_into_caller_ptr(struct file *filp, void __user *arg)
-{
- struct nvmem_map_caller op;
- struct nvmap_vma_priv *vpriv;
- struct vm_area_struct *vma;
- struct nvmap_handle *h;
- int err = 0;
-
- err = copy_from_user(&op, arg, sizeof(op));
- if (err) return err;
-
- if (!op.handle) return -EINVAL;
-
- h = _nvmap_validate_get(op.handle, (filp->f_op==&knvmap_fops));
- if (!h) return -EINVAL;
-
-	down_read(&current->mm->mmap_sem);
-
- vma = find_vma(current->mm, op.addr);
- if (!vma || !vma->vm_private_data) {
- err = -ENOMEM;
- goto out;
- }
-
- if (op.offset & ~PAGE_MASK) {
- err = -EFAULT;
- goto out;
- }
-
- if ((op.offset + op.length) > h->size) {
- err = -EADDRNOTAVAIL;
- goto out;
- }
-
- vpriv = vma->vm_private_data;
- BUG_ON(!vpriv);
-
-	/* the VMA must exactly match the requested mapping operation, and the
-	 * VMA that is targeted must have been created originally by /dev/nvmap
-	 */
- if ((vma->vm_start != op.addr) || (vma->vm_ops != &nvmap_vma_ops) ||
- (vma->vm_end-vma->vm_start != op.length)) {
- err = -EPERM;
- goto out;
- }
-
- /* verify that each mmap() system call creates a unique VMA */
-
- if (vpriv->h && h==vpriv->h)
- goto out;
- else if (vpriv->h) {
- err = -EADDRNOTAVAIL;
- goto out;
- }
-
- if (!h->heap_pgalloc && (h->carveout.base & ~PAGE_MASK)) {
- err = -EFAULT;
- goto out;
- }
-
- vpriv->h = h;
- vpriv->offs = op.offset;
-
- /* if the hmem is not writeback-cacheable, drop back to a page mapping
- * which will guarantee DMA coherency
- */
- vma->vm_page_prot = _nvmap_flag_to_pgprot(h->flags,
- vma->vm_page_prot);
-
-out:
-	up_read(&current->mm->mmap_sem);
- if (err) _nvmap_handle_put(h);
- return err;
-}
-/* Initially, the nvmap mmap system call is used to allocate an inaccessible
- * region of virtual-address space in the client. A subsequent
- * NVMAP_IOC_MMAP ioctl will associate each such region with a handle.
- */
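-/* sketch of the intended user-space sequence (illustrative only; error
- * handling omitted):
- *
- *   void *va = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
- *                   nvmap_fd, 0);
- *   struct nvmem_map_caller op = {
- *       .handle = h, .offset = 0, .length = len,
- *       .addr   = (unsigned long)va,
- *   };
- *   ioctl(nvmap_fd, NVMEM_IOC_MMAP, &op);   (backs the VMA with handle h)
- */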
-static int nvmap_mmap(struct file *filp, struct vm_area_struct *vma)
-{
-	/* FIXME: drivers which do not support COW seem to be split down the
-	 * middle on whether to force the VM_SHARED flag, or to return an
-	 * error when this flag isn't already set (i.e., MAP_PRIVATE).
-	 */
- struct nvmap_vma_priv *priv;
-
- vma->vm_private_data = NULL;
-
-	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->offs = 0;
- priv->h = NULL;
- atomic_set(&priv->ref, 1);
-
- vma->vm_flags |= VM_SHARED;
- vma->vm_flags |= (VM_IO | VM_DONTEXPAND | VM_MIXEDMAP | VM_RESERVED);
- vma->vm_ops = &nvmap_vma_ops;
- vma->vm_private_data = priv;
-
- return 0;
-}
-
-/* perform cache maintenance on a handle; caller's handle must be pre-
- * validated. */
-static int _nvmap_do_cache_maint(struct nvmap_handle *h,
- unsigned long start, unsigned long end, unsigned long op, bool get)
-{
- pgprot_t prot;
- void *addr = NULL;
- void (*inner_maint)(const void*, const void*);
- void (*outer_maint)(unsigned long, unsigned long);
- int err = 0;
-
- if (get) h = _nvmap_handle_get(h);
-
- if (!h) return -EINVAL;
-
- /* don't waste time on cache maintenance if the handle isn't cached */
- if (h->flags == NVMEM_HANDLE_UNCACHEABLE ||
- h->flags == NVMEM_HANDLE_WRITE_COMBINE)
- goto out;
-
- if (op == NVMEM_CACHE_OP_WB) {
- inner_maint = smp_dma_clean_range;
- if (h->flags == NVMEM_HANDLE_CACHEABLE)
- outer_maint = outer_clean_range;
- else
- outer_maint = NULL;
- } else if (op == NVMEM_CACHE_OP_WB_INV) {
- inner_maint = dmac_flush_range;
- if (h->flags == NVMEM_HANDLE_CACHEABLE)
- outer_maint = outer_flush_range;
- else
- outer_maint = NULL;
- } else {
- inner_maint = smp_dma_inv_range;
- if (h->flags == NVMEM_HANDLE_CACHEABLE)
- outer_maint = outer_inv_range;
- else
- outer_maint = NULL;
- }
-
- prot = _nvmap_flag_to_pgprot(h->flags, pgprot_kernel);
-
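-	/* walk the handle one page at a time through a single reserved
-	 * kernel PTE: the first iteration allocates the VA window, and
-	 * later iterations simply retarget it at the next pfn */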
- while (start < end) {
- struct page *page = NULL;
- unsigned long phys;
- void *src;
- size_t count;
-
- if (h->heap_pgalloc) {
- page = h->pgalloc.pages[start>>PAGE_SHIFT];
- BUG_ON(!page);
- get_page(page);
- phys = page_to_phys(page) + (start & ~PAGE_MASK);
- } else {
- phys = h->carveout.base + start;
- }
-
- if (!addr) {
- err = nvmap_map_pte(__phys_to_pfn(phys), prot, &addr);
- if (err) {
- if (page) put_page(page);
- break;
- }
- } else {
- _nvmap_set_pte_at((unsigned long)addr,
- __phys_to_pfn(phys), prot);
- }
-
- src = addr + (phys & ~PAGE_MASK);
- count = min_t(size_t, end-start, PAGE_SIZE-(phys&~PAGE_MASK));
-
- inner_maint(src, src+count);
- if (outer_maint) outer_maint(phys, phys+count);
- start += count;
- if (page) put_page(page);
- }
-
-out:
- if (h->flags == NVMEM_HANDLE_INNER_CACHEABLE) outer_sync();
- if (addr) nvmap_unmap_pte(addr);
- if (get) _nvmap_handle_put(h);
- return err;
-}
-
-static int nvmap_ioctl_cache_maint(struct file *filp, void __user *arg)
-{
- struct nvmem_cache_op op;
- int err = 0;
- struct vm_area_struct *vma;
- struct nvmap_vma_priv *vpriv;
- unsigned long start;
- unsigned long end;
-
- err = copy_from_user(&op, arg, sizeof(op));
- if (err) return err;
-
- if (!op.handle || !op.addr || op.op<NVMEM_CACHE_OP_WB ||
- op.op>NVMEM_CACHE_OP_WB_INV)
- return -EINVAL;
-
- vma = find_vma(current->active_mm, (unsigned long)op.addr);
- if (!vma || vma->vm_ops!=&nvmap_vma_ops ||
- (unsigned long)op.addr + op.len > vma->vm_end)
- return -EADDRNOTAVAIL;
-
- vpriv = (struct nvmap_vma_priv *)vma->vm_private_data;
-
- if ((unsigned long)vpriv->h != op.handle)
- return -EFAULT;
-
- start = (unsigned long)op.addr - vma->vm_start;
- end = start + op.len;
-
- return _nvmap_do_cache_maint(vpriv->h, start, end, op.op, true);
-}
-
-/* copies a single element from the pre-get()'ed handle h, returns
- * the number of bytes copied, and the address in the nvmap mapping range
- * which was used (to eliminate re-allocation when copying multiple
- * elements) */
-static ssize_t _nvmap_do_one_rw_handle(struct nvmap_handle *h, int is_read,
- int is_user, unsigned long start, unsigned long rw_addr,
- unsigned long bytes, void **nvmap_addr)
-{
- pgprot_t prot = _nvmap_flag_to_pgprot(h->flags, pgprot_kernel);
- unsigned long end = start + bytes;
- unsigned long orig_start = start;
-
- if (is_user) {
- if (is_read && !access_ok(VERIFY_WRITE, (void*)rw_addr, bytes))
- return -EPERM;
- if (!is_read && !access_ok(VERIFY_READ, (void*)rw_addr, bytes))
- return -EPERM;
- }
-
- while (start < end) {
- struct page *page = NULL;
- unsigned long phys;
- size_t count;
- void *src;
-
- if (h->heap_pgalloc) {
- page = h->pgalloc.pages[start >> PAGE_SHIFT];
- BUG_ON(!page);
- get_page(page);
- phys = page_to_phys(page) + (start & ~PAGE_MASK);
- } else {
- phys = h->carveout.base + start;
- }
-
- if (!*nvmap_addr) {
- int err = nvmap_map_pte(__phys_to_pfn(phys),
- prot, nvmap_addr);
- if (err) {
- if (page) put_page(page);
- count = start - orig_start;
- return (count) ? count : err;
- }
-		} else {
-			_nvmap_set_pte_at((unsigned long)*nvmap_addr,
-				__phys_to_pfn(phys), prot);
-		}
-
- src = *nvmap_addr + (phys & ~PAGE_MASK);
- count = min_t(size_t, end-start, PAGE_SIZE-(phys&~PAGE_MASK));
-
- if (is_user && is_read)
- copy_to_user((void*)rw_addr, src, count);
- else if (is_user)
- copy_from_user(src, (void*)rw_addr, count);
- else if (is_read)
- memcpy((void*)rw_addr, src, count);
- else
- memcpy(src, (void*)rw_addr, count);
-
- rw_addr += count;
- start += count;
- if (page) put_page(page);
- }
-
- return (ssize_t)start - orig_start;
-}
-
-static ssize_t _nvmap_do_rw_handle(struct nvmap_handle *h, int is_read,
- int is_user, unsigned long h_offs, unsigned long sys_addr,
- unsigned long h_stride, unsigned long sys_stride,
- unsigned long elem_size, unsigned long count)
-{
- ssize_t bytes_copied = 0;
- void *addr = NULL;
-
- h = _nvmap_handle_get(h);
- if (!h) return -EINVAL;
-
- if (elem_size == h_stride &&
- elem_size == sys_stride) {
- elem_size *= count;
- h_stride = elem_size;
- sys_stride = elem_size;
- count = 1;
- }
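-	/* worked example: 16 elements of 64B with 64B strides on both
-	 * sides form one dense region, so they collapse into a single
-	 * 1024B transfer and the loop below runs exactly once */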
-
- while (count--) {
-		/* must be signed: returns a negative errno on failure */
-		ssize_t ret = _nvmap_do_one_rw_handle(h, is_read,
-			is_user, h_offs, sys_addr, elem_size, &addr);
- if (ret < 0) {
- if (!bytes_copied) bytes_copied = ret;
- break;
- }
- bytes_copied += ret;
- if (ret < elem_size) break;
- sys_addr += sys_stride;
- h_offs += h_stride;
- }
-
- if (addr) nvmap_unmap_pte(addr);
- _nvmap_handle_put(h);
- return bytes_copied;
-}
-
-static int nvmap_ioctl_rw_handle(struct file *filp,
- int is_read, void __user* arg)
-{
- struct nvmem_rw_handle __user *uarg = arg;
- struct nvmem_rw_handle op;
- struct nvmap_handle *h;
- ssize_t copied;
- int err = 0;
-
- err = copy_from_user(&op, arg, sizeof(op));
- if (err) return err;
-
- if (!op.handle || !op.addr || !op.count || !op.elem_size)
- return -EINVAL;
-
- h = _nvmap_validate_get(op.handle, (filp->f_op == &knvmap_fops));
- if (!h) return -EINVAL; /* -EPERM? */
-
- copied = _nvmap_do_rw_handle(h, is_read, 1, op.offset,
- (unsigned long)op.addr, op.hmem_stride,
- op.user_stride, op.elem_size, op.count);
-
- if (copied < 0) { err = copied; copied = 0; }
- else if (copied < (op.count*op.elem_size)) err = -EINTR;
-
- __put_user(copied, &uarg->count);
-
- _nvmap_handle_put(h);
-
- return err;
-}
-
-static unsigned int _nvmap_do_get_param(struct nvmap_handle *h,
- unsigned int param)
-{
- if (param==NVMEM_HANDLE_PARAM_SIZE)
- return h->orig_size;
-
- else if (param==NVMEM_HANDLE_PARAM_ALIGNMENT) {
- if (!h->alloc) return 0;
-
- if (h->heap_pgalloc) return PAGE_SIZE;
- else {
- unsigned int i=1;
- if (!h->carveout.base) return SZ_4M;
- while (!(i & h->carveout.base)) i<<=1;
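-			/* i is now the lowest set bit of the base, e.g.
-			 * base 0x40013000 -> i == 0x1000 (4KB-aligned) */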
- return i;
- }
- } else if (param==NVMEM_HANDLE_PARAM_BASE) {
-
- if (!h->alloc || !atomic_add_return(0, &h->pin)){
- WARN_ON(1);
- return ~0ul;
- }
-
- if (!h->heap_pgalloc)
- return h->carveout.base;
-
- if (h->pgalloc.contig)
- return page_to_phys(h->pgalloc.pages[0]);
-
- if (h->pgalloc.area)
- return h->pgalloc.area->iovm_start;
-
- return ~0ul;
- } else if (param==NVMEM_HANDLE_PARAM_HEAP) {
-
- if (!h->alloc) return 0;
-
- if (!h->heap_pgalloc) {
- /* FIXME: hard-coded physical address */
- if ((h->carveout.base & 0xf0000000ul)==0x40000000ul)
- return NVMEM_HEAP_CARVEOUT_IRAM;
- else
- return NVMEM_HEAP_CARVEOUT_GENERIC;
- }
-
- if (!h->pgalloc.contig)
- return NVMEM_HEAP_IOVMM;
-
- return NVMEM_HEAP_SYSMEM;
- }
-
- return 0;
-}
-
-static int nvmap_ioctl_get_param(struct file *filp, void __user* arg)
-{
- struct nvmem_handle_param op;
- struct nvmap_handle *h;
- int err;
-
- err = copy_from_user(&op, arg, sizeof(op));
- if (err) return err;
-
- if (op.param < NVMEM_HANDLE_PARAM_SIZE ||
- op.param > NVMEM_HANDLE_PARAM_HEAP)
- return -EINVAL;
-
- h = _nvmap_validate_get(op.handle, (filp->f_op==&knvmap_fops));
- if (!h) return -EINVAL;
-
- op.result = _nvmap_do_get_param(h, op.param);
- err = copy_to_user(arg, &op, sizeof(op));
-
- _nvmap_handle_put(h);
- return err;
-}
-
-static struct miscdevice misc_nvmap_dev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "nvmap",
- .fops = &nvmap_fops
-};
-
-static struct miscdevice misc_knvmap_dev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "knvmap",
- .fops = &knvmap_fops
-};
-
-static struct device *__nvmap_heap_parent_dev(void)
-{
- return misc_nvmap_dev.this_device;
-}
-
-/* creates the sysfs attribute files for a carveout heap; if called
- * before fs initialization, silently returns.
- */
-static void _nvmap_create_heap_attrs(struct nvmap_carveout_node *n)
-{
- if (!_nvmap_heap_parent_dev) return;
- dev_set_name(&n->dev, "heap-%s", n->carveout.name);
- n->dev.parent = _nvmap_heap_parent_dev;
- n->dev.driver = NULL;
- n->dev.release = NULL;
- if (device_register(&n->dev)) {
- pr_err("%s: failed to create heap-%s device\n",
- __func__, n->carveout.name);
- return;
- }
- if (sysfs_create_group(&n->dev.kobj, &nvmap_heap_defattr_group))
- pr_err("%s: failed to create attribute group for heap-%s "
- "device\n", __func__, n->carveout.name);
-}
-
-static int __init nvmap_dev_init(void)
-{
- struct nvmap_carveout_node *n;
-
- if (misc_register(&misc_nvmap_dev))
- pr_err("%s error registering %s\n", __func__,
- misc_nvmap_dev.name);
-
- if (misc_register(&misc_knvmap_dev))
- pr_err("%s error registering %s\n", __func__,
- misc_knvmap_dev.name);
-
- /* create sysfs attribute entries for all the heaps which were
- * created prior to nvmap_dev_init */
- down_read(&nvmap_context.list_sem);
- list_for_each_entry(n, &nvmap_context.heaps, heap_list) {
- _nvmap_create_heap_attrs(n);
- }
- up_read(&nvmap_context.list_sem);
-
- nvmap_procfs_root = proc_mkdir("nvmap", NULL);
- if (nvmap_procfs_root) {
- nvmap_procfs_proc = proc_mkdir("proc", nvmap_procfs_root);
- }
- return 0;
-}
-fs_initcall(nvmap_dev_init);
-
-/* initialization of core data structures split out to earlier in the
- * init sequence, to allow kernel drivers access to nvmap before devfs
- * is initialized */
-#define NR_CARVEOUTS 2
-static unsigned int nvmap_carveout_cmds = 0;
-static unsigned long nvmap_carveout_cmd_base[NR_CARVEOUTS];
-static unsigned long nvmap_carveout_cmd_size[NR_CARVEOUTS];
-
-static int __init nvmap_core_init(void)
-{
- u32 base = NVMAP_BASE;
- pgd_t *pgd;
- pmd_t *pmd;
- pte_t *pte;
- unsigned int i;
-
- init_rwsem(&nvmap_context.list_sem);
- nvmap_context.init_data.handle_refs = RB_ROOT;
- atomic_set(&nvmap_context.init_data.iovm_commit, 0);
- /* no IOVMM allocations for kernel-created handles */
- spin_lock_init(&nvmap_context.init_data.ref_lock);
- nvmap_context.init_data.su = true;
- nvmap_context.init_data.iovm_limit = 0;
- INIT_LIST_HEAD(&nvmap_context.heaps);
-
-#ifdef CONFIG_DEVNVMAP_RECLAIM_UNPINNED_VM
- for (i=0; i<ARRAY_SIZE(nvmap_mru_cutoff); i++)
- INIT_LIST_HEAD(&nvmap_mru_vma_lists[i]);
-#endif
-
- i = 0;
- do {
- pgd = pgd_offset(&init_mm, base);
- pmd = pmd_alloc(&init_mm, pgd, base);
- if (!pmd) {
- pr_err("%s: no pmd tables\n", __func__);
- return -ENOMEM;
- }
- pte = pte_alloc_kernel(pmd, base);
- if (!pte) {
- pr_err("%s: no pte tables\n", __func__);
- return -ENOMEM;
- }
- nvmap_pte[i++] = pte;
- base += (1<<PGDIR_SHIFT);
- } while (base < NVMAP_END);
-
- for (i=0; i<nvmap_carveout_cmds; i++) {
- char tmp[16];
- snprintf(tmp, sizeof(tmp), "generic-%u", i);
- nvmap_add_carveout_heap(nvmap_carveout_cmd_base[i],
- nvmap_carveout_cmd_size[i], tmp, 0x1);
- }
-
- return 0;
-}
-core_initcall(nvmap_core_init);
-
-static int __init nvmap_heap_arg(char *options)
-{
- unsigned long start, size;
- char *p = options;
-
- start = -1;
- size = memparse(p, &p);
- if (*p == '@')
- start = memparse(p + 1, &p);
-
- if (nvmap_carveout_cmds < ARRAY_SIZE(nvmap_carveout_cmd_size)) {
- nvmap_carveout_cmd_base[nvmap_carveout_cmds] = start;
- nvmap_carveout_cmd_size[nvmap_carveout_cmds] = size;
- nvmap_carveout_cmds++;
- }
- return 0;
-}
-__setup("nvmem=", nvmap_heap_arg);
-
-static int _nvmap_try_create_preserved(struct nvmap_carveout *co,
- struct nvmap_handle *h, unsigned long base,
- size_t size, unsigned int key)
-{
- unsigned long end = base + size;
- short idx;
-
- h->carveout.base = ~0;
- h->carveout.key = key;
- h->carveout.co_heap = NULL;
-
- spin_lock(&co->lock);
- idx = co->free_index;
- while (idx != -1) {
- struct nvmap_mem_block *b = BLOCK(co, idx);
- unsigned long blk_end = b->base + b->size;
- if (b->base <= base && blk_end >= end) {
- nvmap_split_block(co, idx, base, size);
- h->carveout.block_idx = idx;
- h->carveout.base = co->blocks[idx].base;
- h->carveout.co_heap = co;
- h->alloc = true;
- break;
- }
- idx = b->next_free;
- }
- spin_unlock(&co->lock);
-
- return (h->carveout.co_heap == NULL) ? -ENXIO : 0;
-}
-
-static void _nvmap_create_nvos_preserved(struct nvmap_carveout *co)
-{
-#ifdef CONFIG_TEGRA_NVOS
- unsigned int i, key;
- NvBootArgsPreservedMemHandle mem;
- static int was_created[NvBootArgKey_PreservedMemHandle_Num -
- NvBootArgKey_PreservedMemHandle_0] = { 0 };
-
- for (i=0, key=NvBootArgKey_PreservedMemHandle_0;
- i<ARRAY_SIZE(was_created); i++, key++) {
- struct nvmap_handle *h;
-
- if (was_created[i]) continue;
-
- if (NvOsBootArgGet(key, &mem, sizeof(mem))!=NvSuccess) continue;
- if (!mem.Address || !mem.Size) continue;
-
- h = _nvmap_handle_create(NULL, mem.Size);
- if (!h) continue;
-
- if (!_nvmap_try_create_preserved(co, h, mem.Address,
- mem.Size, key))
- was_created[i] = 1;
- else
- _nvmap_handle_put(h);
- }
-#endif
-}
-
-int nvmap_add_carveout_heap(unsigned long base, size_t size,
- const char *name, unsigned int bitmask)
-{
- struct nvmap_carveout_node *n;
- struct nvmap_carveout_node *l;
-
-
- n = kzalloc(sizeof(*n), GFP_KERNEL);
- if (!n) return -ENOMEM;
-
- BUG_ON(bitmask & ~NVMEM_HEAP_CARVEOUT_MASK);
- n->heap_bit = bitmask;
-
- if (_nvmap_init_carveout(&n->carveout, name, base, size)) {
- kfree(n);
- return -ENOMEM;
- }
-
- down_write(&nvmap_context.list_sem);
-
- /* called inside the list_sem lock to ensure that the was_created
- * array is protected against simultaneous access */
- _nvmap_create_nvos_preserved(&n->carveout);
- _nvmap_create_heap_attrs(n);
-
- list_for_each_entry(l, &nvmap_context.heaps, heap_list) {
- if (n->heap_bit > l->heap_bit) {
- list_add_tail(&n->heap_list, &l->heap_list);
- up_write(&nvmap_context.list_sem);
- return 0;
- }
- }
- list_add_tail(&n->heap_list, &nvmap_context.heaps);
- up_write(&nvmap_context.list_sem);
- return 0;
-}
-
-int nvmap_create_preserved_handle(unsigned long base, size_t size,
- unsigned int key)
-{
- struct nvmap_carveout_node *i;
- struct nvmap_handle *h;
-
- h = _nvmap_handle_create(NULL, size);
- if (!h) return -ENOMEM;
-
- down_read(&nvmap_context.list_sem);
- list_for_each_entry(i, &nvmap_context.heaps, heap_list) {
- struct nvmap_carveout *co = &i->carveout;
- if (!_nvmap_try_create_preserved(co, h, base, size, key))
- break;
- }
- up_read(&nvmap_context.list_sem);
-
- /* the base may not be correct if block splitting fails */
- if (!h->carveout.co_heap || h->carveout.base != base) {
- _nvmap_handle_put(h);
- return -ENOMEM;
- }
-
- return 0;
-}
-
-/* attempts to create a new carveout heap with a new usage bitmask by
- * taking an allocation from a previous carveout with a different bitmask */
-static int nvmap_split_carveout_heap(struct nvmap_carveout *co, size_t size,
- const char *name, unsigned int new_bitmask)
-{
- struct nvmap_carveout_node *i, *n;
- int idx = -1;
- unsigned int blkbase, blksize;
-
-
- n = kzalloc(sizeof(*n), GFP_KERNEL);
- if (!n) return -ENOMEM;
- n->heap_bit = new_bitmask;
-
- /* align split carveouts to 1M */
- idx = nvmap_carveout_alloc(co, SZ_1M, size);
- if (idx != -1) {
- /* take the spin lock to avoid race conditions with
- * intervening allocations triggering grow_block operations */
- spin_lock(&co->lock);
- blkbase = co->blocks[idx].base;
- blksize = co->blocks[idx].size;
- spin_unlock(&co->lock);
-
- if (_nvmap_init_carveout(&n->carveout,name, blkbase, blksize)) {
- nvmap_carveout_free(co, idx);
- idx = -1;
- } else {
- spin_lock(&co->lock);
- if (co->blocks[idx].prev) {
- co->blocks[co->blocks[idx].prev].next =
- co->blocks[idx].next;
- }
- if (co->blocks[idx].next) {
- co->blocks[co->blocks[idx].next].prev =
- co->blocks[idx].prev;
- }
- if (co->block_index==idx)
- co->block_index = co->blocks[idx].next;
- co->blocks[idx].next_free = -1;
- co->blocks[idx].prev_free = -1;
- co->blocks[idx].next = co->spare_index;
- if (co->spare_index!=-1)
- co->blocks[co->spare_index].prev = idx;
- co->spare_index = idx;
- spin_unlock(&co->lock);
- }
- }
-
- if (idx==-1) {
- kfree(n);
- return -ENOMEM;
- }
-
- down_write(&nvmap_context.list_sem);
- _nvmap_create_heap_attrs(n);
- list_for_each_entry(i, &nvmap_context.heaps, heap_list) {
- if (n->heap_bit > i->heap_bit) {
- list_add_tail(&n->heap_list, &i->heap_list);
- up_write(&nvmap_context.list_sem);
- return 0;
- }
- }
- list_add_tail(&n->heap_list, &nvmap_context.heaps);
- up_write(&nvmap_context.list_sem);
- return 0;
-}
-
-/* NvRmMemMgr APIs implemented on top of nvmap */
-
-#if defined(CONFIG_TEGRA_NVRM)
-#include <linux/freezer.h>
-
-NvU32 NvRmMemGetAddress(NvRmMemHandle hMem, NvU32 Offset)
-{
- struct nvmap_handle *h = (struct nvmap_handle *)hMem;
- unsigned long addr;
-
- if (unlikely(!atomic_add_return(0, &h->pin) || !h->alloc ||
- Offset >= h->orig_size)) {
- WARN_ON(1);
- return ~0ul;
- }
-
- if (h->heap_pgalloc && h->pgalloc.contig)
- addr = page_to_phys(h->pgalloc.pages[0]);
- else if (h->heap_pgalloc) {
- BUG_ON(!h->pgalloc.area);
- addr = h->pgalloc.area->iovm_start;
- } else
- addr = h->carveout.base;
-
-	return (NvU32)addr + Offset;
-}
-
-void NvRmMemPinMult(NvRmMemHandle *hMems, NvU32 *addrs, NvU32 Count)
-{
- struct nvmap_handle **h = (struct nvmap_handle **)hMems;
- unsigned int i;
- int ret;
-
- do {
- ret = _nvmap_handle_pin_fast(Count, h);
- if (ret && !try_to_freeze()) {
- pr_err("%s: failed to pin handles\n", __func__);
- dump_stack();
- }
- } while (ret);
-
- for (i=0; i<Count; i++) {
- addrs[i] = NvRmMemGetAddress(hMems[i], 0);
- BUG_ON(addrs[i]==~0ul);
- }
-}
-
-void NvRmMemUnpinMult(NvRmMemHandle *hMems, NvU32 Count)
-{
- int do_wake = 0;
- unsigned int i;
-
- for (i=0; i<Count; i++) {
- struct nvmap_handle *h = (struct nvmap_handle *)hMems[i];
- if (h) {
- BUG_ON(atomic_add_return(0, &h->pin)==0);
- do_wake |= _nvmap_handle_unpin(h);
- }
- }
-
- if (do_wake) wake_up(&nvmap_pin_wait);
-}
-
-NvU32 NvRmMemPin(NvRmMemHandle hMem)
-{
- NvU32 addr;
- NvRmMemPinMult(&hMem, &addr, 1);
- return addr;
-}
-
-void NvRmMemUnpin(NvRmMemHandle hMem)
-{
- NvRmMemUnpinMult(&hMem, 1);
-}
-
-void NvRmMemHandleFree(NvRmMemHandle hMem)
-{
- _nvmap_do_free(&nvmap_context.init_data, (unsigned long)hMem);
-}
-
-NvError NvRmMemMap(NvRmMemHandle hMem, NvU32 Offset, NvU32 Size,
- NvU32 Flags, void **pVirtAddr)
-{
- struct nvmap_handle *h = (struct nvmap_handle *)hMem;
- pgprot_t prot = _nvmap_flag_to_pgprot(h->flags, pgprot_kernel);
-
- BUG_ON(!h->alloc);
-
- if (Offset+Size > h->size)
- return NvError_BadParameter;
-
- if (!h->kern_map && h->heap_pgalloc) {
- BUG_ON(h->size & ~PAGE_MASK);
- h->kern_map = vm_map_ram(h->pgalloc.pages,
- h->size>>PAGE_SHIFT, -1, prot);
- } else if (!h->kern_map) {
- unsigned int size;
- unsigned long addr;
-
- addr = h->carveout.base;
- size = h->size + (addr & ~PAGE_MASK);
- addr &= PAGE_MASK;
- size = (size + PAGE_SIZE - 1) & PAGE_MASK;
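-		/* e.g. base 0x1800f200, Size 0x1000: map the two pages at
-		 * 0x1800f000..0x18011000 and bias kern_map by 0x200 */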
-
- h->kern_map = ioremap_wc(addr, size);
- if (h->kern_map) {
- addr = h->carveout.base - addr;
- h->kern_map += addr;
- }
- }
-
- if (h->kern_map) {
- *pVirtAddr = (h->kern_map + Offset);
- return NvSuccess;
- }
-
- return NvError_InsufficientMemory;
-}
-
-void NvRmMemUnmap(NvRmMemHandle hMem, void *pVirtAddr, NvU32 Size)
-{
- return;
-}
-
-NvU32 NvRmMemGetId(NvRmMemHandle hMem)
-{
- struct nvmap_handle *h = (struct nvmap_handle *)hMem;
- if (!h->owner) h->global = true;
- return (NvU32)h;
-}
-
-NvError NvRmMemHandleFromId(NvU32 id, NvRmMemHandle *hMem)
-{
- struct nvmap_handle_ref *r;
-
- int err = _nvmap_do_create(&nvmap_context.init_data,
- NVMEM_IOC_FROM_ID, id, true, &r);
-
- if (err || !r) return NvError_NotInitialized;
-
- *hMem = (NvRmMemHandle)r->h;
- return NvSuccess;
-}
-
-NvError NvRmMemHandleClaimPreservedHandle(NvRmDeviceHandle hRm,
- NvU32 Key, NvRmMemHandle *hMem)
-{
- struct nvmap_handle_ref *r;
-
- int err = _nvmap_do_create(&nvmap_context.init_data,
- NVMEM_IOC_CLAIM, (unsigned long)Key, true, &r);
-
- if (err || !r) return NvError_NotInitialized;
-
- *hMem = (NvRmMemHandle)r->h;
- return NvSuccess;
-}
-
-NvError NvRmMemHandleCreate(NvRmDeviceHandle hRm,
- NvRmMemHandle *hMem, NvU32 Size)
-{
- struct nvmap_handle_ref *r;
- int err = _nvmap_do_create(&nvmap_context.init_data,
- NVMEM_IOC_CREATE, (unsigned long)Size, true, &r);
-
- if (err || !r) return NvError_InsufficientMemory;
- *hMem = (NvRmMemHandle)r->h;
- return NvSuccess;
-}
-
-NvError NvRmMemAlloc(NvRmMemHandle hMem, const NvRmHeap *Heaps,
- NvU32 NumHeaps, NvU32 Alignment, NvOsMemAttribute Coherency)
-{
-	unsigned int flags;
- int err = -ENOMEM;
-
- BUG_ON(Alignment & (Alignment-1));
-
- if (Coherency == NvOsMemAttribute_WriteBack)
- flags = NVMEM_HANDLE_INNER_CACHEABLE;
- else
- flags = NVMEM_HANDLE_WRITE_COMBINE;
-
- if (!NumHeaps || !Heaps) {
- err = _nvmap_do_alloc(&nvmap_context.init_data,
- (unsigned long)hMem, NVMAP_KERNEL_DEFAULT_HEAPS,
- (size_t)Alignment, flags);
- }
- else {
- unsigned int i;
- for (i = 0; i < NumHeaps; i++) {
- unsigned int heap;
- switch (Heaps[i]) {
- case NvRmHeap_GART:
- heap = NVMEM_HEAP_IOVMM;
- break;
- case NvRmHeap_External:
- heap = NVMEM_HEAP_SYSMEM;
- break;
- case NvRmHeap_ExternalCarveOut:
- heap = NVMEM_HEAP_CARVEOUT_GENERIC;
- break;
- case NvRmHeap_IRam:
- heap = NVMEM_HEAP_CARVEOUT_IRAM;
- break;
- default:
- heap = 0;
- break;
- }
- if (heap) {
- err = _nvmap_do_alloc(&nvmap_context.init_data,
- (unsigned long)hMem, heap,
- (size_t)Alignment, flags);
- if (!err) break;
- }
- }
- }
-
- return (err ? NvError_InsufficientMemory : NvSuccess);
-}
-
-void NvRmMemReadStrided(NvRmMemHandle hMem, NvU32 Offset, NvU32 SrcStride,
- void *pDst, NvU32 DstStride, NvU32 ElementSize, NvU32 Count)
-{
- ssize_t bytes = 0;
-
- bytes = _nvmap_do_rw_handle((struct nvmap_handle *)hMem, true,
- false, Offset, (unsigned long)pDst, SrcStride,
- DstStride, ElementSize, Count);
-
- BUG_ON(bytes != (ssize_t)(Count*ElementSize));
-}
-
-void NvRmMemWriteStrided(NvRmMemHandle hMem, NvU32 Offset, NvU32 DstStride,
- const void *pSrc, NvU32 SrcStride, NvU32 ElementSize, NvU32 Count)
-{
- ssize_t bytes = 0;
-
- bytes = _nvmap_do_rw_handle((struct nvmap_handle *)hMem, false,
- false, Offset, (unsigned long)pSrc, DstStride,
- SrcStride, ElementSize, Count);
-
- BUG_ON(bytes != (ssize_t)(Count*ElementSize));
-}
-
-NvU32 NvRmMemGetSize(NvRmMemHandle hMem)
-{
- struct nvmap_handle *h = (struct nvmap_handle *)hMem;
- return h->orig_size;
-}
-
-NvRmHeap NvRmMemGetHeapType(NvRmMemHandle hMem, NvU32 *BaseAddr)
-{
- struct nvmap_handle *h = (struct nvmap_handle *)hMem;
- NvRmHeap heap;
-
- if (!h->alloc) {
- *BaseAddr = ~0ul;
- return (NvRmHeap)0;
- }
-
- if (h->heap_pgalloc && !h->pgalloc.contig)
- heap = NvRmHeap_GART;
- else if (h->heap_pgalloc)
- heap = NvRmHeap_External;
- else if ((h->carveout.base & 0xf0000000ul) == 0x40000000ul)
- heap = NvRmHeap_IRam;
- else
- heap = NvRmHeap_ExternalCarveOut;
-
- if (h->heap_pgalloc && h->pgalloc.contig)
- *BaseAddr = (NvU32)page_to_phys(h->pgalloc.pages[0]);
- else if (h->heap_pgalloc && atomic_add_return(0, &h->pin))
- *BaseAddr = h->pgalloc.area->iovm_start;
- else if (h->heap_pgalloc)
- *BaseAddr = ~0ul;
- else
- *BaseAddr = (NvU32)h->carveout.base;
-
- return heap;
-}
-
-void NvRmMemCacheMaint(NvRmMemHandle hMem, void *pMapping,
- NvU32 Size, NvBool Writeback, NvBool Inv)
-{
- struct nvmap_handle *h = (struct nvmap_handle *)hMem;
- unsigned long start;
- unsigned int op;
-
- if (!h->kern_map || h->flags==NVMEM_HANDLE_UNCACHEABLE ||
- h->flags==NVMEM_HANDLE_WRITE_COMBINE) return;
-
- if (!Writeback && !Inv) return;
-
- if (Writeback && Inv) op = NVMEM_CACHE_OP_WB_INV;
- else if (Writeback) op = NVMEM_CACHE_OP_WB;
- else op = NVMEM_CACHE_OP_INV;
-
- start = (unsigned long)pMapping - (unsigned long)h->kern_map;
-
- _nvmap_do_cache_maint(h, start, start+Size, op, true);
- return;
-}
-
-NvU32 NvRmMemGetAlignment(NvRmMemHandle hMem)
-{
- struct nvmap_handle *h = (struct nvmap_handle *)hMem;
- return _nvmap_do_get_param(h, NVMEM_HANDLE_PARAM_ALIGNMENT);
-}
-
-NvError NvRmMemGetStat(NvRmMemStat Stat, NvS32 *Result)
-{
- unsigned long total_co = 0;
- unsigned long free_co = 0;
- unsigned long max_free = 0;
- struct nvmap_carveout_node *n;
-
- down_read(&nvmap_context.list_sem);
- list_for_each_entry(n, &nvmap_context.heaps, heap_list) {
-
- if (!(n->heap_bit & NVMEM_HEAP_CARVEOUT_GENERIC)) continue;
- total_co += _nvmap_carveout_blockstat(&n->carveout,
- CARVEOUT_STAT_TOTAL_SIZE);
- free_co += _nvmap_carveout_blockstat(&n->carveout,
- CARVEOUT_STAT_FREE_SIZE);
- max_free = max(max_free,
- _nvmap_carveout_blockstat(&n->carveout,
- CARVEOUT_STAT_LARGEST_FREE));
- }
- up_read(&nvmap_context.list_sem);
-
- if (Stat==NvRmMemStat_TotalCarveout) {
- *Result = (NvU32)total_co;
- return NvSuccess;
- } else if (Stat==NvRmMemStat_UsedCarveout) {
- *Result = (NvU32)total_co - (NvU32)free_co;
- return NvSuccess;
- } else if (Stat==NvRmMemStat_LargestFreeCarveoutBlock) {
- *Result = (NvU32)max_free;
- return NvSuccess;
- }
-
- return NvError_BadParameter;
-}
-
-NvU8 NvRmMemRd08(NvRmMemHandle hMem, NvU32 Offset)
-{
- NvU8 val;
- NvRmMemRead(hMem, Offset, &val, sizeof(val));
- return val;
-}
-
-NvU16 NvRmMemRd16(NvRmMemHandle hMem, NvU32 Offset)
-{
- NvU16 val;
- NvRmMemRead(hMem, Offset, &val, sizeof(val));
- return val;
-}
-
-NvU32 NvRmMemRd32(NvRmMemHandle hMem, NvU32 Offset)
-{
- NvU32 val;
- NvRmMemRead(hMem, Offset, &val, sizeof(val));
- return val;
-}
-
-void NvRmMemWr08(NvRmMemHandle hMem, NvU32 Offset, NvU8 Data)
-{
- NvRmMemWrite(hMem, Offset, &Data, sizeof(Data));
-}
-
-void NvRmMemWr16(NvRmMemHandle hMem, NvU32 Offset, NvU16 Data)
-{
- NvRmMemWrite(hMem, Offset, &Data, sizeof(Data));
-}
-
-void NvRmMemWr32(NvRmMemHandle hMem, NvU32 Offset, NvU32 Data)
-{
- NvRmMemWrite(hMem, Offset, &Data, sizeof(Data));
-}
-
-void NvRmMemRead(NvRmMemHandle hMem, NvU32 Offset, void *pDst, NvU32 Size)
-{
- NvRmMemReadStrided(hMem, Offset, Size, pDst, Size, Size, 1);
-}
-
-void NvRmMemWrite(NvRmMemHandle hMem, NvU32 Offset,
- const void *pSrc, NvU32 Size)
-{
- NvRmMemWriteStrided(hMem, Offset, Size, pSrc, Size, Size, 1);
-}
-
-void NvRmMemMove(NvRmMemHandle dstHMem, NvU32 dstOffset,
- NvRmMemHandle srcHMem, NvU32 srcOffset, NvU32 Size)
-{
- while (Size--) {
- NvU8 tmp = NvRmMemRd08(srcHMem, srcOffset);
- NvRmMemWr08(dstHMem, dstOffset, tmp);
- dstOffset++;
- srcOffset++;
- }
-}
-
-NvU32 NvRmMemGetCacheLineSize(void)
-{
- return 32;
-}
-
-void *NvRmHostAlloc(size_t size)
-{
- return NvOsAlloc(size);
-}
-
-void NvRmHostFree(void *ptr)
-{
- NvOsFree(ptr);
-}
-
-NvError NvRmMemMapIntoCallerPtr(NvRmMemHandle hMem, void *pCallerPtr,
- NvU32 Offset, NvU32 Size)
-{
- return NvError_NotSupported;
-}
-
-NvError NvRmMemHandlePreserveHandle(NvRmMemHandle hMem, NvU32 *pKey)
-{
- return NvError_NotSupported;
-}
-
-#endif
-
-static u32 nvmap_get_physaddr(struct nvmap_handle *h)
-{
- u32 addr;
-
- if (h->heap_pgalloc && h->pgalloc.contig) {
- addr = page_to_phys(h->pgalloc.pages[0]);
- } else if (h->heap_pgalloc) {
- BUG_ON(!h->pgalloc.area);
- addr = h->pgalloc.area->iovm_start;
- } else {
- addr = h->carveout.base;
- }
-
- return addr;
-}
-
-struct nvmap_handle *nvmap_alloc(
- size_t size, size_t align,
- unsigned int flags, void **map)
-{
- struct nvmap_handle_ref *r = NULL;
- struct nvmap_handle *h;
- int err;
-
- err = _nvmap_do_create(&nvmap_context.init_data,
- NVMEM_IOC_CREATE, (unsigned long)size, true, &r);
- if (err || !r)
- return ERR_PTR(err);
- h = r->h;
-
- err = _nvmap_do_alloc(&nvmap_context.init_data,
- (unsigned long)h, NVMAP_KERNEL_DEFAULT_HEAPS,
- align, flags);
- if (err) {
- _nvmap_do_free(&nvmap_context.init_data, (unsigned long)h);
- return ERR_PTR(err);
- }
-
- if (!map)
- return h;
-
- if (h->heap_pgalloc) {
- *map = vm_map_ram(h->pgalloc.pages, h->size >> PAGE_SHIFT, -1,
- _nvmap_flag_to_pgprot(h->flags, pgprot_kernel));
- } else {
- size_t mapaddr = h->carveout.base;
- size_t mapsize = h->size;
-
- mapsize += (mapaddr & ~PAGE_MASK);
- mapaddr &= PAGE_MASK;
- mapsize = (mapsize + PAGE_SIZE - 1) & PAGE_MASK;
-
- /* TODO: [ahatala 2010-06-21] honor coherency flag? */
- *map = ioremap_wc(mapaddr, mapsize);
- if (*map)
- *map += (h->carveout.base - mapaddr);
- }
- if (!*map) {
- _nvmap_do_free(&nvmap_context.init_data, (unsigned long)h);
- return ERR_PTR(-ENOMEM);
- }
- /* TODO: [ahatala 2010-06-22] get rid of kern_map */
- h->kern_map = *map;
- return h;
-}
-
-void nvmap_free(struct nvmap_handle *h, void *map)
-{
- if (map) {
- BUG_ON(h->kern_map != map);
-
- if (h->heap_pgalloc) {
- vm_unmap_ram(map, h->size >> PAGE_SHIFT);
- } else {
-			unsigned long addr = (unsigned long)map;
-			/* recover the page-aligned address ioremap returned */
-			addr &= PAGE_MASK;
-			iounmap((void *)addr);
- }
- h->kern_map = NULL;
- }
- _nvmap_do_free(&nvmap_context.init_data, (unsigned long)h);
-}
-
-u32 nvmap_pin_single(struct nvmap_handle *h)
-{
- int ret;
- do {
- ret = _nvmap_handle_pin_fast(1, &h);
- if (ret) {
- pr_err("%s: failed to pin handle\n", __func__);
- dump_stack();
- }
- } while (ret);
-
- return nvmap_get_physaddr(h);
-}
-
-int nvmap_pin_array(struct file *filp,
- struct nvmap_pinarray_elem *arr, int num_elems,
- struct nvmap_handle **unique_arr, int *num_unique, bool wait)
-{
- struct nvmap_pinarray_elem *elem;
- struct nvmap_file_priv *priv = filp->private_data;
- int i, unique_idx = 0;
- unsigned long pfn = 0;
- void *pteaddr = NULL;
- int ret = 0;
-
- mutex_lock(&nvmap_pin_lock);
-
- /* find unique handles, pin them and collect into unpin array */
- for (elem = arr, i = num_elems; i && !ret; i--, elem++) {
- struct nvmap_handle *to_pin = elem->pin_mem;
- if (to_pin->poison != NVDA_POISON) {
- pr_err("%s: handle is poisoned\n", __func__);
- ret = -EFAULT;
- }
- else if (!(to_pin->flags & NVMEM_HANDLE_VISITED)) {
- if (!priv->su && !to_pin->global) {
- struct nvmap_handle_ref *r;
- spin_lock(&priv->ref_lock);
- r = _nvmap_ref_lookup_locked(priv,
- (unsigned long)to_pin);
- spin_unlock(&priv->ref_lock);
- if (!r) {
- pr_err("%s: handle access failure\n", __func__);
- ret = -EPERM;
- break;
- }
- }
- if (wait) {
- ret = wait_event_interruptible(
- nvmap_pin_wait,
- !_nvmap_handle_pin_locked(to_pin));
- }
- else
- ret = _nvmap_handle_pin_locked(to_pin);
- if (!ret) {
- to_pin->flags |= NVMEM_HANDLE_VISITED;
- unique_arr[unique_idx++] = to_pin;
- }
- }
- }
-
- /* clear visited flags before releasing mutex */
- i = unique_idx;
- while (i--)
- unique_arr[i]->flags &= ~NVMEM_HANDLE_VISITED;
-
- mutex_unlock(&nvmap_pin_lock);
-
- if (!ret)
- ret = nvmap_map_pte(pfn, pgprot_kernel, &pteaddr);
-
- if (unlikely(ret)) {
- int do_wake = 0;
- i = unique_idx;
- while (i--)
- do_wake |= _nvmap_handle_unpin(unique_arr[i]);
- if (do_wake)
- wake_up(&nvmap_pin_wait);
- return ret;
- }
-
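-	/* relocation pass: write the now-pinned physical address of each
-	 * pin_mem (plus pin_offset) into patch_mem at patch_offset, i.e.
-	 * patch the command-buffer words that reference pinned buffers */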
- for (elem = arr, i = num_elems; i; i--, elem++) {
- struct nvmap_handle *h_patch = elem->patch_mem;
- struct nvmap_handle *h_pin = elem->pin_mem;
- struct page *page = NULL;
- u32* patch_addr;
-
- /* commit iovmm mapping */
- if (h_pin->heap_pgalloc && h_pin->pgalloc.dirty)
- _nvmap_handle_iovmm_map(h_pin);
-
- /* patch */
- if (h_patch->kern_map) {
- patch_addr = (u32*)((unsigned long)h_patch->kern_map +
- elem->patch_offset);
- } else {
- unsigned long phys, new_pfn;
- if (h_patch->heap_pgalloc) {
- page = h_patch->pgalloc.pages[elem->patch_offset >> PAGE_SHIFT];
- get_page(page);
- phys = page_to_phys(page) + (elem->patch_offset & ~PAGE_MASK);
- } else {
- phys = h_patch->carveout.base + elem->patch_offset;
- }
- new_pfn = __phys_to_pfn(phys);
- if (new_pfn != pfn) {
- _nvmap_set_pte_at((unsigned long)pteaddr, new_pfn,
- _nvmap_flag_to_pgprot(h_patch->flags, pgprot_kernel));
- pfn = new_pfn;
- }
- patch_addr = (u32*)((unsigned long)pteaddr + (phys & ~PAGE_MASK));
- }
-
- *patch_addr = nvmap_get_physaddr(h_pin) + elem->pin_offset;
-
- if (page)
- put_page(page);
- }
- nvmap_unmap_pte(pteaddr);
- *num_unique = unique_idx;
- return 0;
-}
-
-void nvmap_unpin(struct nvmap_handle **h, int num_handles)
-{
- int do_wake = 0;
-
- while (num_handles--) {
- BUG_ON(!*h);
- do_wake |= _nvmap_handle_unpin(*h);
- h++;
- }
-
- if (do_wake) wake_up(&nvmap_pin_wait);
-}
-
-int nvmap_validate_file(struct file *f)
-{
- return (f->f_op==&knvmap_fops || f->f_op==&nvmap_fops) ? 0 : -EFAULT;
-}
ccflags-y += -Iarch/arm/mach-tegra/nv/include
obj-y += nvos.o
-obj-y += nvos_page.o
obj-y += nvustring.o
-obj-y += nvos_exports.o
+++ /dev/null
-/*
- * Copyright (c) 2009 NVIDIA Corporation.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- *
- * Redistributions of source code must retain the above copyright notice,
- * this list of conditions and the following disclaimer.
- *
- * Redistributions in binary form must reproduce the above copyright notice,
- * this list of conditions and the following disclaimer in the documentation
- * and/or other materials provided with the distribution.
- *
- * Neither the name of the NVIDIA Corporation nor the names of its contributors
- * may be used to endorse or promote products derived from this software
- * without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include "nvos.h"
-#include "nvutil.h"
-#include "nvassert.h"
-#if NVOS_IS_LINUX_KERNEL
-#include <linux/module.h>
-EXPORT_SYMBOL(NvOsBreakPoint);
-EXPORT_SYMBOL(NvOsFprintf);
-EXPORT_SYMBOL(NvOsSnprintf);
-EXPORT_SYMBOL(NvOsVfprintf);
-EXPORT_SYMBOL(NvOsVsnprintf);
-EXPORT_SYMBOL(NvOsDebugPrintf);
-EXPORT_SYMBOL(NvOsDebugVprintf);
-EXPORT_SYMBOL(NvOsDebugNprintf);
-EXPORT_SYMBOL(NvOsStrncpy);
-EXPORT_SYMBOL(NvOsStrlen);
-EXPORT_SYMBOL(NvOsStrcmp);
-EXPORT_SYMBOL(NvOsStrncmp);
-EXPORT_SYMBOL(NvOsStrGetSystemCodePage);
-EXPORT_SYMBOL(NvOsMemcpy);
-EXPORT_SYMBOL(NvOsMemcmp);
-EXPORT_SYMBOL(NvOsMemset);
-EXPORT_SYMBOL(NvOsMemmove);
-EXPORT_SYMBOL(NvOsCopyIn);
-EXPORT_SYMBOL(NvOsCopyOut);
-EXPORT_SYMBOL(NvOsFopen);
-EXPORT_SYMBOL(NvOsFclose);
-EXPORT_SYMBOL(NvOsFwrite);
-EXPORT_SYMBOL(NvOsFread);
-EXPORT_SYMBOL(NvOsFreadTimeout);
-EXPORT_SYMBOL(NvOsFgetc);
-EXPORT_SYMBOL(NvOsFseek);
-EXPORT_SYMBOL(NvOsFtell);
-EXPORT_SYMBOL(NvOsStat);
-EXPORT_SYMBOL(NvOsFstat);
-EXPORT_SYMBOL(NvOsFflush);
-EXPORT_SYMBOL(NvOsFsync);
-EXPORT_SYMBOL(NvOsIoctl);
-EXPORT_SYMBOL(NvOsOpendir);
-EXPORT_SYMBOL(NvOsReaddir);
-EXPORT_SYMBOL(NvOsClosedir);
-EXPORT_SYMBOL(NvOsSetFileHooks);
-EXPORT_SYMBOL(NvOsGetConfigU32);
-EXPORT_SYMBOL(NvOsGetConfigString);
-EXPORT_SYMBOL(NvOsAlloc);
-EXPORT_SYMBOL(NvOsRealloc);
-EXPORT_SYMBOL(NvOsFree);
-#if NV_DEBUG
-EXPORT_SYMBOL(NvOsAllocLeak);
-EXPORT_SYMBOL(NvOsReallocLeak);
-EXPORT_SYMBOL(NvOsFreeLeak);
-#endif
-EXPORT_SYMBOL(NvOsExecAlloc);
-EXPORT_SYMBOL(NvOsSharedMemAlloc);
-EXPORT_SYMBOL(NvOsSharedMemMap);
-EXPORT_SYMBOL(NvOsSharedMemUnmap);
-EXPORT_SYMBOL(NvOsSharedMemFree);
-EXPORT_SYMBOL(NvOsPhysicalMemMap);
-EXPORT_SYMBOL(NvOsPhysicalMemMapIntoCaller);
-EXPORT_SYMBOL(NvOsPhysicalMemUnmap);
-EXPORT_SYMBOL(NvOsPageAlloc);
-EXPORT_SYMBOL(NvOsPageFree);
-EXPORT_SYMBOL(NvOsPageLock);
-EXPORT_SYMBOL(NvOsPageMap);
-EXPORT_SYMBOL(NvOsPageMapIntoPtr);
-EXPORT_SYMBOL(NvOsPageUnmap);
-EXPORT_SYMBOL(NvOsPageAddress);
-EXPORT_SYMBOL(NvOsLibraryLoad);
-EXPORT_SYMBOL(NvOsLibraryGetSymbol);
-EXPORT_SYMBOL(NvOsLibraryUnload);
-EXPORT_SYMBOL(NvOsSleepMS);
-EXPORT_SYMBOL(NvOsWaitUS);
-EXPORT_SYMBOL(NvOsMutexCreate);
-EXPORT_SYMBOL(NvOsTraceLogPrintf);
-EXPORT_SYMBOL(NvOsTraceLogStart);
-EXPORT_SYMBOL(NvOsTraceLogEnd);
-EXPORT_SYMBOL(NvOsMutexLock);
-EXPORT_SYMBOL(NvOsMutexUnlock);
-EXPORT_SYMBOL(NvOsMutexDestroy);
-EXPORT_SYMBOL(NvOsIntrMutexCreate);
-EXPORT_SYMBOL(NvOsIntrMutexLock);
-EXPORT_SYMBOL(NvOsIntrMutexUnlock);
-EXPORT_SYMBOL(NvOsIntrMutexDestroy);
-EXPORT_SYMBOL(NvOsSpinMutexCreate);
-EXPORT_SYMBOL(NvOsSpinMutexLock);
-EXPORT_SYMBOL(NvOsSpinMutexUnlock);
-EXPORT_SYMBOL(NvOsSpinMutexDestroy);
-EXPORT_SYMBOL(NvOsSemaphoreCreate);
-EXPORT_SYMBOL(NvOsSemaphoreClone);
-EXPORT_SYMBOL(NvOsSemaphoreUnmarshal);
-EXPORT_SYMBOL(NvOsSemaphoreWait);
-EXPORT_SYMBOL(NvOsSemaphoreWaitTimeout);
-EXPORT_SYMBOL(NvOsSemaphoreSignal);
-EXPORT_SYMBOL(NvOsSemaphoreDestroy);
-EXPORT_SYMBOL(NvOsThreadCreate);
-EXPORT_SYMBOL(NvOsInterruptPriorityThreadCreate);
-EXPORT_SYMBOL(NvOsThreadSetLowPriority);
-EXPORT_SYMBOL(NvOsThreadJoin);
-EXPORT_SYMBOL(NvOsThreadYield);
-EXPORT_SYMBOL(NvOsGetTimeMS);
-EXPORT_SYMBOL(NvOsGetTimeUS);
-EXPORT_SYMBOL(NvOsInstrCacheInvalidate);
-EXPORT_SYMBOL(NvOsInstrCacheInvalidateRange);
-EXPORT_SYMBOL(NvOsFlushWriteCombineBuffer);
-EXPORT_SYMBOL(NvOsInterruptRegister);
-EXPORT_SYMBOL(NvOsInterruptUnregister);
-EXPORT_SYMBOL(NvOsInterruptEnable);
-EXPORT_SYMBOL(NvOsInterruptDone);
-EXPORT_SYMBOL(NvOsInterruptMask);
-EXPORT_SYMBOL(NvOsProfileApertureSizes);
-EXPORT_SYMBOL(NvOsProfileStart);
-EXPORT_SYMBOL(NvOsProfileStop);
-EXPORT_SYMBOL(NvOsProfileWrite);
-EXPORT_SYMBOL(NvOsBootArgSet);
-EXPORT_SYMBOL(NvOsBootArgGet);
-EXPORT_SYMBOL(NvOsGetOsInformation);
-EXPORT_SYMBOL(NvOsThreadMode);
-EXPORT_SYMBOL(NvOsAtomicCompareExchange32);
-EXPORT_SYMBOL(NvOsAtomicExchange32);
-EXPORT_SYMBOL(NvOsAtomicExchangeAdd32);
-#if (NVOS_TRACE || NV_DEBUG)
-EXPORT_SYMBOL(NvOsSetResourceAllocFileLine);
-#endif
-EXPORT_SYMBOL(NvOsTlsAlloc);
-EXPORT_SYMBOL(NvOsTlsFree);
-EXPORT_SYMBOL(NvOsTlsGet);
-EXPORT_SYMBOL(NvOsTlsSet);
-EXPORT_SYMBOL(NvULowestBitSet);
-EXPORT_SYMBOL(NvOsGetProcessInfo);
-#endif /* NVOS_IS_LINUX_KERNEL */
+++ /dev/null
-/*
- * arch/arm/mach-tegra/nvos/nvos_page.c
- *
- * Implementation of NvOsPage* APIs using the Linux page allocator
- *
- * Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include <linux/vmalloc.h>
-#include <linux/highmem.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
-#include "nvcommon.h"
-#include "nvos.h"
-#include <linux/slab.h>
-
-#if NVOS_TRACE || NV_DEBUG
-#undef NvOsPageMap
-#undef NvOsPageGetPage
-#undef NvOsPageAddress
-#undef NvOsPageUnmap
-#undef NvOsPageAlloc
-#undef NvOsPageFree
-#undef NvOsPageLock
-#undef NvOsPageMapIntoPtr
-#endif
-
-#define L_PTE_MT_INNER_WB (0x05 << 2) /* 0101 (armv6, armv7) */
-#define pgprot_inner_writeback(prot) \
- __pgprot((pgprot_val(prot) & ~L_PTE_MT_MASK) | L_PTE_MT_INNER_WB)
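-/* e.g. pgprot_inner_writeback(pgprot_kernel) yields PTEs that are
- * write-back cacheable in the inner (L1) domain but non-cacheable at
- * the outer (L2) level, per the ARMv6/v7 TEX-remap encodings */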
-
-#define nv_gfp_pool (GFP_KERNEL | __GFP_HIGHMEM | __GFP_NOWARN)
-
-struct nvos_pagemap {
- void *addr;
- unsigned int nr_pages;
- struct page *pages[1];
-};
-
-static void pagemap_flush_page(struct page *page)
-{
-#ifdef CONFIG_HIGHMEM
- void *km = NULL;
-
- if (!page_address(page)) {
- km = kmap(page);
- if (!km) {
- pr_err("unable to map high page\n");
- return;
- }
- }
-#endif
-
-	/* flush by kernel VA range; flush_dcache_page() takes a struct
-	 * page, not an address */
-	dmac_flush_range(page_address(page),
-			 page_address(page) + PAGE_SIZE);
- outer_flush_range(page_to_phys(page), page_to_phys(page)+PAGE_SIZE);
- dsb();
-
-#ifdef CONFIG_HIGHMEM
- if (km) kunmap(page);
-#endif
-}
-
-static void nv_free_pages(struct nvos_pagemap *pm)
-{
- unsigned int i;
-
- if (pm->addr) vm_unmap_ram(pm->addr, pm->nr_pages);
-
- for (i=0; i<pm->nr_pages; i++) {
- ClearPageReserved(pm->pages[i]);
- __free_page(pm->pages[i]);
- }
- kfree(pm);
-}
-
-static struct nvos_pagemap *nv_alloc_pages(unsigned int count,
- pgprot_t prot, bool contiguous, int create_mapping)
-{
- struct nvos_pagemap *pm;
- size_t size;
- unsigned int i = 0;
-
- size = sizeof(struct nvos_pagemap) + sizeof(struct page *)*(count-1);
- pm = kzalloc(size, GFP_KERNEL);
- if (!pm)
- return NULL;
-
- if (count==1) contiguous = true;
-
- if (contiguous) {
- size_t order = get_order(count << PAGE_SHIFT);
- struct page *compound_page;
- compound_page = alloc_pages(nv_gfp_pool, order);
- if (!compound_page) goto fail;
-
- split_page(compound_page, order);
- for (i=0; i<count; i++)
- pm->pages[i] = nth_page(compound_page, i);
-
- for ( ; i < (1<<order); i++)
- __free_page(nth_page(compound_page, i));
- i = count;
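-		/* worked example: count == 5 -> get_order(20KB) == 3, so 8
-		 * pages are allocated and the trailing 3 are freed at once */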
- } else {
- for (i=0; i<count; i++) {
- pm->pages[i] = alloc_page(nv_gfp_pool);
- if (!pm->pages[i]) goto fail;
- }
- }
-
- if (create_mapping) {
- /* since the linear kernel mapping uses sections and super-
- * sections rather than PTEs, it's not possible to overwrite
- * it with the correct caching attributes, so use a local
- * mapping */
- pm->addr = vm_map_ram(pm->pages, count, -1, prot);
- if (!pm->addr) {
- pr_err("nv_alloc_pages fail to vmap contiguous area\n");
- goto fail;
- }
- }
-
- pm->nr_pages = count;
- for (i=0; i<count; i++) {
- SetPageReserved(pm->pages[i]);
- pagemap_flush_page(pm->pages[i]);
- }
-
- return pm;
-
-fail:
- while (i) __free_page(pm->pages[--i]);
-	kfree(pm);
- return NULL;
-}
-
-NvError NvOsPageMap(NvOsPageAllocHandle desc, size_t offs,
- size_t size, void **ptr)
-{
- struct nvos_pagemap *pm = (struct nvos_pagemap *)desc;
- if (!desc || !ptr || !size)
- return NvError_BadParameter;
-
- if (pm->addr) *ptr = (void*)((unsigned long)pm->addr + offs);
- else *ptr = NULL;
-
- return (*ptr) ? NvSuccess : NvError_MemoryMapFailed;
-}
-
-struct page *NvOsPageGetPage(NvOsPageAllocHandle desc, size_t offs)
-{
- struct nvos_pagemap *pm = (struct nvos_pagemap *)desc;
- if (!pm) return NULL;
-
- offs >>= PAGE_SHIFT;
- return (likely(offs<pm->nr_pages)) ? pm->pages[offs] : NULL;
-}
-
-NvOsPhysAddr NvOsPageAddress(NvOsPageAllocHandle desc, size_t offs)
-{
- struct nvos_pagemap *pm = (struct nvos_pagemap *)desc;
- size_t index;
-
- if (unlikely(!pm)) return (NvOsPhysAddr)0;
-
- index = offs >> PAGE_SHIFT;
- offs &= (PAGE_SIZE - 1);
-
- return (NvOsPhysAddr)(page_to_phys(pm->pages[index]) + offs);
-}
-
-
-void NvOsPageUnmap(NvOsPageAllocHandle desc, void *ptr, size_t size)
-{
- return;
-}
-
-NvError NvOsPageAlloc(size_t size, NvOsMemAttribute attrib,
- NvOsPageFlags flags, NvU32 protect, NvOsPageAllocHandle *desc)
-{
- struct nvos_pagemap *pm;
- pgprot_t prot = pgprot_kernel;
- size += PAGE_SIZE-1;
- size >>= PAGE_SHIFT;
-
- /* writeback is implemented as inner-cacheable only, since these
- * allocators are only used to allocate buffers for DMA-driven
- * clients, and the cost of L2 maintenance makes outer cacheability
- * a net performance loss more often than not */
- if (attrib == NvOsMemAttribute_WriteBack)
- prot = pgprot_inner_writeback(prot);
- else
- prot = pgprot_writecombine(prot);
-
- pm = nv_alloc_pages(size, prot, (flags==NvOsPageFlags_Contiguous), 1);
-
- if (!pm) return NvError_InsufficientMemory;
-
- *desc = (NvOsPageAllocHandle)pm;
- return NvSuccess;
-}
-
-void NvOsPageFree(NvOsPageAllocHandle desc)
-{
- struct nvos_pagemap *pm = (struct nvos_pagemap *)desc;
-
- if (pm) nv_free_pages(pm);
-}
-
-
-NvError NvOsPageLock(void *ptr, size_t size, NvU32 protect,
- NvOsPageAllocHandle *descriptor)
-{
- return NvError_NotImplemented;
-}
-
-NvError NvOsPageMapIntoPtr(NvOsPageAllocHandle desc, void *ptr,
- size_t offset, size_t size)
-{
- return NvError_NotImplemented;
-}
*/
ldr r5, =0xff0a81a8 @ PRRR
ldr r6, =0x40e040e0 @ NMRR
- ldr r5, =0xff0a89a8
- ldr r6, =0xc0e0c4e0
-
mcr p15, 0, r5, c10, c2, 0 @ write PRRR
mcr p15, 0, r6, c10, c2, 1 @ write NMRR
#endif
int i;
char buff[256];
- tegra_dc_io_start(dc);
-
DUMP_REG(DC_CMD_DISPLAY_COMMAND_OPTION0);
DUMP_REG(DC_CMD_DISPLAY_COMMAND);
DUMP_REG(DC_CMD_SIGNAL_RAISE);
DUMP_REG(DC_WINBUF_ADDR_H_OFFSET);
DUMP_REG(DC_WINBUF_ADDR_V_OFFSET);
}
-
- tegra_dc_io_end(dc);
}
#undef DUMP_REG
snprintf(name, sizeof(name), "tegra_dc%d_regs", dc->ndev->id);
(void) debugfs_create_file(name, S_IRUGO, NULL, dc, &dbg_fops);
+
}
#else
static void tegra_dc_dbg_add(struct tegra_dc *dc) {}
}
EXPORT_SYMBOL(tegra_dc_get_window);
-
static int get_topmost_window(u32 *depths, unsigned long *wins)
{
int idx, best = -1;
}
tegra_dc_writel(dc, update_mask, DC_CMD_STATE_CONTROL);
+
mutex_unlock(&dc->lock);
return 0;
}
EXPORT_SYMBOL(tegra_dc_update_windows);
-u32 tegra_dc_get_syncpt_id(struct tegra_dc *dc)
-{
- return dc->syncpt_id;
-}
-EXPORT_SYMBOL(tegra_dc_get_syncpt_id);
-
-u32 tegra_dc_incr_syncpt_max(struct tegra_dc *dc)
-{
- u32 max;
-
- mutex_lock(&dc->lock);
- max = nvhost_syncpt_incr_max(&dc->ndev->host->syncpt, dc->syncpt_id, 1);
- dc->syncpt_max = max;
- mutex_unlock(&dc->lock);
-
- return max;
-}
-
-void tegra_dc_incr_syncpt_min(struct tegra_dc *dc, u32 val)
-{
- mutex_lock(&dc->lock);
- while (dc->syncpt_min < val) {
- dc->syncpt_min++;
- nvhost_syncpt_cpu_incr(&dc->ndev->host->syncpt, dc->syncpt_id);
- }
- mutex_unlock(&dc->lock);
-}
-
static bool tegra_dc_windows_are_clean(struct tegra_dc_win *windows[],
int n)
{
static void tegra_dc_init(struct tegra_dc *dc)
{
- u32 disp_syncpt;
- u32 vblank_syncpt;
-
tegra_dc_writel(dc, 0x00000100, DC_CMD_GENERAL_INCR_SYNCPT_CNTRL);
- if (dc->ndev->id == 0) {
- disp_syncpt = NVSYNCPT_DISP0;
- vblank_syncpt = NVSYNCPT_VBLANK0;
- } else if (dc->ndev->id == 1) {
- disp_syncpt = NVSYNCPT_DISP1;
- vblank_syncpt = NVSYNCPT_VBLANK1;
- }
- tegra_dc_writel(dc, 0x00000100 | vblank_syncpt, DC_CMD_CONT_SYNCPT_VSYNC);
+ if (dc->ndev->id == 0)
+ tegra_dc_writel(dc, 0x0000011a, DC_CMD_CONT_SYNCPT_VSYNC);
+ else
+ tegra_dc_writel(dc, 0x0000011b, DC_CMD_CONT_SYNCPT_VSYNC);
tegra_dc_writel(dc, 0x00004700, DC_CMD_INT_TYPE);
tegra_dc_writel(dc, 0x0001c700, DC_CMD_INT_POLARITY);
tegra_dc_writel(dc, 0x00000020, DC_DISP_MEM_HIGH_PRIORITY);
tegra_dc_set_color_control(dc);
- dc->syncpt_id = disp_syncpt;
-
- dc->syncpt_min = dc->syncpt_max =
- nvhost_syncpt_read(&dc->ndev->host->syncpt, disp_syncpt);
-
if (dc->mode.pclk)
tegra_dc_program_mode(dc, &dc->mode);
}
static void _tegra_dc_enable(struct tegra_dc *dc)
{
- tegra_dc_io_start(dc);
-
if (dc->out && dc->out->enable)
dc->out->enable();
tegra_dc_setup_clk(dc, dc->clk);
+ clk_enable(dc->host1x_clk);
clk_enable(dc->clk);
tegra_periph_reset_deassert(dc->clk);
enable_irq(dc->irq);
disable_irq(dc->irq);
tegra_periph_reset_assert(dc->clk);
clk_disable(dc->clk);
+ clk_disable(dc->host1x_clk);
if (dc->out && dc->out->disable)
dc->out->disable();
-
- /* flush any pending syncpt waits */
- while (dc->syncpt_min < dc->syncpt_max) {
- dc->syncpt_min++;
- nvhost_syncpt_cpu_incr(&dc->ndev->host->syncpt, dc->syncpt_id);
- }
-
- tegra_dc_io_end(dc);
}
{
struct tegra_dc *dc;
struct clk *clk;
+ struct clk *host1x_clk;
struct resource *res;
struct resource *base_res;
struct resource *fb_mem = NULL;
fb_mem = nvhost_get_resource_byname(ndev, IORESOURCE_MEM, "fbmem");
+ host1x_clk = clk_get(&ndev->dev, "host1x");
+ if (IS_ERR_OR_NULL(host1x_clk)) {
+ dev_err(&ndev->dev, "can't get host1x clock\n");
+ ret = -ENOENT;
+ goto err_iounmap_reg;
+ }
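+ /* Register access previously rode on nvhost's clock management via
+  * tegra_dc_io_start()/io_end(); with those gone, the DC holds its own
+  * host1x clock reference -- a reading of this change, not a stated
+  * rationale. */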
+
clk = clk_get(&ndev->dev, NULL);
if (IS_ERR_OR_NULL(clk)) {
dev_err(&ndev->dev, "can't get clock\n");
ret = -ENOENT;
- goto err_iounmap_reg;
+ goto err_put_host1x_clk;
}
dc->clk = clk;
+ dc->host1x_clk = host1x_clk;
dc->base_res = base_res;
dc->base = base;
dc->irq = irq;
free_irq(irq, dc);
err_put_clk:
clk_put(clk);
+err_put_host1x_clk:
+ clk_put(host1x_clk);
err_iounmap_reg:
iounmap(base);
if (fb_mem)
free_irq(dc->irq, dc);
clk_put(dc->clk);
+ clk_put(dc->host1x_clk);
iounmap(dc->base);
if (dc->fb_mem)
release_resource(dc->base_res);
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/wait.h>
-#include "../host/dev.h"
struct tegra_dc;
int irq;
struct clk *clk;
+ struct clk *host1x_clk;
bool enabled;
struct resource *fb_mem;
struct tegra_fb_info *fb;
-
- u32 syncpt_id;
- u32 syncpt_min;
- u32 syncpt_max;
};
-static inline void tegra_dc_io_start(struct tegra_dc *dc)
-{
- nvhost_module_busy(&dc->ndev->host->mod);
-}
-
-static inline void tegra_dc_io_end(struct tegra_dc *dc)
-{
- nvhost_module_idle(&dc->ndev->host->mod);
-}
-
static inline unsigned long tegra_dc_readl(struct tegra_dc *dc,
unsigned long reg)
{
- BUG_ON(!nvhost_module_powered(&dc->ndev->host->mod));
return readl(dc->base + reg * 4);
}
static inline void tegra_dc_writel(struct tegra_dc *dc, unsigned long val,
unsigned long reg)
{
- BUG_ON(!nvhost_module_powered(&dc->ndev->host->mod));
writel(val, dc->base + reg * 4);
}
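
/* Note: DC register offsets are word indices, hence the "reg * 4" byte
 * scaling in both accessors above. */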
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
-#include <linux/uaccess.h>
#include <linux/slab.h>
-#include <linux/file.h>
#include <linux/nvhost.h>
-#include <linux/nvmap.h>
-#include <linux/workqueue.h>
#include <asm/atomic.h>
-#include <video/tegrafb.h>
-
#include <mach/dc.h>
#include <mach/fb.h>
-#include "host/dev.h"
-
struct tegra_fb_info {
struct tegra_dc_win *win;
struct nvhost_device *ndev;
int yres;
atomic_t in_use;
- struct file *nvmap_file;
-
- struct workqueue_struct *flip_wq;
-};
-
-struct tegra_fb_flip_data {
- struct work_struct work;
- struct tegra_fb_info *fb;
- struct tegra_fb_flip_args args;
- u32 syncpt_max;
};
/* palette array used by the fbcon */
if (atomic_xchg(&tegra_fb->in_use, 1))
return -EBUSY;
- tegra_fb->nvmap_file = NULL;
-
return 0;
}
{
struct tegra_fb_info *tegra_fb = info->par;
- if (tegra_fb->nvmap_file)
- fput(tegra_fb->nvmap_file);
-
- flush_workqueue(tegra_fb->flip_wq);
-
WARN_ON(!atomic_xchg(&tegra_fb->in_use, 0));
return 0;
return -EINVAL;
}
info->fix.line_length = var->xres * var->bits_per_pixel / 8;
- tegra_fb->win->stride = info->fix.line_length;
if (var->pixclock) {
struct tegra_dc_mode mode;
cfb_imageblit(info, image);
}
-/* TODO: implement ALLOC, FREE, BLANK ioctls */
-
-static int tegra_fb_set_nvmap_fd(struct tegra_fb_info *tegra_fb, int fd)
-{
- struct file *nvmap_file = NULL;
- int err;
-
- if (fd < 0)
- return -EINVAL;
-
- if (fd > 0) {
- nvmap_file = fget(fd);
- if (!nvmap_file)
- return -EINVAL;
-
- err = nvmap_validate_file(nvmap_file);
- if (err)
- goto err;
- }
-
- if (tegra_fb->nvmap_file)
- fput(tegra_fb->nvmap_file);
-
- tegra_fb->nvmap_file = nvmap_file;
-
- return 0;
-
-err:
- fput(nvmap_file);
- return err;
-}
-
-static void tegra_fb_set_windowattr(struct tegra_fb_info *tegra_fb,
- struct tegra_dc_win *win,
- struct tegra_fb_windowattr *attr)
-{
- if (!attr->buff_id) {
- win->flags = 0;
- return;
- }
- win->flags = TEGRA_WIN_FLAG_ENABLED;
- if (attr->blend == TEGRA_FB_WIN_BLEND_PREMULT)
- win->flags |= TEGRA_WIN_FLAG_BLEND_PREMULT;
- else if (attr->blend == TEGRA_FB_WIN_BLEND_COVERAGE)
- win->flags |= TEGRA_WIN_FLAG_BLEND_COVERAGE;
- win->fmt = attr->pixformat;
- win->x = attr->x;
- win->y = attr->y;
- win->w = attr->w;
- win->h = attr->h;
- win->out_x = attr->out_x;
- win->out_y = attr->out_y;
- win->out_w = attr->out_w;
- win->out_h = attr->out_h;
- win->z = attr->z;
- // STOPSHIP need to check perms on handle
- win->phys_addr = nvmap_pin_single((struct nvmap_handle *)attr->buff_id);
- win->phys_addr += attr->offset;
- win->stride = attr->stride;
- if ((s32)attr->pre_syncpt_id >= 0) {
- nvhost_syncpt_wait_timeout(&tegra_fb->ndev->host->syncpt,
- attr->pre_syncpt_id,
- attr->pre_syncpt_val,
- msecs_to_jiffies(500));
- }
-}
-
-static void tegra_fb_set_windowhandle(struct tegra_fb_info *tegra_fb,
- struct tegra_dc_win *win,
- unsigned long handle)
-{
- if (win->cur_handle)
- nvmap_unpin((struct nvmap_handle **)&win->cur_handle, 1);
- win->cur_handle = handle;
-}
-
-static void tegra_fb_flip_worker(struct work_struct *work)
-{
- struct tegra_fb_flip_data *data =
- container_of(work, struct tegra_fb_flip_data, work);
- struct tegra_fb_info *tegra_fb = data->fb;
- struct tegra_dc_win *win;
- struct tegra_dc_win *wins[TEGRA_FB_FLIP_N_WINDOWS];
- struct tegra_dc_win **w = wins;
- struct tegra_dc *dc = tegra_fb->win->dc;
- int i;
-
- for (i = 0; i < TEGRA_FB_FLIP_N_WINDOWS; i++) {
- int idx = data->args.win[i].index;
- win = tegra_dc_get_window(dc, idx);
- if (win) {
- tegra_fb_set_windowattr(tegra_fb, win,
- &data->args.win[i]);
- *w++ = win;
- } else if (idx != -1) {
- dev_warn(&tegra_fb->ndev->dev,
- "invalid window index %d on flip\n", idx);
- }
- }
-
- tegra_dc_update_windows(wins, w - wins);
-
- for (i = 0; i < TEGRA_FB_FLIP_N_WINDOWS; i++) {
- win = tegra_dc_get_window(dc, data->args.win[i].index);
- if (win)
- tegra_fb_set_windowhandle(tegra_fb, win,
- data->args.win[i].buff_id);
- }
-
- /* TODO: implement swapinterval here */
- tegra_dc_sync_windows(wins, w - wins);
-
- tegra_dc_incr_syncpt_min(tegra_fb->win->dc, data->syncpt_max);
-
- kfree(data);
-}
-
-
-static int tegra_fb_flip(struct tegra_fb_info *tegra_fb,
- struct tegra_fb_flip_args *args)
-{
- struct tegra_fb_flip_data *data;
- u32 syncpt_max;
-
- if (WARN_ON(!tegra_fb->nvmap_file))
- return -EFAULT;
-
- if (WARN_ON(!tegra_fb->ndev))
- return -EFAULT;
-
- data = kmalloc(sizeof(*data), GFP_KERNEL);
- if (data == NULL) {
- dev_err(&tegra_fb->ndev->dev,
- "can't allocate memory for flip\n");
- return -ENOMEM;
- }
-
- INIT_WORK(&data->work, tegra_fb_flip_worker);
- data->fb = tegra_fb;
- memcpy(&data->args, args, sizeof(data->args));
-
- syncpt_max = tegra_dc_incr_syncpt_max(tegra_fb->win->dc);
- data->syncpt_max = syncpt_max;
-
- queue_work(tegra_fb->flip_wq, &data->work);
-
- args->post_syncpt_val = syncpt_max;
- args->post_syncpt_id = tegra_dc_get_syncpt_id(tegra_fb->win->dc);
-
- return 0;
-}
-
-static int tegra_fb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg)
-{
- struct tegra_fb_info *tegra_fb = info->par;
- struct tegra_fb_flip_args flip_args;
- int fd;
- int ret;
-
- switch (cmd) {
- case FBIO_TEGRA_SET_NVMAP_FD:
- if (copy_from_user(&fd, (void __user *)arg, sizeof(fd)))
- return -EFAULT;
-
- return tegra_fb_set_nvmap_fd(tegra_fb, fd);
-
- case FBIO_TEGRA_FLIP:
- if (copy_from_user(&flip_args, (void __user *)arg, sizeof(flip_args)))
- return -EFAULT;
-
- ret = tegra_fb_flip(tegra_fb, &flip_args);
-
- if (copy_to_user((void __user *)arg, &flip_args, sizeof(flip_args)))
- return -EFAULT;
-
- return ret;
-
- default:
- return -ENOTTY;
- }
-
- return 0;
-}
-
static struct fb_ops tegra_fb_ops = {
.owner = THIS_MODULE,
.fb_open = tegra_fb_open,
.fb_fillrect = tegra_fb_fillrect,
.fb_copyarea = tegra_fb_copyarea,
.fb_imageblit = tegra_fb_imageblit,
- .fb_ioctl = tegra_fb_ioctl,
};
void tegra_fb_update_monspecs(struct tegra_fb_info *fb_info,
tegra_fb->yres = fb_data->yres;
atomic_set(&tegra_fb->in_use, 0);
- tegra_fb->flip_wq = create_singlethread_workqueue("tegra_flip");
- if (!tegra_fb->flip_wq) {
- ret = -ENOMEM;
- goto err_free;
- }
-
if (fb_mem) {
fb_size = resource_size(fb_mem);
fb_phys = fb_mem->start;
if (!fb_base) {
dev_err(&ndev->dev, "fb can't be mapped\n");
ret = -EBUSY;
- goto err_delete_wq;
+ goto err_free;
}
tegra_fb->valid = true;
}
err_iounmap_fb:
iounmap(fb_base);
-err_delete_wq:
-
err_free:
framebuffer_release(info);
err:
struct fb_info *info = fb_info->info;
unregister_framebuffer(info);
-
- flush_workqueue(fb_info->flip_wq);
- destroy_workqueue(fb_info->flip_wq);
-
iounmap(info->screen_base);
framebuffer_release(info);
}
nvhost-objs = \
- nvhost_acm.o \
- nvhost_syncpt.o \
- nvhost_cdma.o \
- nvhost_cpuaccess.o \
- nvhost_intr.o \
- nvhost_channel.o \
- nvhost_3dctx.o \
dev.o \
- bus.o \
- debug.o
+ bus.o
obj-$(CONFIG_TEGRA_GRHOST) += nvhost.o
+++ /dev/null
-/*
- * drivers/video/tegra/dc/dc.c
- *
- * Copyright (C) 2010 Google, Inc.
- * Author: Erik Gilling <konkers@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/debugfs.h>
-#include <linux/seq_file.h>
-
-#include <asm/io.h>
-
-#include "dev.h"
-
-#ifdef CONFIG_DEBUG_FS
-
-enum {
- NVHOST_DBG_STATE_CMD = 0,
- NVHOST_DBG_STATE_DATA = 1,
-};
-
-static int nvhost_debug_handle_cmd(struct seq_file *s, u32 val, int *count)
-{
- unsigned mask;
- unsigned subop;
-
- switch (val >> 28) {
- case 0x0:
- mask = val & 0x3f;
- if (mask) {
- seq_printf(s, "SETCL(class=%03x, offset=%03x, mask=%02x, [",
- val >> 6 & 0x3ff, val >> 16 & 0xfff, mask);
- *count = hweight8(mask);
- return NVHOST_DBG_STATE_DATA;
- } else {
- seq_printf(s, "SETCL(class=%03x)\n", val >> 6 & 0x3ff);
- return NVHOST_DBG_STATE_CMD;
- }
-
- case 0x1:
- seq_printf(s, "INCR(offset=%03x, [", val >> 16 & 0x3ff);
- *count = val & 0xffff;
- return NVHOST_DBG_STATE_DATA;
-
- case 0x2:
- seq_printf(s, "NOMINCR(offset=%03x, [", val >> 16 & 0x3ff);
- *count = val & 0xffff;
- return NVHOST_DBG_STATE_DATA;
-
- case 0x3:
- mask = val & 0xffff;
- seq_printf(s, "MASK(offset=%03x, mask=%03x, [",
- val >> 16 & 0x3ff, mask);
- *count = hweight16(mask);
- return NVHOST_DBG_STATE_DATA;
-
- case 0x4:
- seq_printf(s, "IMM(offset=%03x, data=%03x)\n",
- val >> 16 & 0x3ff, val & 0xffff);
- return NVHOST_DBG_STATE_CMD;
-
- case 0x5:
- seq_printf(s, "RESTART(offset=%08x)\n", val << 4);
- return NVHOST_DBG_STATE_CMD;
-
- case 0x6:
- seq_printf(s, "GATHER(offset=%03x, insert=%d, type=%d, count=%04x, addr=[",
- val >> 16 & 0x3ff, val >> 15 & 0x1, val >> 14 & 0x1,
- val & 0x3fff);
- *count = 1;
- return NVHOST_DBG_STATE_DATA;
-
- case 0xe:
- subop = val >> 24 & 0xf;
- if (subop == 0)
- seq_printf(s, "ACQUIRE_MLOCK(index=%d)\n", val & 0xff);
- else if (subop == 1)
- seq_printf(s, "RELEASE_MLOCK(index=%d)\n", val & 0xff);
- else
- seq_printf(s, "EXTEND_UNKNOWN(%08x)\n", val);
-
- return NVHOST_DBG_STATE_CMD;
-
- case 0xf:
- seq_printf(s, "DONE()\n");
- return NVHOST_DBG_STATE_CMD;
-
- default:
- return NVHOST_DBG_STATE_CMD;
- }
-}
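-
-/*
- * Example decode (per the cases above): 0x104e0008 has top nibble 0x1,
- * so it prints "INCR(offset=04e, [" and then expects 8 data words.
- */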
-
-static int nvhost_debug_show(struct seq_file *s, void *unused)
-{
- struct nvhost_master *m = s->private;
- int i;
-
- nvhost_module_busy(&m->mod);
-
- for (i = 0; i < NVHOST_NUMCHANNELS; i++) {
- void __iomem *regs = m->channels[i].aperture;
- u32 dmaput, dmaget, dmactrl;
- u32 cbstat, cbread;
- u32 fifostat;
- u32 val, base;
- unsigned start, end;
- unsigned wr_ptr, rd_ptr;
- int state;
- int count = 0;
-
- dmaput = readl(regs + HOST1X_CHANNEL_DMAPUT);
- dmaget = readl(regs + HOST1X_CHANNEL_DMAGET);
- dmactrl = readl(regs + HOST1X_CHANNEL_DMACTRL);
- cbread = readl(m->aperture + HOST1X_SYNC_CBREAD(i));
- cbstat = readl(m->aperture + HOST1X_SYNC_CBSTAT(i));
-
- if (dmactrl != 0x0 || !m->channels[i].cdma.push_buffer.mapped) {
- seq_printf(s, "%d: inactive\n\n", i);
- continue;
- }
-
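- /*
-  * cbstat encodes (current class << 16) | method offset, cf. the
-  * default case below; 0x00010008 / 0x00010009 are read here as the
-  * host1x class wait-syncpt / wait-syncpt-base methods.
-  */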
- switch (cbstat) {
- case 0x00010008:
- seq_printf(s, "%d: waiting on syncpt %d val %d\n",
- i, cbread >> 24, cbread & 0xffffff);
- break;
-
- case 0x00010009:
- base = cbread >> 15 & 0xf;
-
- val = readl(m->aperture + HOST1X_SYNC_SYNCPT_BASE(base)) & 0xffff;
- val += cbread & 0xffff;
-
- seq_printf(s, "%d: waiting on syncpt %d val %d\n",
- i, cbread >> 24, val);
- break;
-
- default:
- seq_printf(s, "%d: active class %02x, offset %04x, val %08x\n",
- i, cbstat >> 16, cbstat & 0xffff, cbread);
- break;
- }
-
- fifostat = readl(regs + HOST1X_CHANNEL_FIFOSTAT);
- if ((fifostat & (1 << 10)) == 0) {
-
- writel(0x0, m->aperture + HOST1X_SYNC_CFPEEK_CTRL);
- writel(1 << 31 | i << 16, m->aperture + HOST1X_SYNC_CFPEEK_CTRL);
- rd_ptr = readl(m->aperture + HOST1X_SYNC_CFPEEK_PTRS) & 0x1ff;
- wr_ptr = readl(m->aperture + HOST1X_SYNC_CFPEEK_PTRS) >> 16 & 0x1ff;
-
- start = readl(m->aperture + HOST1X_SYNC_CF_SETUP(i)) & 0x1ff;
- end = (readl(m->aperture + HOST1X_SYNC_CF_SETUP(i)) >> 16) & 0x1ff;
-
- state = NVHOST_DBG_STATE_CMD;
-
- do {
- writel(0x0, m->aperture + HOST1X_SYNC_CFPEEK_CTRL);
- writel(1 << 31 | i << 16 | rd_ptr, m->aperture + HOST1X_SYNC_CFPEEK_CTRL);
- val = readl(m->aperture + HOST1X_SYNC_CFPEEK_READ);
-
- switch (state) {
- case NVHOST_DBG_STATE_CMD:
- seq_printf(s, "%d: %08x:", i, val);
-
- state = nvhost_debug_handle_cmd(s, val, &count);
- if (state == NVHOST_DBG_STATE_DATA && count == 0) {
- state = NVHOST_DBG_STATE_CMD;
- seq_printf(s, "])\n");
- }
- break;
-
- case NVHOST_DBG_STATE_DATA:
- count--;
- seq_printf(s, "%08x%s", val, count > 0 ? ", " : "])\n");
- if (count == 0)
- state = NVHOST_DBG_STATE_CMD;
- break;
- }
-
- if (rd_ptr == end)
- rd_ptr = start;
- else
- rd_ptr++;
-
- } while (rd_ptr != wr_ptr);
-
- if (state == NVHOST_DBG_STATE_DATA)
- seq_printf(s, ", ...])\n");
- }
- seq_printf(s, "\n");
- }
-
- nvhost_module_idle(&m->mod);
- return 0;
-}
-
-
-static int nvhost_debug_open(struct inode *inode, struct file *file)
-{
- return single_open(file, nvhost_debug_show, inode->i_private);
-}
-
-static const struct file_operations nvhost_debug_fops = {
- .open = nvhost_debug_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-void nvhost_debug_init(struct nvhost_master *master)
-{
- debugfs_create_file("tegra_host", S_IRUGO, NULL, master, &nvhost_debug_fops);
-}
-#else
-void nvhost_debug_init(struct nvhost_master *master)
-{
-}
-
-#endif
-
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
+#include "dev.h"
+
#include <linux/nvhost.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <asm/io.h>
-#include "dev.h"
-
#define DRIVER_NAME "tegra_grhost"
#define IFACE_NAME "nvhost"
-static int nvhost_major = NVHOST_MAJOR;
-static int nvhost_minor = NVHOST_CHANNEL_BASE;
-
-struct nvhost_channel_userctx {
- struct nvhost_channel *ch;
- struct nvhost_hwctx *hwctx;
- struct file *nvmapctx;
- u32 syncpt_id;
- u32 syncpt_incrs;
- u32 cmdbufs_pending;
- u32 relocs_pending;
- struct nvmap_handle *gather_mem;
- struct nvhost_op_pair *gathers;
- int num_gathers;
- int pinarray_size;
- struct nvmap_pinarray_elem pinarray[NVHOST_MAX_HANDLES];
- struct nvmap_handle *unpinarray[NVHOST_MAX_HANDLES];
-};
-
-struct nvhost_ctrl_userctx {
- struct nvhost_master *dev;
- u32 mod_locks[NV_HOST1X_NB_MLOCKS];
-};
-
-static int nvhost_channelrelease(struct inode *inode, struct file *filp)
-{
- struct nvhost_channel_userctx *priv = filp->private_data;
- filp->private_data = NULL;
-
- nvhost_putchannel(priv->ch, priv->hwctx);
- if (priv->hwctx)
- priv->ch->ctxhandler.put(priv->hwctx);
- if (priv->gather_mem)
- nvmap_free(priv->gather_mem, priv->gathers);
- if (priv->nvmapctx)
- fput(priv->nvmapctx);
- kfree(priv);
- return 0;
-}
-
-static int nvhost_channelopen(struct inode *inode, struct file *filp)
-{
- struct nvhost_channel_userctx *priv;
- struct nvhost_channel *ch;
-
- ch = container_of(inode->i_cdev, struct nvhost_channel, cdev);
- ch = nvhost_getchannel(ch);
- if (!ch)
- return -ENOMEM;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv) {
- nvhost_putchannel(ch, NULL);
- return -ENOMEM;
- }
- filp->private_data = priv;
- priv->ch = ch;
- priv->gather_mem = nvmap_alloc(
- sizeof(struct nvhost_op_pair) * NVHOST_MAX_GATHERS, 32,
- NVMEM_HANDLE_CACHEABLE, (void**)&priv->gathers);
- if (IS_ERR_OR_NULL(priv->gather_mem))
- goto fail;
- if (ch->ctxhandler.alloc) {
- priv->hwctx = ch->ctxhandler.alloc(ch);
- if (!priv->hwctx)
- goto fail;
- }
-
- return 0;
-fail:
- nvhost_channelrelease(inode, filp);
- return -ENOMEM;
-}
-
-static void add_gather(struct nvhost_channel_userctx *ctx, int idx,
- struct nvmap_handle *mem, u32 words, u32 offset)
-{
- struct nvmap_pinarray_elem *pin;
- pin = &ctx->pinarray[ctx->pinarray_size++];
- pin->patch_mem = ctx->gather_mem;
- pin->patch_offset = (idx * sizeof(struct nvhost_op_pair)) +
- offsetof(struct nvhost_op_pair, op2);
- pin->pin_mem = mem;
- pin->pin_offset = offset;
- ctx->gathers[idx].op1 = nvhost_opcode_gather(0, words);
-}
-
-static void reset_submit(struct nvhost_channel_userctx *ctx)
-{
- ctx->cmdbufs_pending = 0;
- ctx->relocs_pending = 0;
-}
-
-static ssize_t nvhost_channelwrite(struct file *filp, const char __user *buf,
- size_t count, loff_t *offp)
-{
- struct nvhost_channel_userctx *priv = filp->private_data;
- size_t remaining = count;
- int err = 0;
-
- while (remaining) {
- size_t consumed;
- if (!priv->relocs_pending && !priv->cmdbufs_pending) {
- consumed = sizeof(struct nvhost_submit_hdr);
- if (remaining < consumed)
- break;
- if (copy_from_user(&priv->syncpt_id, buf, consumed)) {
- err = -EFAULT;
- break;
- }
- if (!priv->cmdbufs_pending) {
- err = -EFAULT;
- break;
- }
- /* leave room for ctx switch */
- priv->num_gathers = 2;
- priv->pinarray_size = 0;
- } else if (priv->cmdbufs_pending) {
- struct nvhost_cmdbuf cmdbuf;
- consumed = sizeof(cmdbuf);
- if (remaining < consumed)
- break;
- if (copy_from_user(&cmdbuf, buf, consumed)) {
- err = -EFAULT;
- break;
- }
- add_gather(priv, priv->num_gathers++,
- (struct nvmap_handle *)cmdbuf.mem,
- cmdbuf.words, cmdbuf.offset);
- priv->cmdbufs_pending--;
- } else if (priv->relocs_pending) {
- int numrelocs = remaining / sizeof(struct nvhost_reloc);
- if (!numrelocs)
- break;
- numrelocs = min_t(int, numrelocs, priv->relocs_pending);
- consumed = numrelocs * sizeof(struct nvhost_reloc);
- if (copy_from_user(&priv->pinarray[priv->pinarray_size],
- buf, consumed)) {
- err = -EFAULT;
- break;
- }
- priv->pinarray_size += numrelocs;
- priv->relocs_pending -= numrelocs;
- } else {
- err = -EFAULT;
- break;
- }
- remaining -= consumed;
- buf += consumed;
- }
-
- if (err < 0) {
- dev_err(&priv->ch->dev->pdev->dev, "channel write error\n");
- reset_submit(priv);
- return err;
- }
-
- return (count - remaining);
-}
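-
-/*
- * The write() stream parsed above is: one nvhost_submit_hdr, then
- * cmdbufs_pending nvhost_cmdbuf records, then relocs_pending
- * nvhost_reloc records; anything out of order is rejected with -EFAULT.
- */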
-
-static int nvhost_ioctl_channel_flush(
- struct nvhost_channel_userctx *ctx,
- struct nvhost_get_param_args *args)
-{
- struct nvhost_cpuinterrupt ctxsw;
- int gather_idx = 2;
- int num_intrs = 0;
- u32 syncval;
- int num_unpin;
- int err;
-
- if (ctx->relocs_pending || ctx->cmdbufs_pending) {
- reset_submit(ctx);
- dev_err(&ctx->ch->dev->pdev->dev, "channel submit out of sync\n");
- return -EFAULT;
- }
- if (!ctx->nvmapctx) {
- dev_err(&ctx->ch->dev->pdev->dev, "no nvmap context set\n");
- return -EFAULT;
- }
- if (ctx->num_gathers <= 2)
- return 0;
-
- /* keep module powered */
- nvhost_module_busy(&ctx->ch->mod);
-
- /* pin mem handles and patch physical addresses */
- err = nvmap_pin_array(ctx->nvmapctx, ctx->pinarray, ctx->pinarray_size,
- ctx->unpinarray, &num_unpin, true);
- if (err) {
- dev_warn(&ctx->ch->dev->pdev->dev, "nvmap_pin_array failed: %d\n", err);
- nvhost_module_idle(&ctx->ch->mod);
- return err;
- }
-
- /* get submit lock */
- err = mutex_lock_interruptible(&ctx->ch->submitlock);
- if (err) {
- nvmap_unpin(ctx->unpinarray, num_unpin);
- nvhost_module_idle(&ctx->ch->mod);
- return err;
- }
-
- /* context switch */
- if (ctx->ch->cur_ctx != ctx->hwctx) {
- struct nvhost_hwctx *hw = ctx->hwctx;
- if (hw && hw->valid) {
- gather_idx--;
- ctx->gathers[gather_idx].op1 =
- nvhost_opcode_gather(0, hw->restore_size);
- ctx->gathers[gather_idx].op2 = hw->restore_phys;
- ctx->syncpt_incrs += hw->restore_incrs;
- }
- hw = ctx->ch->cur_ctx;
- if (hw) {
- gather_idx--;
- ctx->gathers[gather_idx].op1 =
- nvhost_opcode_gather(0, hw->save_size);
- ctx->gathers[gather_idx].op2 = hw->save_phys;
- ctx->syncpt_incrs += hw->save_incrs;
- num_intrs = 1;
- ctxsw.syncpt_val = hw->save_incrs - 1;
- ctxsw.intr_data = hw;
- hw->valid = true;
- ctx->ch->ctxhandler.get(hw);
- }
- ctx->ch->cur_ctx = ctx->hwctx;
- }
-
- /* add a setclass for modules that require it */
- if (gather_idx == 2 && ctx->ch->desc->class) {
- gather_idx--;
- ctx->gathers[gather_idx].op1 =
- nvhost_opcode_setclass(ctx->ch->desc->class, 0, 0);
- ctx->gathers[gather_idx].op2 = NVHOST_OPCODE_NOOP;
- }
-
- /* get absolute sync value */
- if (BIT(ctx->syncpt_id) & NVSYNCPTS_CLIENT_MANAGED)
- syncval = nvhost_syncpt_set_max(&ctx->ch->dev->syncpt,
- ctx->syncpt_id, ctx->syncpt_incrs);
- else
- syncval = nvhost_syncpt_incr_max(&ctx->ch->dev->syncpt,
- ctx->syncpt_id, ctx->syncpt_incrs);
-
- /* patch absolute syncpt value into interrupt triggers */
- ctxsw.syncpt_val += syncval - ctx->syncpt_incrs;
-
- nvhost_channel_submit(ctx->ch, &ctx->gathers[gather_idx],
- ctx->num_gathers - gather_idx, &ctxsw, num_intrs,
- ctx->unpinarray, num_unpin, ctx->syncpt_id, syncval);
-
- /* schedule a submit complete interrupt */
- nvhost_intr_add_action(&ctx->ch->dev->intr, ctx->syncpt_id, syncval,
- NVHOST_INTR_ACTION_SUBMIT_COMPLETE, ctx->ch, NULL);
-
- mutex_unlock(&ctx->ch->submitlock);
- args->value = syncval;
- return 0;
-}
-
-static long nvhost_channelctl(struct file *filp,
- unsigned int cmd, unsigned long arg)
-{
- struct nvhost_channel_userctx *priv = filp->private_data;
- u8 buf[NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE];
- int err = 0;
-
- if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
- (_IOC_NR(cmd) == 0) ||
- (_IOC_NR(cmd) > NVHOST_IOCTL_CHANNEL_LAST))
- return -EFAULT;
-
- BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE);
-
- if (_IOC_DIR(cmd) & _IOC_WRITE) {
- if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
- return -EFAULT;
- }
-
- switch (cmd) {
- case NVHOST_IOCTL_CHANNEL_FLUSH:
- err = nvhost_ioctl_channel_flush(priv, (void *)buf);
- break;
- case NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS:
- ((struct nvhost_get_param_args *)buf)->value =
- priv->ch->desc->syncpts;
- break;
- case NVHOST_IOCTL_CHANNEL_GET_WAITBASES:
- ((struct nvhost_get_param_args *)buf)->value =
- priv->ch->desc->waitbases;
- break;
- case NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES:
- ((struct nvhost_get_param_args *)buf)->value =
- priv->ch->desc->modulemutexes;
- break;
- case NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD:
- {
- int fd = (int)((struct nvhost_set_nvmap_fd_args *)buf)->fd;
- struct file *newctx = NULL;
- if (fd) {
- newctx = fget(fd);
- if (!newctx) {
- err = -EFAULT;
- break;
- }
- err = nvmap_validate_file(newctx);
- if (err) {
- fput(newctx);
- break;
- }
- }
- if (priv->nvmapctx)
- fput(priv->nvmapctx);
- priv->nvmapctx = newctx;
- break;
- }
- default:
- err = -ENOTTY;
- break;
- }
-
- if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
- err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
-
- return err;
-}
-
-static struct file_operations nvhost_channelops = {
- .owner = THIS_MODULE,
- .release = nvhost_channelrelease,
- .open = nvhost_channelopen,
- .write = nvhost_channelwrite,
- .unlocked_ioctl = nvhost_channelctl
-};
-
-static int nvhost_ctrlrelease(struct inode *inode, struct file *filp)
-{
- struct nvhost_ctrl_userctx *priv = filp->private_data;
- int i;
-
- filp->private_data = NULL;
- if (priv->mod_locks[0])
- nvhost_module_idle(&priv->dev->mod);
- for (i = 1; i < NV_HOST1X_NB_MLOCKS; i++)
- if (priv->mod_locks[i])
- nvhost_mutex_unlock(&priv->dev->cpuaccess, i);
- kfree(priv);
- return 0;
-}
-
-static int nvhost_ctrlopen(struct inode *inode, struct file *filp)
-{
- struct nvhost_master *host = container_of(inode->i_cdev, struct nvhost_master, cdev);
- struct nvhost_ctrl_userctx *priv;
-
- priv = kzalloc(sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- priv->dev = host;
- filp->private_data = priv;
- return 0;
-}
-
-static int nvhost_ioctl_ctrl_syncpt_read(
- struct nvhost_ctrl_userctx *ctx,
- struct nvhost_ctrl_syncpt_read_args *args)
-{
- if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
- return -EINVAL;
- args->value = nvhost_syncpt_read(&ctx->dev->syncpt, args->id);
- return 0;
-}
-
-static int nvhost_ioctl_ctrl_syncpt_incr(
- struct nvhost_ctrl_userctx *ctx,
- struct nvhost_ctrl_syncpt_incr_args *args)
-{
- if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
- return -EINVAL;
- nvhost_syncpt_incr(&ctx->dev->syncpt, args->id);
- return 0;
-}
-
-static int nvhost_ioctl_ctrl_syncpt_wait(
- struct nvhost_ctrl_userctx *ctx,
- struct nvhost_ctrl_syncpt_wait_args *args)
-{
- u32 timeout;
- if (args->id >= NV_HOST1X_SYNCPT_NB_PTS)
- return -EINVAL;
- if (args->timeout == NVHOST_NO_TIMEOUT)
- timeout = MAX_SCHEDULE_TIMEOUT;
- else
- timeout = (u32)msecs_to_jiffies(args->timeout);
-
- return nvhost_syncpt_wait_timeout(&ctx->dev->syncpt, args->id,
- args->thresh, timeout);
-}
-
-static int nvhost_ioctl_ctrl_module_mutex(
- struct nvhost_ctrl_userctx *ctx,
- struct nvhost_ctrl_module_mutex_args *args)
-{
- int err = 0;
- if (args->id >= NV_HOST1X_NB_MLOCKS ||
- args->lock > 1)
- return -EINVAL;
-
- if (args->lock && !ctx->mod_locks[args->id]) {
- if (args->id == 0)
- nvhost_module_busy(&ctx->dev->mod);
- else
- err = nvhost_mutex_try_lock(&ctx->dev->cpuaccess, args->id);
- if (!err)
- ctx->mod_locks[args->id] = 1;
- }
- else if (!args->lock && ctx->mod_locks[args->id]) {
- if (args->id == 0)
- nvhost_module_idle(&ctx->dev->mod);
- else
- nvhost_mutex_unlock(&ctx->dev->cpuaccess, args->id);
- ctx->mod_locks[args->id] = 0;
- }
- return err;
-}
-
-static int nvhost_ioctl_ctrl_module_regrdwr(
- struct nvhost_ctrl_userctx *ctx,
- struct nvhost_ctrl_module_regrdwr_args *args)
-{
- u32 num_offsets = args->num_offsets;
- u32 *offsets = args->offsets;
- void *values = args->values;
- u32 vals[64];
-
- if (!nvhost_access_module_regs(&ctx->dev->cpuaccess, args->id) ||
- (num_offsets == 0))
- return -EINVAL;
-
- while (num_offsets--) {
- u32 remaining = args->block_size;
- u32 offs;
- if (get_user(offs, offsets))
- return -EFAULT;
- offsets++;
- while (remaining) {
- u32 batch = min(remaining, 64*sizeof(u32));
- if (args->write) {
- if (copy_from_user(vals, values, batch))
- return -EFAULT;
- nvhost_write_module_regs(&ctx->dev->cpuaccess,
- args->id, offs, batch, vals);
- } else {
- nvhost_read_module_regs(&ctx->dev->cpuaccess,
- args->id, offs, batch, vals);
- if (copy_to_user(values, vals, batch))
- return -EFAULT;
- }
- remaining -= batch;
- offs += batch;
- values += batch;
- }
- }
-
- return 0;
-}
-
-static long nvhost_ctrlctl(struct file *filp,
- unsigned int cmd, unsigned long arg)
-{
- struct nvhost_ctrl_userctx *priv = filp->private_data;
- u8 buf[NVHOST_IOCTL_CTRL_MAX_ARG_SIZE];
- int err = 0;
-
- if ((_IOC_TYPE(cmd) != NVHOST_IOCTL_MAGIC) ||
- (_IOC_NR(cmd) == 0) ||
- (_IOC_NR(cmd) > NVHOST_IOCTL_CTRL_LAST))
- return -EFAULT;
-
- BUG_ON(_IOC_SIZE(cmd) > NVHOST_IOCTL_CTRL_MAX_ARG_SIZE);
-
- if (_IOC_DIR(cmd) & _IOC_WRITE) {
- if (copy_from_user(buf, (void __user *)arg, _IOC_SIZE(cmd)))
- return -EFAULT;
- }
-
- switch (cmd) {
- case NVHOST_IOCTL_CTRL_SYNCPT_READ:
- err = nvhost_ioctl_ctrl_syncpt_read(priv, (void *)buf);
- break;
- case NVHOST_IOCTL_CTRL_SYNCPT_INCR:
- err = nvhost_ioctl_ctrl_syncpt_incr(priv, (void *)buf);
- break;
- case NVHOST_IOCTL_CTRL_SYNCPT_WAIT:
- err = nvhost_ioctl_ctrl_syncpt_wait(priv, (void *)buf);
- break;
- case NVHOST_IOCTL_CTRL_MODULE_MUTEX:
- err = nvhost_ioctl_ctrl_module_mutex(priv, (void *)buf);
- break;
- case NVHOST_IOCTL_CTRL_MODULE_REGRDWR:
- err = nvhost_ioctl_ctrl_module_regrdwr(priv, (void *)buf);
- break;
- default:
- err = -ENOTTY;
- break;
- }
-
- if ((err == 0) && (_IOC_DIR(cmd) & _IOC_READ))
- err = copy_to_user((void __user *)arg, buf, _IOC_SIZE(cmd));
-
- return err;
-}
-
-static struct file_operations nvhost_ctrlops = {
- .owner = THIS_MODULE,
- .release = nvhost_ctrlrelease,
- .open = nvhost_ctrlopen,
- .unlocked_ioctl = nvhost_ctrlctl
-};
-
-static void power_host(struct nvhost_module *mod, enum nvhost_power_action action)
-{
- struct nvhost_master *dev = container_of(mod, struct nvhost_master, mod);
-
- if (action == NVHOST_POWER_ACTION_ON) {
- nvhost_intr_configure(&dev->intr, clk_get_rate(mod->clk[0]));
- nvhost_syncpt_reset(&dev->syncpt);
- }
- else if (action == NVHOST_POWER_ACTION_OFF) {
- int i;
- for (i = 0; i < NVHOST_NUMCHANNELS; i++)
- nvhost_channel_suspend(&dev->channels[i]);
- nvhost_syncpt_save(&dev->syncpt);
- }
-}
-
-static int __init nvhost_user_init(struct nvhost_master *host)
-{
- int i, err, devno;
-
- host->nvhost_class = class_create(THIS_MODULE, IFACE_NAME);
- if (IS_ERR(host->nvhost_class)) {
- err = PTR_ERR(host->nvhost_class);
- dev_err(&host->pdev->dev, "failed to create class\n");
- goto fail;
- }
-
- if (nvhost_major) {
- devno = MKDEV(nvhost_major, nvhost_minor);
- err = register_chrdev_region(devno, NVHOST_NUMCHANNELS + 1, IFACE_NAME);
- } else {
- err = alloc_chrdev_region(&devno, nvhost_minor,
- NVHOST_NUMCHANNELS + 1, IFACE_NAME);
- nvhost_major = MAJOR(devno);
- }
- if (err < 0) {
- dev_err(&host->pdev->dev, "failed to reserve chrdev region\n");
- goto fail;
- }
-
- for (i = 0; i < NVHOST_NUMCHANNELS; i++) {
- struct nvhost_channel *ch = &host->channels[i];
-
- cdev_init(&ch->cdev, &nvhost_channelops);
- ch->cdev.owner = THIS_MODULE;
-
- devno = MKDEV(nvhost_major, nvhost_minor + i);
- err = cdev_add(&ch->cdev, devno, 1);
- if (err < 0) {
- dev_err(&host->pdev->dev, "failed to add chan %i cdev\n", i);
- goto fail;
- }
- ch->node = device_create(host->nvhost_class, NULL, devno, NULL,
- IFACE_NAME "-%s", ch->desc->name);
- if (IS_ERR(ch->node)) {
- err = PTR_ERR(ch->node);
- dev_err(&host->pdev->dev, "failed to create chan %i device\n", i);
- goto fail;
- }
- }
-
- cdev_init(&host->cdev, &nvhost_ctrlops);
- host->cdev.owner = THIS_MODULE;
- devno = MKDEV(nvhost_major, nvhost_minor + NVHOST_NUMCHANNELS);
- err = cdev_add(&host->cdev, devno, 1);
- if (err < 0)
- goto fail;
- host->ctrl = device_create(host->nvhost_class, NULL, devno, NULL,
- IFACE_NAME "-ctrl");
- if (IS_ERR(host->ctrl)) {
- err = PTR_ERR(host->ctrl);
- dev_err(&host->pdev->dev, "failed to create ctrl device\n");
- goto fail;
- }
-
- return 0;
-fail:
- return err;
-}
-
static int __devinit nvhost_probe(struct platform_device *pdev)
{
struct nvhost_master *host;
- struct resource *regs, *intr0, *intr1;
- int i, err;
-
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- intr0 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- intr1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
-
- if (!regs || !intr0 || !intr1) {
- dev_err(&pdev->dev, "missing required platform resources\n");
- return -ENXIO;
- }
host = kzalloc(sizeof(*host), GFP_KERNEL);
if (!host)
return -ENOMEM;
host->pdev = pdev;
- host->reg_mem = request_mem_region(regs->start,
- resource_size(regs), pdev->name);
- if (!host->reg_mem) {
- dev_err(&pdev->dev, "failed to get host register memory\n");
- err = -ENXIO;
- goto fail;
- }
- host->aperture = ioremap(regs->start, resource_size(regs));
- if (!host->aperture) {
- dev_err(&pdev->dev, "failed to remap host registers\n");
- err = -ENXIO;
- goto fail;
- }
- host->sync_aperture = host->aperture +
- (NV_HOST1X_CHANNEL0_BASE +
- HOST1X_CHANNEL_SYNC_REG_BASE);
-
- for (i = 0; i < NVHOST_NUMCHANNELS; i++) {
- struct nvhost_channel *ch = &host->channels[i];
- err = nvhost_channel_init(ch, host, i);
- if (err < 0) {
- dev_err(&pdev->dev, "failed to init channel %d\n", i);
- goto fail;
- }
- }
-
- err = nvhost_cpuaccess_init(&host->cpuaccess, pdev);
- if (err) goto fail;
- err = nvhost_intr_init(&host->intr, intr1->start, intr0->start);
- if (err) goto fail;
- err = nvhost_user_init(host);
- if (err) goto fail;
- err = nvhost_module_init(&host->mod, "host1x", power_host, NULL, &pdev->dev);
- if (err) goto fail;
-
platform_set_drvdata(pdev, host);
nvhost_bus_register(host);
- nvhost_debug_init(host);
-
dev_info(&pdev->dev, "initialized\n");
return 0;
-
-fail:
- /* TODO: [ahatala 2010-05-04] */
- kfree(host);
- return err;
}
static int __exit nvhost_remove(struct platform_device *pdev)
return 0;
}
-static int nvhost_suspend(struct platform_device *pdev, pm_message_t state)
-{
- struct nvhost_master *host = platform_get_drvdata(pdev);
- dev_info(&pdev->dev, "suspending\n");
- nvhost_module_suspend(&host->mod);
- dev_info(&pdev->dev, "suspended\n");
- return 0;
-}
-
static struct platform_driver nvhost_driver = {
+ .probe = nvhost_probe,
.remove = __exit_p(nvhost_remove),
- .suspend = nvhost_suspend,
.driver = {
.owner = THIS_MODULE,
.name = DRIVER_NAME
static int __init nvhost_mod_init(void)
{
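+ /* platform_driver_register() (unlike the platform_driver_probe() call
+  * it replaces) also binds devices registered after driver init -- an
+  * inferred rationale for this change. */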
- return platform_driver_probe(&nvhost_driver, nvhost_probe);
+ return platform_driver_register(&nvhost_driver);
}
static void __exit nvhost_mod_exit(void)
#ifndef __NVHOST_DEV_H
#define __NVHOST_DEV_H
-#include "nvhost_acm.h"
-#include "nvhost_syncpt.h"
-#include "nvhost_intr.h"
-#include "nvhost_cpuaccess.h"
-#include "nvhost_channel.h"
-#include "nvhost_hardware.h"
-
-#define NVHOST_MAJOR 0 /* dynamic */
struct nvhost_master {
- void __iomem *aperture;
- void __iomem *sync_aperture;
- struct resource *reg_mem;
struct platform_device *pdev;
- struct class *nvhost_class;
- struct cdev cdev;
- struct device *ctrl;
- struct nvhost_syncpt syncpt;
- struct nvhost_cpuaccess cpuaccess;
- struct nvhost_intr intr;
- struct nvhost_module mod;
- struct nvhost_channel channels[NVHOST_NUMCHANNELS];
};
-void nvhost_debug_init(struct nvhost_master *master);
-
#endif
+++ /dev/null
-/*
- * drivers/video/tegra/host/nvhost_3dctx.c
- *
- * Tegra Graphics Host 3d hardware context
- *
- * Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include "nvhost_hwctx.h"
-#include "dev.h"
-
-#include <linux/slab.h>
-
-const struct hwctx_reginfo ctxsave_regs_3d[] = {
- HWCTX_REGINFO(0xe00, 16, DIRECT),
- HWCTX_REGINFO(0xe10, 16, DIRECT),
- HWCTX_REGINFO(0xe20, 1, DIRECT),
- HWCTX_REGINFO(0xe21, 1, DIRECT),
- HWCTX_REGINFO(0xe22, 1, DIRECT),
- HWCTX_REGINFO(0xe25, 1, DIRECT),
- HWCTX_REGINFO(0xe26, 1, DIRECT),
- HWCTX_REGINFO(0xe28, 2, DIRECT),
- HWCTX_REGINFO(0xe2a, 1, DIRECT),
- HWCTX_REGINFO(0x1, 1, DIRECT),
- HWCTX_REGINFO(0x2, 1, DIRECT),
- HWCTX_REGINFO(0xc, 2, DIRECT),
- HWCTX_REGINFO(0xe, 2, DIRECT),
- HWCTX_REGINFO(0x10, 2, DIRECT),
- HWCTX_REGINFO(0x12, 2, DIRECT),
- HWCTX_REGINFO(0x14, 2, DIRECT),
- HWCTX_REGINFO(0x100, 32, DIRECT),
- HWCTX_REGINFO(0x120, 1, DIRECT),
- HWCTX_REGINFO(0x121, 1, DIRECT),
- HWCTX_REGINFO(0x124, 1, DIRECT),
- HWCTX_REGINFO(0x125, 1, DIRECT),
- HWCTX_REGINFO(0x200, 1, DIRECT),
- HWCTX_REGINFO(0x201, 1, DIRECT),
- HWCTX_REGINFO(0x202, 1, DIRECT),
- HWCTX_REGINFO(0x203, 1, DIRECT),
- HWCTX_REGINFO(0x204, 1, DIRECT),
- HWCTX_REGINFO(0x207, 1024, INDIRECT),
- HWCTX_REGINFO(0x209, 1, DIRECT),
- HWCTX_REGINFO(0x300, 64, DIRECT),
- HWCTX_REGINFO(0x343, 1, DIRECT),
- HWCTX_REGINFO(0x344, 1, DIRECT),
- HWCTX_REGINFO(0x345, 1, DIRECT),
- HWCTX_REGINFO(0x346, 1, DIRECT),
- HWCTX_REGINFO(0x347, 1, DIRECT),
- HWCTX_REGINFO(0x348, 1, DIRECT),
- HWCTX_REGINFO(0x349, 1, DIRECT),
- HWCTX_REGINFO(0x34a, 1, DIRECT),
- HWCTX_REGINFO(0x34b, 1, DIRECT),
- HWCTX_REGINFO(0x34c, 1, DIRECT),
- HWCTX_REGINFO(0x34d, 1, DIRECT),
- HWCTX_REGINFO(0x34e, 1, DIRECT),
- HWCTX_REGINFO(0x34f, 1, DIRECT),
- HWCTX_REGINFO(0x350, 1, DIRECT),
- HWCTX_REGINFO(0x351, 1, DIRECT),
- HWCTX_REGINFO(0x352, 1, DIRECT),
- HWCTX_REGINFO(0x353, 1, DIRECT),
- HWCTX_REGINFO(0x354, 1, DIRECT),
- HWCTX_REGINFO(0x355, 1, DIRECT),
- HWCTX_REGINFO(0x356, 1, DIRECT),
- HWCTX_REGINFO(0x357, 1, DIRECT),
- HWCTX_REGINFO(0x358, 1, DIRECT),
- HWCTX_REGINFO(0x359, 1, DIRECT),
- HWCTX_REGINFO(0x35a, 1, DIRECT),
- HWCTX_REGINFO(0x35b, 1, DIRECT),
- HWCTX_REGINFO(0x363, 1, DIRECT),
- HWCTX_REGINFO(0x364, 1, DIRECT),
- HWCTX_REGINFO(0x400, 2, DIRECT),
- HWCTX_REGINFO(0x402, 1, DIRECT),
- HWCTX_REGINFO(0x403, 1, DIRECT),
- HWCTX_REGINFO(0x404, 1, DIRECT),
- HWCTX_REGINFO(0x405, 1, DIRECT),
- HWCTX_REGINFO(0x406, 1, DIRECT),
- HWCTX_REGINFO(0x407, 1, DIRECT),
- HWCTX_REGINFO(0x408, 1, DIRECT),
- HWCTX_REGINFO(0x409, 1, DIRECT),
- HWCTX_REGINFO(0x40a, 1, DIRECT),
- HWCTX_REGINFO(0x40b, 1, DIRECT),
- HWCTX_REGINFO(0x40c, 1, DIRECT),
- HWCTX_REGINFO(0x40d, 1, DIRECT),
- HWCTX_REGINFO(0x40e, 1, DIRECT),
- HWCTX_REGINFO(0x40f, 1, DIRECT),
- HWCTX_REGINFO(0x411, 1, DIRECT),
- HWCTX_REGINFO(0x500, 1, DIRECT),
- HWCTX_REGINFO(0x501, 1, DIRECT),
- HWCTX_REGINFO(0x502, 1, DIRECT),
- HWCTX_REGINFO(0x503, 1, DIRECT),
- HWCTX_REGINFO(0x520, 32, DIRECT),
- HWCTX_REGINFO(0x540, 64, INDIRECT),
- HWCTX_REGINFO(0x600, 0, INDIRECT_OFFSET),
- HWCTX_REGINFO(0x602, 16, INDIRECT_DATA),
- HWCTX_REGINFO(0x603, 128, INDIRECT),
- HWCTX_REGINFO(0x608, 4, DIRECT),
- HWCTX_REGINFO(0x60e, 1, DIRECT),
- HWCTX_REGINFO(0x700, 64, INDIRECT),
- HWCTX_REGINFO(0x710, 16, DIRECT),
- HWCTX_REGINFO(0x720, 32, DIRECT),
- HWCTX_REGINFO(0x740, 1, DIRECT),
- HWCTX_REGINFO(0x741, 1, DIRECT),
- HWCTX_REGINFO(0x800, 0, INDIRECT_OFFSET),
- HWCTX_REGINFO(0x802, 16, INDIRECT_DATA),
- HWCTX_REGINFO(0x803, 512, INDIRECT),
- HWCTX_REGINFO(0x805, 64, INDIRECT),
- HWCTX_REGINFO(0x820, 32, DIRECT),
- HWCTX_REGINFO(0x900, 64, INDIRECT),
- HWCTX_REGINFO(0x902, 1, DIRECT),
- HWCTX_REGINFO(0x903, 1, DIRECT),
- HWCTX_REGINFO(0xa02, 1, DIRECT),
- HWCTX_REGINFO(0xa03, 1, DIRECT),
- HWCTX_REGINFO(0xa04, 1, DIRECT),
- HWCTX_REGINFO(0xa05, 1, DIRECT),
- HWCTX_REGINFO(0xa06, 1, DIRECT),
- HWCTX_REGINFO(0xa07, 1, DIRECT),
- HWCTX_REGINFO(0xa08, 1, DIRECT),
- HWCTX_REGINFO(0xa09, 1, DIRECT),
- HWCTX_REGINFO(0xa0a, 1, DIRECT),
- HWCTX_REGINFO(0xa0b, 1, DIRECT),
- HWCTX_REGINFO(0x205, 1024, INDIRECT)
-};
-
-
-/*** restore ***/
-
-static unsigned int context_restore_size = 0;
-
-static void restore_begin(u32 *ptr, u32 waitbase)
-{
- /* set class to host */
- ptr[0] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
- NV_CLASS_HOST_INCR_SYNCPT_BASE, 1);
- /* increment sync point base */
- ptr[1] = nvhost_class_host_incr_syncpt_base(waitbase, 1);
- /* set class to 3D */
- ptr[2] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
- /* program PSEQ_QUAD_ID */
- ptr[3] = nvhost_opcode_imm(0x545, 0);
-}
-#define RESTORE_BEGIN_SIZE 4
-
-static void restore_end(u32 *ptr, u32 syncpt_id)
-{
- /* syncpt increment to track restore gather. */
- ptr[0] = nvhost_opcode_imm(0x0, ((1UL << 8) | (u8)(syncpt_id & 0xff)));
-}
-#define RESTORE_END_SIZE 1
-
-static void restore_direct(u32 *ptr, u32 start_reg, u32 count)
-{
- ptr[0] = nvhost_opcode_incr(start_reg, count);
-}
-#define RESTORE_DIRECT_SIZE 1
-
-static void restore_indoffset(u32 *ptr, u32 offset_reg, u32 offset)
-{
- ptr[0] = nvhost_opcode_imm(offset_reg, offset);
-}
-#define RESTORE_INDOFFSET_SIZE 1
-
-static void restore_inddata(u32 *ptr, u32 data_reg, u32 count)
-{
- ptr[0] = nvhost_opcode_nonincr(data_reg, count);
-}
-#define RESTORE_INDDATA_SIZE 1
-
-static void restore_registers_from_fifo(u32 *ptr, unsigned int count,
- struct nvhost_channel *channel,
- unsigned int *pending)
-{
- void __iomem *chan_regs = channel->aperture;
- unsigned int entries = *pending;
- while (count) {
- unsigned int num;
-
- while (!entries) {
- /* query host for number of entries in fifo */
- entries = nvhost_channel_fifostat_outfentries(
- readl(chan_regs + HOST1X_CHANNEL_FIFOSTAT));
- if (!entries)
- cpu_relax();
- /* TODO: [ahowe 2010-06-14] timeout */
- }
- num = min(entries, count);
- entries -= num;
- count -= num;
-
- while (num & ~0x3) {
- u32 arr[4];
- arr[0] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
- arr[1] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
- arr[2] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
- arr[3] = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
- memcpy(ptr, arr, 4*sizeof(u32));
- ptr += 4;
- num -= 4;
- }
- while (num--)
- *ptr++ = readl(chan_regs + HOST1X_CHANNEL_INDDATA);
- }
- *pending = entries;
-}
-
-static void setup_restore(u32 *ptr, u32 waitbase)
-{
- const struct hwctx_reginfo *r;
- const struct hwctx_reginfo *rend;
-
- restore_begin(ptr, waitbase);
- ptr += RESTORE_BEGIN_SIZE;
-
- r = ctxsave_regs_3d;
- rend = ctxsave_regs_3d + ARRAY_SIZE(ctxsave_regs_3d);
- for ( ; r != rend; ++r) {
- u32 offset = r->offset;
- u32 count = r->count;
- switch (r->type) {
- case HWCTX_REGINFO_DIRECT:
- restore_direct(ptr, offset, count);
- ptr += RESTORE_DIRECT_SIZE;
- break;
- case HWCTX_REGINFO_INDIRECT:
- restore_indoffset(ptr, offset, 0);
- ptr += RESTORE_INDOFFSET_SIZE;
- restore_inddata(ptr, offset + 1, count);
- ptr += RESTORE_INDDATA_SIZE;
- break;
- case HWCTX_REGINFO_INDIRECT_OFFSET:
- restore_indoffset(ptr, offset, count);
- ptr += RESTORE_INDOFFSET_SIZE;
- continue; /* INDIRECT_DATA follows with real count */
- case HWCTX_REGINFO_INDIRECT_DATA:
- restore_inddata(ptr, offset, count);
- ptr += RESTORE_INDDATA_SIZE;
- break;
- }
- ptr += count;
- }
-
- restore_end(ptr, NVSYNCPT_3D);
- wmb();
-}
-
-/*** save ***/
-
-/* the same context save command sequence is used for all contexts. */
-static struct nvmap_handle *context_save_buf = NULL;
-static u32 context_save_phys = 0;
-static u32 *context_save_ptr = NULL;
-static unsigned int context_save_size = 0;
-
-static void save_begin(u32 *ptr, u32 syncpt_id, u32 waitbase)
-{
- /* set class to the unit to flush */
- ptr[0] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
- /*
- * Flush pipe and signal context read thread to start reading
- * sync point increment
- */
- ptr[1] = nvhost_opcode_imm(0, 0x100 | syncpt_id);
- ptr[2] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID,
- NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1);
- /* wait for base+1 */
- ptr[3] = nvhost_class_host_wait_syncpt_base(syncpt_id, waitbase, 1);
- ptr[4] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
- ptr[5] = nvhost_opcode_imm(0, syncpt_id);
- ptr[6] = nvhost_opcode_setclass(NV_HOST1X_CLASS_ID, 0, 0);
-}
-#define SAVE_BEGIN_SIZE 7
-
-static void save_direct(u32 *ptr, u32 start_reg, u32 count)
-{
- ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDOFF, 1);
- ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
- start_reg, true);
- ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
-}
-#define SAVE_DIRECT_SIZE 3
-
-static void save_indoffset(u32 *ptr, u32 offset_reg, u32 offset)
-{
- ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDOFF, 1);
- ptr[1] = nvhost_class_host_indoff_reg_write(NV_HOST_MODULE_GR3D,
- offset_reg, true);
- ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, 1);
- ptr[3] = offset;
-}
-#define SAVE_INDOFFSET_SIZE 4
-
-static inline void save_inddata(u32 *ptr, u32 data_reg, u32 count)
-{
- ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDOFF, 1);
- ptr[1] = nvhost_class_host_indoff_reg_read(NV_HOST_MODULE_GR3D,
- data_reg, false);
- ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INDDATA, count);
-}
-#define SAVE_INDDDATA_SIZE 3
-
-static void save_end(u32 *ptr, u32 syncpt_id, u32 waitbase)
-{
- /* Wait for context read service */
- ptr[0] = nvhost_opcode_nonincr(NV_CLASS_HOST_WAIT_SYNCPT_BASE, 1);
- ptr[1] = nvhost_class_host_wait_syncpt_base(syncpt_id, waitbase, 3);
- /* Increment syncpoint base */
- ptr[2] = nvhost_opcode_nonincr(NV_CLASS_HOST_INCR_SYNCPT_BASE, 1);
- ptr[3] = nvhost_class_host_incr_syncpt_base(waitbase, 3);
- /* set class back to the unit */
- ptr[4] = nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0);
-}
-#define SAVE_END_SIZE 5
-
-static void __init setup_save(
- u32 *ptr, unsigned int *words_save, unsigned int *words_restore,
- u32 syncpt_id, u32 waitbase)
-{
- const struct hwctx_reginfo *r;
- const struct hwctx_reginfo *rend;
- unsigned int save = SAVE_BEGIN_SIZE + SAVE_END_SIZE;
- unsigned int restore = RESTORE_BEGIN_SIZE + RESTORE_END_SIZE;
-
- if (ptr) {
- save_begin(ptr, syncpt_id, waitbase);
- ptr += SAVE_BEGIN_SIZE;
- }
-
- r = ctxsave_regs_3d;
- rend = ctxsave_regs_3d + ARRAY_SIZE(ctxsave_regs_3d);
- for ( ; r != rend; ++r) {
- u32 offset = r->offset;
- u32 count = r->count;
- switch (r->type) {
- case HWCTX_REGINFO_DIRECT:
- if (ptr) {
- save_direct(ptr, offset, count);
- ptr += SAVE_DIRECT_SIZE;
- }
- save += SAVE_DIRECT_SIZE;
- restore += RESTORE_DIRECT_SIZE;
- break;
- case HWCTX_REGINFO_INDIRECT:
- if (ptr) {
- save_indoffset(ptr, offset, 0);
- ptr += SAVE_INDOFFSET_SIZE;
- save_inddata(ptr, offset + 1, count);
- ptr += SAVE_INDDDATA_SIZE;
- }
- save += SAVE_INDOFFSET_SIZE;
- restore += RESTORE_INDOFFSET_SIZE;
- save += SAVE_INDDDATA_SIZE;
- restore += RESTORE_INDDATA_SIZE;
- break;
- case HWCTX_REGINFO_INDIRECT_OFFSET:
- if (ptr) {
- save_indoffset(ptr, offset, count);
- ptr += SAVE_INDOFFSET_SIZE;
- }
- save += SAVE_INDOFFSET_SIZE;
- restore += RESTORE_INDOFFSET_SIZE;
- continue; /* INDIRECT_DATA follows with real count */
- case HWCTX_REGINFO_INDIRECT_DATA:
- if (ptr) {
- save_inddata(ptr, offset, count);
- ptr += SAVE_INDDDATA_SIZE;
- }
- save += SAVE_INDDDATA_SIZE;
- restore += RESTORE_INDDATA_SIZE;
- break;
- }
- if (ptr) {
- memset(ptr, 0, count * 4);
- ptr += count;
- }
- save += count;
- restore += count;
- }
-
- if (ptr)
- save_end(ptr, syncpt_id, waitbase);
-
- if (words_save)
- *words_save = save;
- if (words_restore)
- *words_restore = restore;
- wmb();
-}
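-
-/*
- * setup_save() is used in two passes: first with ptr == NULL purely to
- * size the save/restore buffers, then with a real buffer to emit the
- * commands (see nvhost_3dctx_handler_init() below).
- */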
-
-/*** ctx3d ***/
-
-static struct nvhost_hwctx *ctx3d_alloc(struct nvhost_channel *ch)
-{
- struct nvhost_hwctx *ctx;
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return NULL;
- ctx->restore = nvmap_alloc(context_restore_size * 4, 32,
- NVMEM_HANDLE_WRITE_COMBINE,
- (void**)&ctx->save_cpu_data);
- if (IS_ERR_OR_NULL(ctx->restore)) {
- kfree(ctx);
- return NULL;
- }
- setup_restore(ctx->save_cpu_data, NVWAITBASE_3D);
- ctx->channel = ch;
- ctx->restore_phys = nvmap_pin_single(ctx->restore);
- ctx->restore_size = context_restore_size;
- ctx->save = context_save_buf;
- ctx->save_phys = context_save_phys;
- ctx->save_size = context_save_size;
- ctx->save_incrs = 3;
- ctx->restore_incrs = 1;
- ctx->valid = false;
- kref_init(&ctx->ref);
- return ctx;
-}
-
-static void ctx3d_free(struct kref *ref)
-{
- struct nvhost_hwctx *ctx = container_of(ref, struct nvhost_hwctx, ref);
- nvmap_free(ctx->restore, ctx->save_cpu_data);
- kfree(ctx);
-}
-
-static void ctx3d_get(struct nvhost_hwctx *ctx)
-{
- kref_get(&ctx->ref);
-}
-
-static void ctx3d_put(struct nvhost_hwctx *ctx)
-{
- kref_put(&ctx->ref, ctx3d_free);
-}
-
-static void ctx3d_save_service(struct nvhost_hwctx *ctx)
-{
- const struct hwctx_reginfo *r;
- const struct hwctx_reginfo *rend;
- unsigned int pending = 0;
- u32 *ptr = (u32 *)ctx->save_cpu_data + RESTORE_BEGIN_SIZE;
-
- BUG_ON(!ctx->save_cpu_data);
-
- r = ctxsave_regs_3d;
- rend = ctxsave_regs_3d + ARRAY_SIZE(ctxsave_regs_3d);
- for ( ; r != rend; ++r) {
- u32 count = r->count;
- switch (r->type) {
- case HWCTX_REGINFO_DIRECT:
- ptr += RESTORE_DIRECT_SIZE;
- break;
- case HWCTX_REGINFO_INDIRECT:
- ptr += RESTORE_INDOFFSET_SIZE + RESTORE_INDDATA_SIZE;
- break;
- case HWCTX_REGINFO_INDIRECT_OFFSET:
- ptr += RESTORE_INDOFFSET_SIZE;
- continue; /* INDIRECT_DATA follows with real count */
- case HWCTX_REGINFO_INDIRECT_DATA:
- ptr += RESTORE_INDDATA_SIZE;
- break;
- }
- restore_registers_from_fifo(ptr, count, ctx->channel, &pending);
- ptr += count;
- }
-
- BUG_ON((u32)((ptr + RESTORE_END_SIZE) - (u32*)ctx->save_cpu_data)
- != context_restore_size);
-
- wmb();
- nvhost_syncpt_cpu_incr(&ctx->channel->dev->syncpt, NVSYNCPT_3D);
-}
-
-
-/*** nvhost_3dctx ***/
-
-int __init nvhost_3dctx_handler_init(struct nvhost_hwctx_handler *h)
-{
- setup_save(NULL, &context_save_size, &context_restore_size, 0, 0);
-
- context_save_buf = nvmap_alloc(context_save_size * 4, 32,
- NVMEM_HANDLE_WRITE_COMBINE,
- (void**)&context_save_ptr);
- if (IS_ERR_OR_NULL(context_save_buf))
- return context_save_buf ? PTR_ERR(context_save_buf) : -ENOMEM;
- context_save_phys = nvmap_pin_single(context_save_buf);
- setup_save(context_save_ptr, NULL, NULL, NVSYNCPT_3D, NVWAITBASE_3D);
-
- h->alloc = ctx3d_alloc;
- h->get = ctx3d_get;
- h->put = ctx3d_put;
- h->save_service = ctx3d_save_service;
- return 0;
-}
-
-/* TODO: [ahatala 2010-05-27] */
-int __init nvhost_mpectx_handler_init(struct nvhost_hwctx_handler *h)
-{
- return 0;
-}
+++ /dev/null
-/*
- * drivers/video/tegra/host/nvhost_acm.c
- *
- * Tegra Graphics Host Automatic Clock Management
- *
- * Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include "nvhost_acm.h"
-#include <linux/string.h>
-#include <linux/sched.h>
-#include <linux/err.h>
-#include <linux/device.h>
-#include <mach/powergate.h>
-#include <mach/clk.h>
-
-#define ACM_TIMEOUT (1*HZ)
-
-void nvhost_module_busy(struct nvhost_module *mod)
-{
- mutex_lock(&mod->lock);
- cancel_delayed_work(&mod->powerdown);
- if ((atomic_inc_return(&mod->refcount) == 1) && !mod->powered) {
- if (mod->parent)
- nvhost_module_busy(mod->parent);
- if (mod->powergate_id != -1) {
- BUG_ON(mod->num_clks != 1);
- tegra_powergate_sequence_power_up(
- mod->powergate_id, mod->clk[0]);
- } else {
- int i;
- for (i = 0; i < mod->num_clks; i++)
- clk_enable(mod->clk[i]);
- }
- if (mod->func)
- mod->func(mod, NVHOST_POWER_ACTION_ON);
- mod->powered = true;
- }
- mutex_unlock(&mod->lock);
-}
-
-static void powerdown_handler(struct work_struct *work)
-{
- struct nvhost_module *mod;
- mod = container_of(to_delayed_work(work), struct nvhost_module, powerdown);
- mutex_lock(&mod->lock);
- if ((atomic_read(&mod->refcount) == 0) && mod->powered) {
- int i;
- if (mod->func)
- mod->func(mod, NVHOST_POWER_ACTION_OFF);
- for (i = 0; i < mod->num_clks; i++) {
- clk_disable(mod->clk[i]);
- }
- if (mod->powergate_id != -1) {
- tegra_periph_reset_assert(mod->clk[0]);
- tegra_powergate_power_off(mod->powergate_id);
- }
- mod->powered = false;
- if (mod->parent)
- nvhost_module_idle(mod->parent);
- }
- mutex_unlock(&mod->lock);
-}
-
-void nvhost_module_idle_mult(struct nvhost_module *mod, int refs)
-{
- bool kick = false;
-
- mutex_lock(&mod->lock);
- if (atomic_sub_return(refs, &mod->refcount) == 0) {
- BUG_ON(!mod->powered);
- schedule_delayed_work(&mod->powerdown, ACM_TIMEOUT);
- kick = true;
- }
- mutex_unlock(&mod->lock);
-
- if (kick)
- wake_up(&mod->idle);
-}
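-
-/*
- * Typical usage (sketch) is a busy/idle pair around register access:
- *
- *	nvhost_module_busy(mod);	clocks on, powergate lifted
- *	... touch module registers ...
- *	nvhost_module_idle(mod);	schedules the delayed powerdown
- */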
-
-static const char *get_module_clk_id(const char *module, int index)
-{
- if (index == 1 && strcmp(module, "gr2d") == 0)
- return "epp";
- else if (index == 0)
- return module;
- return NULL;
-}
-
-static int get_module_powergate_id(const char *module)
-{
- if (strcmp(module, "gr3d") == 0)
- return TEGRA_POWERGATE_3D;
- else if (strcmp(module, "mpe") == 0)
- return TEGRA_POWERGATE_MPE;
- return -1;
-}
-
-int nvhost_module_init(struct nvhost_module *mod, const char *name,
- nvhost_modulef func, struct nvhost_module *parent,
- struct device *dev)
-{
- int i = 0;
- mod->name = name;
-
- while (i < NVHOST_MODULE_MAX_CLOCKS) {
- long rate;
- mod->clk[i] = clk_get(dev, get_module_clk_id(name, i));
- if (IS_ERR_OR_NULL(mod->clk[i]))
- break;
- rate = clk_round_rate(mod->clk[i], UINT_MAX);
- if (rate < 0) {
- pr_err("%s: can't get maximum rate for %s\n",
- __func__, name);
- break;
- }
- if (rate != clk_get_rate(mod->clk[i])) {
- clk_set_rate(mod->clk[i], rate);
- }
- i++;
- }
-
- mod->num_clks = i;
- mod->func = func;
- mod->parent = parent;
- mod->powered = false;
- mod->powergate_id = get_module_powergate_id(name);
- mutex_init(&mod->lock);
- init_waitqueue_head(&mod->idle);
- INIT_DELAYED_WORK(&mod->powerdown, powerdown_handler);
-
- return 0;
-}
-
-static int is_module_idle(struct nvhost_module *mod)
-{
- int count;
- mutex_lock(&mod->lock);
- count = atomic_read(&mod->refcount);
- mutex_unlock(&mod->lock);
- return (count == 0);
-}
-
-void nvhost_module_suspend(struct nvhost_module *mod)
-{
- wait_event(mod->idle, is_module_idle(mod));
- flush_delayed_work(&mod->powerdown);
- BUG_ON(mod->powered);
-}
-
-void nvhost_module_deinit(struct nvhost_module *mod)
-{
- int i;
- nvhost_module_suspend(mod);
- for (i = 0; i < mod->num_clks; i++)
- clk_put(mod->clk[i]);
-}
+++ /dev/null
-/*
- * drivers/video/tegra/host/nvhost_acm.h
- *
- * Tegra Graphics Host Automatic Clock Management
- *
- * Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __NVHOST_ACM_H
-#define __NVHOST_ACM_H
-
-#include <linux/workqueue.h>
-#include <linux/wait.h>
-#include <linux/mutex.h>
-#include <linux/clk.h>
-
-#define NVHOST_MODULE_MAX_CLOCKS 2
-
-struct nvhost_module;
-
-enum nvhost_power_action {
- NVHOST_POWER_ACTION_OFF,
- NVHOST_POWER_ACTION_ON,
-};
-
-typedef void (*nvhost_modulef)(struct nvhost_module *mod, enum nvhost_power_action action);
-
-struct nvhost_module {
- const char *name;
- nvhost_modulef func;
- struct delayed_work powerdown;
- struct clk *clk[NVHOST_MODULE_MAX_CLOCKS];
- int num_clks;
- struct mutex lock;
- bool powered;
- atomic_t refcount;
- wait_queue_head_t idle;
- struct nvhost_module *parent;
- int powergate_id;
-};
-
-int nvhost_module_init(struct nvhost_module *mod, const char *name,
- nvhost_modulef func, struct nvhost_module *parent,
- struct device *dev);
-void nvhost_module_deinit(struct nvhost_module *mod);
-void nvhost_module_suspend(struct nvhost_module *mod);
-
-void nvhost_module_busy(struct nvhost_module *mod);
-void nvhost_module_idle_mult(struct nvhost_module *mod, int refs);
-
-static inline bool nvhost_module_powered(struct nvhost_module *mod)
-{
- return mod->powered;
-}
-
-static inline void nvhost_module_idle(struct nvhost_module *mod)
-{
- nvhost_module_idle_mult(mod, 1);
-}
-
-#endif
+++ /dev/null
-/*
- * drivers/video/tegra/host/nvhost_cdma.c
- *
- * Tegra Graphics Host Command DMA
- *
- * Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include "nvhost_cdma.h"
-#include "dev.h"
-#include <asm/cacheflush.h>
-
-/*
- * TODO:
- * stats
- * - for figuring out what to optimize further
- * resizable push buffer & sync queue
- * - some channels hardly need any, some channels (3d) could use more
- */
-
-#define cdma_to_channel(cdma) container_of(cdma, struct nvhost_channel, cdma)
-#define cdma_to_dev(cdma) ((cdma_to_channel(cdma))->dev)
-
-/*
- * push_buffer
- *
- * The push buffer is a circular array of words to be fetched by command DMA.
- * Note that it works slightly differently from the sync queue: fence == cur
- * means that the push buffer is full, not empty.
- */
-
-/* 8 bytes per slot. (This number does not include the final RESTART.) */
-#define PUSH_BUFFER_SIZE (NVHOST_GATHER_QUEUE_SIZE * 8)
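-
-/*
- * For illustration (the numbers follow from the define above): with
- * NVHOST_GATHER_QUEUE_SIZE == 512 the buffer is 4096 bytes. After
- * reset_push_buffer(), cur == 0 and fence == 4088, so
- * push_buffer_space() == ((4088 - 0) & 4095) / 8 == 511 slots; one
- * slot is sacrificed so that fence == cur can unambiguously mean
- * "full" rather than "empty".
- */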
-
-static void destroy_push_buffer(struct push_buffer *pb);
-
-/**
- * Reset to empty push buffer
- */
-static void reset_push_buffer(struct push_buffer *pb)
-{
- pb->fence = PUSH_BUFFER_SIZE - 8;
- pb->cur = 0;
-}
-
-/**
- * Init push buffer resources
- */
-static int init_push_buffer(struct push_buffer *pb)
-{
- pb->mem = NULL;
- pb->mapped = NULL;
- pb->phys = 0;
- reset_push_buffer(pb);
-
- /* allocate and map pushbuffer memory */
- pb->mem = nvmap_alloc(PUSH_BUFFER_SIZE + 4, 32,
- NVMEM_HANDLE_WRITE_COMBINE, (void**)&pb->mapped);
- if (IS_ERR_OR_NULL(pb->mem)) {
- pb->mem = NULL;
- goto fail;
- }
-
- /* pin pushbuffer and get physical address */
- pb->phys = nvmap_pin_single(pb->mem);
-
- /* put the restart at the end of pushbuffer memory */
- *(pb->mapped + (PUSH_BUFFER_SIZE >> 2)) = nvhost_opcode_restart(pb->phys);
-
- return 0;
-
-fail:
- destroy_push_buffer(pb);
- return -ENOMEM;
-}
-
-/**
- * Clean up push buffer resources
- */
-static void destroy_push_buffer(struct push_buffer *pb)
-{
- if (pb->mem) {
- if (pb->phys != 0) {
- nvmap_unpin(&pb->mem, 1);
- pb->phys = 0;
- }
-
- nvmap_free(pb->mem, pb->mapped);
- pb->mem = NULL;
- pb->mapped = NULL;
- }
-}
-
-/**
- * Push two words to the push buffer
- * Caller must ensure push buffer is not full
- */
-static void push_to_push_buffer(struct push_buffer *pb, u32 op1, u32 op2)
-{
- u32 cur = pb->cur;
- u32 *p = (u32*)((u32)pb->mapped + cur);
- BUG_ON(cur == pb->fence);
- *(p++) = op1;
- *(p++) = op2;
- pb->cur = (cur + 8) & (PUSH_BUFFER_SIZE - 1);
-}
-
-/**
- * Pop a number of two-word slots from the push buffer
- * Caller must ensure push buffer is not empty
- */
-static void pop_from_push_buffer(struct push_buffer *pb, unsigned int slots)
-{
- pb->fence = (pb->fence + slots * 8) & (PUSH_BUFFER_SIZE - 1);
-}
-
-/**
- * Return the number of free two-word slots in the push buffer
- */
-static u32 push_buffer_space(struct push_buffer *pb)
-{
- return ((pb->fence - pb->cur) & (PUSH_BUFFER_SIZE - 1)) / 8;
-}
-
-static u32 push_buffer_putptr(struct push_buffer *pb)
-{
- return pb->phys + pb->cur;
-}
-
-
-/* Sync Queue
- *
- * The sync queue is a circular buffer of u32s interpreted as:
- * 0: SyncPointID
- * 1: SyncPointValue
- * 2: NumSlots (how many pushbuffer slots to free)
- * 3: NumHandles
- * 4..: NumHandles * nvmemhandle to unpin
- *
- * There's always one word unused, so (accounting for wrap):
- * - Write == Read => queue empty
- * - Write + 1 == Read => queue full
- * The queue must not be left with less than SYNC_QUEUE_MIN_ENTRY words
- * of space at the end of the array.
- *
- * We want to pass contiguous arrays of handles to nvmap_unpin, so arrays
- * that would wrap at the end of the buffer will be split into two (or more)
- * entries.
- */
-
-/* Number of words needed to store an entry containing one handle */
-#define SYNC_QUEUE_MIN_ENTRY 5
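-
-/*
- * For illustration (hypothetical handle values): an entry covering two
- * handles occupies 4 + 2 = 6 words, e.g.
- *	{ syncpt_id, syncpt_val, nr_slots, 2, h0, h1 }
- * meaning "once syncpt_id reaches syncpt_val, free nr_slots pushbuffer
- * slots and unpin h0 and h1". SYNC_QUEUE_MIN_ENTRY is 5 because the
- * smallest useful entry is the four-word header plus one handle.
- */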
-
-/**
- * Reset to empty queue.
- */
-static void reset_sync_queue(struct sync_queue *queue)
-{
- queue->read = 0;
- queue->write = 0;
-}
-
-/**
- * Find the number of handles that can be stashed in the sync queue without
- * waiting.
- * 0 -> queue is full, must update to wait for some entries to be freed.
- */
-static unsigned int sync_queue_space(struct sync_queue *queue)
-{
- unsigned int read = queue->read;
- unsigned int write = queue->write;
- u32 size;
-
- BUG_ON(read > (NVHOST_SYNC_QUEUE_SIZE - SYNC_QUEUE_MIN_ENTRY));
- BUG_ON(write > (NVHOST_SYNC_QUEUE_SIZE - SYNC_QUEUE_MIN_ENTRY));
-
- /*
- * We can use all of the space up to the end of the buffer, unless the
- * read position is within that space (the read position may advance
- * asynchronously, but that can't take space away once we've seen it).
- */
- if (read > write) {
- size = (read - 1) - write;
- } else {
- size = NVHOST_SYNC_QUEUE_SIZE - write;
-
-		/*
-		 * If the read position is zero, it gets complicated: we can't
-		 * use the last word in the buffer, because that would leave
-		 * the queue looking empty; and if we used too much, there
-		 * would be no room left for a minimum entry, so
-		 * add_to_sync_queue would have to wrap - again leaving
-		 * write == read == 0, an apparently empty queue.
-		 */
- if (read == 0)
- size -= SYNC_QUEUE_MIN_ENTRY;
- }
-
- /*
- * There must be room for an entry header and at least one handle,
- * otherwise we report a full queue.
- */
- if (size < SYNC_QUEUE_MIN_ENTRY)
- return 0;
- /* Minimum entry stores one handle */
- return (size - SYNC_QUEUE_MIN_ENTRY) + 1;
-}
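-
-/*
- * For illustration (hypothetical positions): read == 0, write == 8000
- * gives size = 8192 - 8000 - SYNC_QUEUE_MIN_ENTRY = 187, so 183 handles
- * fit; read == 100, write == 96 gives size = 3, and the queue reports
- * full because even a minimum entry could no longer be stored.
- */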
-
-/**
- * Add an entry to the sync queue.
- */
-static void add_to_sync_queue(struct sync_queue *queue,
- u32 sync_point_id, u32 sync_point_value,
- u32 nr_slots,
- struct nvmap_handle **handles, u32 nr_handles)
-{
- u32 write = queue->write;
- u32 *p = queue->buffer + write;
- u32 size = 4 + nr_handles;
-
- BUG_ON(sync_point_id == NVSYNCPT_INVALID);
- BUG_ON(sync_queue_space(queue) < nr_handles);
-
- write += size;
- BUG_ON(write > NVHOST_SYNC_QUEUE_SIZE);
-
- *p++ = sync_point_id;
- *p++ = sync_point_value;
- *p++ = nr_slots;
- *p++ = nr_handles;
- if (nr_handles)
- memcpy(p, handles, nr_handles*sizeof(struct nvmap_handle *));
-
- /* If there's not enough room for another entry, wrap to the start. */
- if ((write + SYNC_QUEUE_MIN_ENTRY) > NVHOST_SYNC_QUEUE_SIZE) {
- /*
- * It's an error for the read position to be zero, as that
- * would mean we emptied the queue while adding something.
- */
- BUG_ON(queue->read == 0);
- write = 0;
- }
-
- queue->write = write;
-}
-
-/**
- * Get a pointer to the next entry in the queue, or NULL if the queue is empty.
- * Doesn't consume the entry.
- */
-static u32 *sync_queue_head(struct sync_queue *queue)
-{
- u32 read = queue->read;
- u32 write = queue->write;
-
- BUG_ON(read > (NVHOST_SYNC_QUEUE_SIZE - SYNC_QUEUE_MIN_ENTRY));
- BUG_ON(write > (NVHOST_SYNC_QUEUE_SIZE - SYNC_QUEUE_MIN_ENTRY));
-
- if (read == write)
- return NULL;
- return queue->buffer + read;
-}
-
-/**
- * Advances to the next queue entry, if you want to consume it.
- */
-static void
-dequeue_sync_queue_head(struct sync_queue *queue)
-{
- u32 read = queue->read;
- u32 size;
-
- BUG_ON(read == queue->write);
-
- size = 4 + queue->buffer[read + 3];
-
- read += size;
- BUG_ON(read > NVHOST_SYNC_QUEUE_SIZE);
-
- /* If there's not enough room for another entry, wrap to the start. */
- if ((read + SYNC_QUEUE_MIN_ENTRY) > NVHOST_SYNC_QUEUE_SIZE)
- read = 0;
-
- queue->read = read;
-}
-
-
-/*** Cdma internal stuff ***/
-
-/**
- * Kick channel DMA into action by writing its PUT offset (if it has changed)
- */
-static void kick_cdma(struct nvhost_cdma *cdma)
-{
- u32 put = push_buffer_putptr(&cdma->push_buffer);
- if (put != cdma->last_put) {
- void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
- wmb();
- writel(put, chan_regs + HOST1X_CHANNEL_DMAPUT);
- cdma->last_put = put;
- }
-}
-
-/**
- * Return the status of the cdma's sync queue or push buffer for the given event
- * - sq empty: returns 1 if the sync queue is empty, 0 otherwise
- * - sq space: returns the number of handles that can be stored in the queue
- * - pb space: returns the number of free slots in the channel's push buffer
- * Must be called with the cdma lock held.
- */
-static unsigned int cdma_status(struct nvhost_cdma *cdma, enum cdma_event event)
-{
- switch (event) {
- case CDMA_EVENT_SYNC_QUEUE_EMPTY:
- return sync_queue_head(&cdma->sync_queue) ? 0 : 1;
- case CDMA_EVENT_SYNC_QUEUE_SPACE:
- return sync_queue_space(&cdma->sync_queue);
- case CDMA_EVENT_PUSH_BUFFER_SPACE:
- return push_buffer_space(&cdma->push_buffer);
- default:
- return 0;
- }
-}
-
-/**
- * Sleep (if necessary) until the requested event happens
- * - CDMA_EVENT_SYNC_QUEUE_EMPTY : sync queue is completely empty;
- *   returns 1
- * - CDMA_EVENT_SYNC_QUEUE_SPACE : there is space in the sync queue;
- *   returns the number of handles that can be stored
- * - CDMA_EVENT_PUSH_BUFFER_SPACE : there is space in the push buffer;
- *   returns the number of free slots (> 0)
- * Must be called with the cdma lock held.
- */
-static unsigned int wait_cdma(struct nvhost_cdma *cdma, enum cdma_event event)
-{
- for (;;) {
- unsigned int space = cdma_status(cdma, event);
- if (space)
- return space;
-
- BUG_ON(cdma->event != CDMA_EVENT_NONE);
- cdma->event = event;
-
- mutex_unlock(&cdma->lock);
- down(&cdma->sem);
- mutex_lock(&cdma->lock);
- }
-}
-
-/**
- * For all sync queue entries that have already finished according to the
- * current sync point registers:
- * - unpin & unref their mems
- * - pop their push buffer slots
- * - remove them from the sync queue
- * This is normally called from the host code's worker thread, but can be
- * called manually if necessary.
- * Must be called with the cdma lock held.
- */
-static void update_cdma(struct nvhost_cdma *cdma)
-{
- bool signal = false;
- struct nvhost_master *dev = cdma_to_dev(cdma);
-
- BUG_ON(!cdma->running);
-
- /*
- * Walk the sync queue, reading the sync point registers as necessary,
- * to consume as many sync queue entries as possible without blocking
- */
- for (;;) {
- u32 syncpt_id, syncpt_val;
- unsigned int nr_slots, nr_handles;
- struct nvmap_handle **handles;
- u32 *sync;
-
- sync = sync_queue_head(&cdma->sync_queue);
- if (!sync) {
- if (cdma->event == CDMA_EVENT_SYNC_QUEUE_EMPTY)
- signal = true;
- break;
- }
-
- syncpt_id = sync[0];
- syncpt_val = sync[1];
-
- BUG_ON(syncpt_id == NVSYNCPT_INVALID);
-
- /* Check whether this syncpt has completed, and bail if not */
- if (!nvhost_syncpt_min_cmp(&dev->syncpt, syncpt_id, syncpt_val))
- break;
-
- nr_slots = sync[2];
- nr_handles = sync[3];
- handles = (struct nvmap_handle **)(sync + 4);
-
- /* Unpin the memory */
- if (nr_handles)
- nvmap_unpin(handles, nr_handles);
-
- /* Pop push buffer slots */
- if (nr_slots) {
- pop_from_push_buffer(&cdma->push_buffer, nr_slots);
- if (cdma->event == CDMA_EVENT_PUSH_BUFFER_SPACE)
- signal = true;
- }
-
- dequeue_sync_queue_head(&cdma->sync_queue);
- if (cdma->event == CDMA_EVENT_SYNC_QUEUE_SPACE)
- signal = true;
- }
-
- /* Wake up CdmaWait() if the requested event happened */
- if (signal) {
- cdma->event = CDMA_EVENT_NONE;
- up(&cdma->sem);
- }
-}
-
-/**
- * Create a cdma
- */
-int nvhost_cdma_init(struct nvhost_cdma *cdma)
-{
- int err;
-
- mutex_init(&cdma->lock);
- sema_init(&cdma->sem, 0);
- cdma->event = CDMA_EVENT_NONE;
- cdma->running = false;
- err = init_push_buffer(&cdma->push_buffer);
- if (err)
- return err;
- reset_sync_queue(&cdma->sync_queue);
- return 0;
-}
-
-/**
- * Destroy a cdma
- */
-void nvhost_cdma_deinit(struct nvhost_cdma *cdma)
-{
- BUG_ON(cdma->running);
- destroy_push_buffer(&cdma->push_buffer);
-}
-
-static void start_cdma(struct nvhost_cdma *cdma)
-{
- void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
-
- if (cdma->running)
- return;
-
- cdma->last_put = push_buffer_putptr(&cdma->push_buffer);
-
- writel(nvhost_channel_dmactrl(true, false, false),
- chan_regs + HOST1X_CHANNEL_DMACTRL);
-
- /* set base, put, end pointer (all of memory) */
- writel(0, chan_regs + HOST1X_CHANNEL_DMASTART);
- writel(cdma->last_put, chan_regs + HOST1X_CHANNEL_DMAPUT);
- writel(0xFFFFFFFF, chan_regs + HOST1X_CHANNEL_DMAEND);
-
- /* reset GET */
- writel(nvhost_channel_dmactrl(true, true, true),
- chan_regs + HOST1X_CHANNEL_DMACTRL);
-
- /* start the command DMA */
- writel(nvhost_channel_dmactrl(false, false, false),
- chan_regs + HOST1X_CHANNEL_DMACTRL);
-
- cdma->running = true;
-}
-
-void nvhost_cdma_stop(struct nvhost_cdma *cdma)
-{
- void __iomem *chan_regs = cdma_to_channel(cdma)->aperture;
-
- if (!cdma->running)
- return;
-
- mutex_lock(&cdma->lock);
- wait_cdma(cdma, CDMA_EVENT_SYNC_QUEUE_EMPTY);
- mutex_unlock(&cdma->lock);
- writel(nvhost_channel_dmactrl(true, false, false),
- chan_regs + HOST1X_CHANNEL_DMACTRL);
-
- cdma->running = false;
-}
-
-/**
- * Begin a cdma submit
- */
-void nvhost_cdma_begin(struct nvhost_cdma *cdma)
-{
- if (!cdma->running)
- start_cdma(cdma);
- mutex_lock(&cdma->lock);
- cdma->slots_free = 0;
- cdma->slots_used = 0;
-}
-
-/**
- * Push two words into a push buffer slot
- * Blocks as necessary if the push buffer is full.
- */
-void nvhost_cdma_push(struct nvhost_cdma *cdma, u32 op1, u32 op2)
-{
- u32 slots_free = cdma->slots_free;
- if (slots_free == 0) {
- kick_cdma(cdma);
- slots_free = wait_cdma(cdma, CDMA_EVENT_PUSH_BUFFER_SPACE);
- }
- cdma->slots_free = slots_free - 1;
- cdma->slots_used++;
- push_to_push_buffer(&cdma->push_buffer, op1, op2);
-}
-
-/**
- * End a cdma submit
- * Kick off DMA, add a contiguous block of memory handles to the sync queue,
- * and a number of slots to be freed from the pushbuffer.
- * Blocks as necessary if the sync queue is full.
- * The handles for a submit must all be pinned at the same time, but they
- * can be unpinned in smaller chunks.
- */
-void nvhost_cdma_end(struct nvhost_cdma *cdma,
- u32 sync_point_id, u32 sync_point_value,
- struct nvmap_handle **handles, unsigned int nr_handles)
-{
- kick_cdma(cdma);
-
- while (nr_handles || cdma->slots_used) {
- unsigned int count;
- /*
- * Wait until there's enough room in the
- * sync queue to write something.
- */
- count = wait_cdma(cdma, CDMA_EVENT_SYNC_QUEUE_SPACE);
-
-		/*
-		 * Add handles to the sync queue (as many as will fit);
-		 * any that don't fit are written as further entries on
-		 * later iterations of this loop
-		 */
- if (count > nr_handles)
- count = nr_handles;
- add_to_sync_queue(&cdma->sync_queue,
- sync_point_id, sync_point_value,
- cdma->slots_used, handles, count);
- /* NumSlots only goes in the first packet */
- cdma->slots_used = 0;
- handles += count;
- nr_handles -= count;
- }
-
- mutex_unlock(&cdma->lock);
-}
-
-/**
- * Update cdma state according to current sync point values
- */
-void nvhost_cdma_update(struct nvhost_cdma *cdma)
-{
- mutex_lock(&cdma->lock);
- update_cdma(cdma);
- mutex_unlock(&cdma->lock);
-}
-
-/**
- * Manually spin until all CDMA has finished. Used if an async update
- * cannot be scheduled for any reason.
- */
-void nvhost_cdma_flush(struct nvhost_cdma *cdma)
-{
- mutex_lock(&cdma->lock);
- while (sync_queue_head(&cdma->sync_queue)) {
- update_cdma(cdma);
- mutex_unlock(&cdma->lock);
- schedule();
- mutex_lock(&cdma->lock);
- }
- mutex_unlock(&cdma->lock);
-}
+++ /dev/null
-/*
- * drivers/video/tegra/host/nvhost_cdma.h
- *
- * Tegra Graphics Host Command DMA
- *
- * Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __NVHOST_CDMA_H
-#define __NVHOST_CDMA_H
-
-#include <linux/sched.h>
-#include <linux/semaphore.h>
-#include <linux/nvhost.h>
-#include <linux/nvmap.h>
-
-#include "nvhost_acm.h"
-
-/*
- * cdma
- *
- * This is in charge of a host command DMA channel.
- * Sends ops to a push buffer, and takes responsibility for unpinning
- * (& possibly freeing) of memory after those ops have completed.
- * Producer:
- * begin
- * push - send ops to the push buffer
- * end - start command DMA and enqueue handles to be unpinned
- * Consumer:
- * update - call to update sync queue and push buffer, unpin memory
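- *
- * A minimal producer sketch (illustrative only; 'ch', save_size,
- * save_phys, syncval and the handle array are hypothetical, and a real
- * submit also pins the handles and reserves the sync point value first):
- *
- *	nvhost_cdma_begin(&ch->cdma);
- *	nvhost_cdma_push(&ch->cdma,
- *			 nvhost_opcode_gather(0, save_size), save_phys);
- *	nvhost_cdma_end(&ch->cdma, NVSYNCPT_3D, syncval,
- *			handles, nr_handles);
- *
- * The consumer side runs via nvhost_cdma_update(), typically from the
- * submit-complete interrupt action.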
- */
-
-/* Size of the sync queue. If it is too small, we won't be able to queue up
- * many command buffers. If it is too large, we waste memory. */
-#define NVHOST_SYNC_QUEUE_SIZE 8192
-
-/* Number of gathers we allow to be queued up per channel. Must be a
- power of two. Currently sized such that pushbuffer is 4KB (512*8B). */
-#define NVHOST_GATHER_QUEUE_SIZE 512
-
-struct push_buffer {
- struct nvmap_handle *mem; /* handle to pushbuffer memory */
- u32 *mapped; /* mapped pushbuffer memory */
- u32 phys; /* physical address of pushbuffer */
- u32 fence; /* index we've written */
- u32 cur; /* index to write to */
-};
-
-struct sync_queue {
- unsigned int read; /* read position within buffer */
- unsigned int write; /* write position within buffer */
- u32 buffer[NVHOST_SYNC_QUEUE_SIZE]; /* queue data */
-};
-
-enum cdma_event {
- CDMA_EVENT_NONE, /* not waiting for any event */
- CDMA_EVENT_SYNC_QUEUE_EMPTY, /* wait for empty sync queue */
- CDMA_EVENT_SYNC_QUEUE_SPACE, /* wait for space in sync queue */
- CDMA_EVENT_PUSH_BUFFER_SPACE /* wait for space in push buffer */
-};
-
-struct nvhost_cdma {
- struct mutex lock; /* controls access to shared state */
- struct semaphore sem; /* signalled when event occurs */
- enum cdma_event event; /* event that sem is waiting for */
- unsigned int slots_used; /* pb slots used in current submit */
- unsigned int slots_free; /* pb slots free in current submit */
- unsigned int last_put; /* last value written to DMAPUT */
- struct push_buffer push_buffer; /* channel's push buffer */
- struct sync_queue sync_queue; /* channel's sync queue */
- bool running;
-};
-
-int nvhost_cdma_init(struct nvhost_cdma *cdma);
-void nvhost_cdma_deinit(struct nvhost_cdma *cdma);
-void nvhost_cdma_stop(struct nvhost_cdma *cdma);
-void nvhost_cdma_begin(struct nvhost_cdma *cdma);
-void nvhost_cdma_push(struct nvhost_cdma *cdma, u32 op1, u32 op2);
-void nvhost_cdma_end(struct nvhost_cdma *cdma,
- u32 sync_point_id, u32 sync_point_value,
- struct nvmap_handle **handles, unsigned int nr_handles);
-void nvhost_cdma_update(struct nvhost_cdma *cdma);
-void nvhost_cdma_flush(struct nvhost_cdma *cdma);
-
-#endif
+++ /dev/null
-/*
- * drivers/video/tegra/host/nvhost_channel.c
- *
- * Tegra Graphics Host Channel
- *
- * Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include "nvhost_channel.h"
-#include "dev.h"
-#include "nvhost_hwctx.h"
-#include <linux/platform_device.h>
-
-#define NVMODMUTEX_2D_FULL (1)
-#define NVMODMUTEX_2D_SIMPLE (2)
-#define NVMODMUTEX_2D_SB_A (3)
-#define NVMODMUTEX_2D_SB_B (4)
-#define NVMODMUTEX_3D (5)
-#define NVMODMUTEX_DISPLAYA (6)
-#define NVMODMUTEX_DISPLAYB (7)
-#define NVMODMUTEX_VI (8)
-#define NVMODMUTEX_DSI (9)
-
-static void power_2d(struct nvhost_module *mod, enum nvhost_power_action action);
-static void power_3d(struct nvhost_module *mod, enum nvhost_power_action action);
-static void power_mpe(struct nvhost_module *mod, enum nvhost_power_action action);
-
-static const struct nvhost_channeldesc channelmap[] = {
-{
- /* channel 0 */
- .name = "display",
- .syncpts = BIT(NVSYNCPT_DISP0) | BIT(NVSYNCPT_DISP1) |
- BIT(NVSYNCPT_VBLANK0) | BIT(NVSYNCPT_VBLANK1),
- .modulemutexes = BIT(NVMODMUTEX_DISPLAYA) | BIT(NVMODMUTEX_DISPLAYB),
-},
-{
- /* channel 1 */
- .name = "gr3d",
- .syncpts = BIT(NVSYNCPT_3D),
- .waitbases = BIT(NVWAITBASE_3D),
- .modulemutexes = BIT(NVMODMUTEX_3D),
- .class = NV_GRAPHICS_3D_CLASS_ID,
- .power = power_3d,
-},
-{
- /* channel 2 */
- .name = "gr2d",
- .syncpts = BIT(NVSYNCPT_2D_0) | BIT(NVSYNCPT_2D_1),
- .waitbases = BIT(NVWAITBASE_2D_0) | BIT(NVWAITBASE_2D_1),
- .modulemutexes = BIT(NVMODMUTEX_2D_FULL) | BIT(NVMODMUTEX_2D_SIMPLE) |
- BIT(NVMODMUTEX_2D_SB_A) | BIT(NVMODMUTEX_2D_SB_B),
- .power = power_2d,
-},
-{
- /* channel 3 */
- .name = "isp",
- .syncpts = 0,
-},
-{
- /* channel 4 */
- .name = "vi",
- .syncpts = BIT(NVSYNCPT_VI_ISP_0) | BIT(NVSYNCPT_VI_ISP_1) |
- BIT(NVSYNCPT_VI_ISP_2) | BIT(NVSYNCPT_VI_ISP_3) |
- BIT(NVSYNCPT_VI_ISP_4) | BIT(NVSYNCPT_VI_ISP_5),
- .modulemutexes = BIT(NVMODMUTEX_VI),
-},
-{
- /* channel 5 */
- .name = "mpe",
- .syncpts = BIT(NVSYNCPT_MPE) | BIT(NVSYNCPT_MPE_EBM_EOF) |
- BIT(NVSYNCPT_MPE_WR_SAFE),
- .waitbases = BIT(NVWAITBASE_MPE),
- .class = NV_VIDEO_ENCODE_MPEG_CLASS_ID,
- .power = power_mpe,
-},
-{
- /* channel 6 */
- .name = "dsi",
- .syncpts = BIT(NVSYNCPT_DSI),
- .modulemutexes = BIT(NVMODMUTEX_DSI),
-}};
-
-static inline void __iomem *channel_aperture(void __iomem *p, int ndx)
-{
- ndx += NVHOST_CHANNEL_BASE;
- p += NV_HOST1X_CHANNEL0_BASE;
- p += ndx * NV_HOST1X_CHANNEL_MAP_SIZE_BYTES;
- return p;
-}
-
-int __init nvhost_channel_init(struct nvhost_channel *ch,
- struct nvhost_master *dev, int index)
-{
- BUILD_BUG_ON(NVHOST_NUMCHANNELS != ARRAY_SIZE(channelmap));
-
- ch->dev = dev;
- ch->desc = &channelmap[index];
- ch->aperture = channel_aperture(dev->aperture, index);
- mutex_init(&ch->reflock);
- mutex_init(&ch->submitlock);
-
- return nvhost_hwctx_handler_init(&ch->ctxhandler, ch->desc->name);
-}
-
-struct nvhost_channel *nvhost_getchannel(struct nvhost_channel *ch)
-{
- int err = 0;
- mutex_lock(&ch->reflock);
- if (ch->refcount == 0) {
- err = nvhost_module_init(&ch->mod, ch->desc->name,
- ch->desc->power, &ch->dev->mod,
- &ch->dev->pdev->dev);
- if (!err) {
- err = nvhost_cdma_init(&ch->cdma);
- if (err)
- nvhost_module_deinit(&ch->mod);
- }
- }
-	if (!err)
-		ch->refcount++;
- mutex_unlock(&ch->reflock);
-
- return err ? NULL : ch;
-}
-
-void nvhost_putchannel(struct nvhost_channel *ch, struct nvhost_hwctx *ctx)
-{
- if (ctx) {
- mutex_lock(&ch->submitlock);
- if (ch->cur_ctx == ctx)
- ch->cur_ctx = NULL;
- mutex_unlock(&ch->submitlock);
- }
-
- mutex_lock(&ch->reflock);
- if (ch->refcount == 1) {
- nvhost_module_deinit(&ch->mod);
- /* cdma may already be stopped, that's ok */
- nvhost_cdma_stop(&ch->cdma);
- nvhost_cdma_deinit(&ch->cdma);
- }
- ch->refcount--;
- mutex_unlock(&ch->reflock);
-}
-
-void nvhost_channel_suspend(struct nvhost_channel *ch)
-{
- mutex_lock(&ch->reflock);
- BUG_ON(nvhost_module_powered(&ch->mod));
- nvhost_cdma_stop(&ch->cdma);
- mutex_unlock(&ch->reflock);
-}
-
-void nvhost_channel_submit(
- struct nvhost_channel *ch,
- struct nvhost_op_pair *ops,
- int num_pairs,
- struct nvhost_cpuinterrupt *intrs,
- int num_intrs,
- struct nvmap_handle **unpins,
- int num_unpins,
- u32 syncpt_id,
- u32 syncpt_val)
-{
- int i;
- struct nvhost_op_pair* p;
-
- /* schedule interrupts */
- for (i = 0; i < num_intrs; i++) {
- nvhost_intr_add_action(&ch->dev->intr, syncpt_id, intrs[i].syncpt_val,
- NVHOST_INTR_ACTION_CTXSAVE, intrs[i].intr_data, NULL);
- }
-
- /* begin a CDMA submit */
- nvhost_cdma_begin(&ch->cdma);
-
- /* push ops */
- for (i = 0, p = ops; i < num_pairs; i++, p++)
- nvhost_cdma_push(&ch->cdma, p->op1, p->op2);
-
- /* end CDMA submit & stash pinned hMems into sync queue for later cleanup */
- nvhost_cdma_end(&ch->cdma, syncpt_id, syncpt_val, unpins, num_unpins);
-}
-
-static void power_2d(struct nvhost_module *mod, enum nvhost_power_action action)
-{
- /* TODO: [ahatala 2010-06-17] reimplement EPP hang war */
- if (action == NVHOST_POWER_ACTION_OFF) {
- /* TODO: [ahatala 2010-06-17] reset EPP */
- }
-}
-
-static void power_3d(struct nvhost_module *mod, enum nvhost_power_action action)
-{
- struct nvhost_channel *ch = container_of(mod, struct nvhost_channel, mod);
-
- if (action == NVHOST_POWER_ACTION_OFF) {
- mutex_lock(&ch->submitlock);
- if (ch->cur_ctx) {
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
- struct nvhost_op_pair save;
- struct nvhost_cpuinterrupt ctxsw;
- u32 syncval;
- syncval = nvhost_syncpt_incr_max(&ch->dev->syncpt,
- NVSYNCPT_3D,
- ch->cur_ctx->save_incrs);
- save.op1 = nvhost_opcode_gather(0, ch->cur_ctx->save_size);
- save.op2 = ch->cur_ctx->save_phys;
- ctxsw.intr_data = ch->cur_ctx;
- ctxsw.syncpt_val = syncval - 1;
- ch->cur_ctx->valid = true;
- ch->ctxhandler.get(ch->cur_ctx);
- ch->cur_ctx = NULL;
- nvhost_channel_submit(ch, &save, 1, &ctxsw, 1, NULL, 0, NVSYNCPT_3D, syncval);
- nvhost_intr_add_action(&ch->dev->intr, NVSYNCPT_3D, syncval,
- NVHOST_INTR_ACTION_WAKEUP, &wq, NULL);
- wait_event(wq, nvhost_syncpt_min_cmp(&ch->dev->syncpt, NVSYNCPT_3D, syncval));
- nvhost_cdma_update(&ch->cdma);
- }
- mutex_unlock(&ch->submitlock);
- }
-}
-
-static void power_mpe(struct nvhost_module *mod, enum nvhost_power_action action)
-{
-}
+++ /dev/null
-/*
- * drivers/video/tegra/host/nvhost_channel.h
- *
- * Tegra Graphics Host Channel
- *
- * Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __NVHOST_CHANNEL_H
-#define __NVHOST_CHANNEL_H
-
-#include "nvhost_cdma.h"
-#include "nvhost_acm.h"
-#include "nvhost_hwctx.h"
-
-#include <linux/cdev.h>
-#include <linux/io.h>
-
-#define NVHOST_CHANNEL_BASE 0
-#define NVHOST_NUMCHANNELS (NV_HOST1X_CHANNELS - 1)
-#define NVHOST_MAX_GATHERS 512
-#define NVHOST_MAX_HANDLES 1280
-
-struct nvhost_master;
-
-struct nvhost_channeldesc {
- const char *name;
- nvhost_modulef power;
- u32 syncpts;
- u32 waitbases;
- u32 modulemutexes;
- u32 class;
-};
-
-struct nvhost_channel {
- int refcount;
- struct mutex reflock;
- struct mutex submitlock;
- void __iomem *aperture;
- struct nvhost_master *dev;
- const struct nvhost_channeldesc *desc;
- struct nvhost_hwctx *cur_ctx;
- struct device *node;
- struct cdev cdev;
- struct nvhost_hwctx_handler ctxhandler;
- struct nvhost_module mod;
- struct nvhost_cdma cdma;
-};
-
-struct nvhost_op_pair {
- u32 op1;
- u32 op2;
-};
-
-struct nvhost_cpuinterrupt {
- u32 syncpt_val;
- void *intr_data;
-};
-
-int nvhost_channel_init(
- struct nvhost_channel *ch,
- struct nvhost_master *dev, int index);
-
-void nvhost_channel_submit(
- struct nvhost_channel *ch,
- struct nvhost_op_pair *ops,
- int num_pairs,
- struct nvhost_cpuinterrupt *intrs,
- int num_intrs,
- struct nvmap_handle **unpins,
- int num_unpins,
- u32 syncpt_id,
- u32 syncpt_val);
-
-struct nvhost_channel *nvhost_getchannel(struct nvhost_channel *ch);
-void nvhost_putchannel(struct nvhost_channel *ch, struct nvhost_hwctx *ctx);
-void nvhost_channel_suspend(struct nvhost_channel *ch);
-
-#endif
+++ /dev/null
-/*
- * drivers/video/tegra/host/nvhost_cpuaccess.c
- *
- * Tegra Graphics Host Cpu Register Access
- *
- * Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include "nvhost_cpuaccess.h"
-#include "dev.h"
-#include <linux/string.h>
-
-#define cpuaccess_to_dev(ctx) container_of(ctx, struct nvhost_master, cpuaccess)
-
-int nvhost_cpuaccess_init(struct nvhost_cpuaccess *ctx,
- struct platform_device *pdev)
-{
- int i;
- for (i = 0; i < NVHOST_MODULE_NUM; i++) {
- struct resource *mem;
- mem = platform_get_resource(pdev, IORESOURCE_MEM, i+1);
- if (!mem) {
- dev_err(&pdev->dev, "missing module memory resource\n");
- return -ENXIO;
- }
-
- ctx->regs[i] = ioremap(mem->start, resource_size(mem));
- if (!ctx->regs[i]) {
- dev_err(&pdev->dev, "failed to map module registers\n");
- return -ENXIO;
- }
- }
-
- return 0;
-}
-
-void nvhost_cpuaccess_deinit(struct nvhost_cpuaccess *ctx)
-{
-	int i;
-	for (i = 0; i < NVHOST_MODULE_NUM; i++) {
-		/* the regions were never requested, so only unmap them */
-		iounmap(ctx->regs[i]);
-	}
-}
-
-int nvhost_mutex_try_lock(struct nvhost_cpuaccess *ctx, unsigned int idx)
-{
- struct nvhost_master *dev = cpuaccess_to_dev(ctx);
- void __iomem *sync_regs = dev->sync_aperture;
- u32 reg;
-
-	/* reading an mlock register returns 0 when the lock is acquired;
-	 * writing 0 releases the lock. */
- nvhost_module_busy(&dev->mod);
- reg = readl(sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4));
- if (reg) {
- nvhost_module_idle(&dev->mod);
- return -ERESTARTSYS;
- }
- return 0;
-}
-
-void nvhost_mutex_unlock(struct nvhost_cpuaccess *ctx, unsigned int idx)
-{
- struct nvhost_master *dev = cpuaccess_to_dev(ctx);
- void __iomem *sync_regs = dev->sync_aperture;
- writel(0, sync_regs + (HOST1X_SYNC_MLOCK_0 + idx * 4));
- nvhost_module_idle(&dev->mod);
-}
-
-void nvhost_read_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
- u32 offset, size_t size, void *values)
-{
- struct nvhost_master *dev = cpuaccess_to_dev(ctx);
- void __iomem *p = ctx->regs[module] + offset;
- u32* out = (u32*)values;
- BUG_ON(size & 3);
- size >>= 2;
- nvhost_module_busy(&dev->mod);
- while (size--) {
- *(out++) = readl(p);
- p += 4;
- }
- rmb();
- nvhost_module_idle(&dev->mod);
-}
-
-void nvhost_write_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
- u32 offset, size_t size, const void *values)
-{
- struct nvhost_master *dev = cpuaccess_to_dev(ctx);
- void __iomem *p = ctx->regs[module] + offset;
- const u32* in = (const u32*)values;
- BUG_ON(size & 3);
- size >>= 2;
- nvhost_module_busy(&dev->mod);
- while (size--) {
- writel(*(in++), p);
- p += 4;
- }
- wmb();
- nvhost_module_idle(&dev->mod);
-}
+++ /dev/null
-/*
- * drivers/video/tegra/host/nvhost_cpuaccess.h
- *
- * Tegra Graphics Host Cpu Register Access
- *
- * Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __NVHOST_CPUACCESS_H
-#define __NVHOST_CPUACCESS_H
-
-#include "nvhost_hardware.h"
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-enum nvhost_module_id {
- NVHOST_MODULE_DISPLAY_A = 0,
- NVHOST_MODULE_DISPLAY_B,
- NVHOST_MODULE_VI,
- NVHOST_MODULE_ISP,
- NVHOST_MODULE_MPE,
-#if 0
- /* TODO: [ahatala 2010-07-02] find out if these are needed */
- NVHOST_MODULE_FUSE,
- NVHOST_MODULE_APB_MISC,
- NVHOST_MODULE_CLK_RESET,
-#endif
- NVHOST_MODULE_NUM
-};
-
-struct nvhost_cpuaccess {
- struct resource *reg_mem[NVHOST_MODULE_NUM];
- void __iomem *regs[NVHOST_MODULE_NUM];
-};
-
-int nvhost_cpuaccess_init(struct nvhost_cpuaccess *ctx,
- struct platform_device *pdev);
-
-void nvhost_cpuaccess_deinit(struct nvhost_cpuaccess *ctx);
-
-int nvhost_mutex_try_lock(struct nvhost_cpuaccess *ctx, unsigned int idx);
-
-void nvhost_mutex_unlock(struct nvhost_cpuaccess *ctx, unsigned int idx);
-
-static inline bool nvhost_access_module_regs(
- struct nvhost_cpuaccess *ctx, u32 module)
-{
- return (module < NVHOST_MODULE_NUM);
-}
-
-void nvhost_read_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
- u32 offset, size_t size, void *values);
-
-void nvhost_write_module_regs(struct nvhost_cpuaccess *ctx, u32 module,
- u32 offset, size_t size, const void *values);
-
-#endif
+++ /dev/null
-/*
- * drivers/video/tegra/host/nvhost_hardware.h
- *
- * Tegra Graphics Host Register Offsets
- *
- * Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __NVHOST_HARDWARE_H
-#define __NVHOST_HARDWARE_H
-
-#include <linux/types.h>
-#include <linux/bitops.h>
-
-/* class ids */
-enum {
- NV_HOST1X_CLASS_ID = 0x1,
- NV_VIDEO_ENCODE_MPEG_CLASS_ID = 0x20,
- NV_GRAPHICS_3D_CLASS_ID = 0x60
-};
-
-
-/* channel registers */
-#define NV_HOST1X_CHANNELS 8
-#define NV_HOST1X_CHANNEL0_BASE 0
-#define NV_HOST1X_CHANNEL_MAP_SIZE_BYTES 16384
-
-
-#define HOST1X_CHANNEL_FIFOSTAT 0x00
-#define HOST1X_CHANNEL_INDDATA 0x0c
-#define HOST1X_CHANNEL_DMASTART 0x14
-#define HOST1X_CHANNEL_DMAPUT 0x18
-#define HOST1X_CHANNEL_DMAGET 0x1c
-#define HOST1X_CHANNEL_DMAEND 0x20
-#define HOST1X_CHANNEL_DMACTRL 0x24
-
-#define HOST1X_SYNC_CF_SETUP(x) (0x3080 + (4 * (x)))
-
-#define HOST1X_SYNC_SYNCPT_BASE(x) (0x3600 + (4 * (x)))
-
-#define HOST1X_SYNC_CBREAD(x) (0x3720 + (4 * (x)))
-#define HOST1X_SYNC_CFPEEK_CTRL 0x374c
-#define HOST1X_SYNC_CFPEEK_READ 0x3750
-#define HOST1X_SYNC_CFPEEK_PTRS 0x3754
-#define HOST1X_SYNC_CBSTAT(x) (0x3758 + (4 * (x)))
-
-static inline unsigned nvhost_channel_fifostat_outfentries(u32 reg)
-{
- return (reg >> 24) & 0x1f;
-}
-
-static inline u32 nvhost_channel_dmactrl(bool stop, bool get_rst, bool init_get)
-{
- u32 v = stop ? 1 : 0;
- if (get_rst)
- v |= 2;
- if (init_get)
- v |= 4;
- return v;
-}
-
-
-/* sync registers */
-#define NV_HOST1X_SYNCPT_NB_PTS 32
-#define NV_HOST1X_SYNCPT_NB_BASES 8
-#define NV_HOST1X_NB_MLOCKS 16
-#define HOST1X_CHANNEL_SYNC_REG_BASE 12288
-
-enum {
- HOST1X_SYNC_INTMASK = 0x4,
- HOST1X_SYNC_INTC0MASK = 0x8,
- HOST1X_SYNC_HINTSTATUS = 0x20,
- HOST1X_SYNC_HINTMASK = 0x24,
- HOST1X_SYNC_HINTSTATUS_EXT = 0x28,
- HOST1X_SYNC_HINTMASK_EXT = 0x2c,
- HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS = 0x40,
- HOST1X_SYNC_SYNCPT_THRESH_INT_MASK_0 = 0x50,
- HOST1X_SYNC_SYNCPT_THRESH_INT_MASK_1 = 0x54,
- HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE = 0x60,
- HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0 = 0x68,
- HOST1X_SYNC_USEC_CLK = 0x1a4,
- HOST1X_SYNC_CTXSW_TIMEOUT_CFG = 0x1a8,
- HOST1X_SYNC_IP_BUSY_TIMEOUT = 0x1bc,
- HOST1X_SYNC_IP_READ_TIMEOUT_ADDR = 0x1c0,
- HOST1X_SYNC_IP_WRITE_TIMEOUT_ADDR = 0x1c4,
- HOST1X_SYNC_MLOCK_0 = 0x2c0,
- HOST1X_SYNC_MLOCK_OWNER_0 = 0x340,
- HOST1X_SYNC_SYNCPT_0 = 0x400,
- HOST1X_SYNC_SYNCPT_INT_THRESH_0 = 0x500,
- HOST1X_SYNC_SYNCPT_BASE_0 = 0x600,
- HOST1X_SYNC_SYNCPT_CPU_INCR = 0x700
-};
-
-static inline bool nvhost_sync_hintstatus_ext_ip_read_int(u32 reg)
-{
- return (reg & BIT(30)) != 0;
-}
-
-static inline bool nvhost_sync_hintstatus_ext_ip_write_int(u32 reg)
-{
- return (reg & BIT(31)) != 0;
-}
-
-static inline bool nvhost_sync_mlock_owner_ch_owns(u32 reg)
-{
- return (reg & BIT(0)) != 0;
-}
-
-static inline bool nvhost_sync_mlock_owner_cpu_owns(u32 reg)
-{
- return (reg & BIT(1)) != 0;
-}
-
-static inline unsigned int nvhost_sync_mlock_owner_owner_chid(u32 reg)
-{
- return (reg >> 8) & 0xf;
-}
-
-
-/* host class */
-enum {
- NV_CLASS_HOST_INCR_SYNCPT = 0x0,
- NV_CLASS_HOST_WAIT_SYNCPT = 0x8,
- NV_CLASS_HOST_WAIT_SYNCPT_BASE = 0x9,
- NV_CLASS_HOST_INCR_SYNCPT_BASE = 0xc,
- NV_CLASS_HOST_INDOFF = 0x2d,
- NV_CLASS_HOST_INDDATA = 0x2e
-};
-
-static inline u32 nvhost_class_host_wait_syncpt_base(
- unsigned indx, unsigned base_indx, unsigned offset)
-{
- return (indx << 24) | (base_indx << 16) | offset;
-}
-
-static inline u32 nvhost_class_host_incr_syncpt_base(
- unsigned base_indx, unsigned offset)
-{
- return (base_indx << 24) | offset;
-}
-
-enum {
- NV_HOST_MODULE_HOST1X = 0,
- NV_HOST_MODULE_MPE = 1,
- NV_HOST_MODULE_GR3D = 6
-};
-
-static inline u32 nvhost_class_host_indoff_reg_write(
- unsigned mod_id, unsigned offset, bool auto_inc)
-{
- u32 v = (0xf << 28) | (mod_id << 18) | (offset << 2);
- if (auto_inc)
- v |= BIT(27);
- return v;
-}
-
-static inline u32 nvhost_class_host_indoff_reg_read(
- unsigned mod_id, unsigned offset, bool auto_inc)
-{
- u32 v = (mod_id << 18) | (offset << 2) | 1;
- if (auto_inc)
- v |= BIT(27);
- return v;
-}
-
-
-/* cdma opcodes */
-static inline u32 nvhost_opcode_setclass(
- unsigned class_id, unsigned offset, unsigned mask)
-{
- return (0 << 28) | (offset << 16) | (class_id << 6) | mask;
-}
-
-static inline u32 nvhost_opcode_incr(unsigned offset, unsigned count)
-{
- return (1 << 28) | (offset << 16) | count;
-}
-
-static inline u32 nvhost_opcode_nonincr(unsigned offset, unsigned count)
-{
- return (2 << 28) | (offset << 16) | count;
-}
-
-static inline u32 nvhost_opcode_mask(unsigned offset, unsigned mask)
-{
- return (3 << 28) | (offset << 16) | mask;
-}
-
-static inline u32 nvhost_opcode_imm(unsigned offset, unsigned value)
-{
- return (4 << 28) | (offset << 16) | value;
-}
-
-static inline u32 nvhost_opcode_restart(unsigned address)
-{
- return (5 << 28) | (address >> 4);
-}
-
-static inline u32 nvhost_opcode_gather(unsigned offset, unsigned count)
-{
- return (6 << 28) | (offset << 16) | count;
-}
-
-static inline u32 nvhost_opcode_gather_nonincr(unsigned offset, unsigned count)
-{
- return (6 << 28) | (offset << 16) | BIT(15) | count;
-}
-
-static inline u32 nvhost_opcode_gather_incr(unsigned offset, unsigned count)
-{
- return (6 << 28) | (offset << 16) | BIT(15) | BIT(14) | count;
-}
-
-#define NVHOST_OPCODE_NOOP nvhost_opcode_nonincr(0, 0)
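-
-/*
- * Encoding examples (the values follow from the helpers above):
- * nvhost_opcode_setclass(NV_GRAPHICS_3D_CLASS_ID, 0, 0) encodes to
- * (0 << 28) | (0 << 16) | (0x60 << 6) | 0 == 0x00001800, and
- * NVHOST_OPCODE_NOOP encodes to (2 << 28) == 0x20000000, a
- * non-incrementing write of zero words that command DMA simply skips.
- */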
-
-#endif /* __NVHOST_HARDWARE_H */
+++ /dev/null
-/*
- * drivers/video/tegra/host/nvhost_hwctx.h
- *
- * Tegra Graphics Host Hardware Context Interface
- *
- * Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __NVHOST_HWCTX_H
-#define __NVHOST_HWCTX_H
-
-#include <linux/string.h>
-#include <linux/nvhost.h>
-#include <linux/nvmap.h>
-#include <linux/kref.h>
-
-struct nvhost_channel;
-
-struct nvhost_hwctx {
- struct kref ref;
-
- struct nvhost_channel *channel;
- bool valid;
-
- struct nvmap_handle *save;
- u32 save_phys;
- u32 save_size;
- u32 save_incrs;
- void *save_cpu_data;
-
- struct nvmap_handle *restore;
- u32 restore_phys;
- u32 restore_size;
- u32 restore_incrs;
-};
-
-struct nvhost_hwctx_handler {
- struct nvhost_hwctx * (*alloc) (struct nvhost_channel *ch);
- void (*get) (struct nvhost_hwctx *ctx);
- void (*put) (struct nvhost_hwctx *ctx);
- void (*save_service) (struct nvhost_hwctx *ctx);
-};
-
-int nvhost_3dctx_handler_init(struct nvhost_hwctx_handler *h);
-int nvhost_mpectx_handler_init(struct nvhost_hwctx_handler *h);
-
-static inline int nvhost_hwctx_handler_init(
- struct nvhost_hwctx_handler *h,
- const char *module)
-{
- if (strcmp(module, "gr3d") == 0)
- return nvhost_3dctx_handler_init(h);
- else if (strcmp(module, "mpe") == 0)
- return nvhost_mpectx_handler_init(h);
-
- return 0;
-}
-
-struct hwctx_reginfo {
- unsigned int offset:12;
- unsigned int count:16;
- unsigned int type:2;
-};
-
-enum {
- HWCTX_REGINFO_DIRECT = 0,
- HWCTX_REGINFO_INDIRECT,
- HWCTX_REGINFO_INDIRECT_OFFSET,
- HWCTX_REGINFO_INDIRECT_DATA
-};
-
-#define HWCTX_REGINFO(offset, count, type) {offset, count, HWCTX_REGINFO_##type}
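-
-/*
- * For illustration, a hypothetical table entry:
- * HWCTX_REGINFO(0x200, 8, DIRECT) expands to
- * {0x200, 8, HWCTX_REGINFO_DIRECT}: eight directly addressed registers
- * starting at offset 0x200.
- */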
-
-#endif
+++ /dev/null
-/*
- * drivers/video/tegra/host/nvhost_intr.c
- *
- * Tegra Graphics Host Interrupt Management
- *
- * Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include "nvhost_intr.h"
-#include "dev.h"
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <linux/irq.h>
-
-#define intr_to_dev(x) container_of(x, struct nvhost_master, intr)
-
-
-/*** HW sync point threshold interrupt management ***/
-
-static void set_syncpt_threshold(void __iomem *sync_regs, u32 id, u32 thresh)
-{
- thresh &= 0xffff;
- writel(thresh, sync_regs + (HOST1X_SYNC_SYNCPT_INT_THRESH_0 + id * 4));
-}
-
-static void enable_syncpt_interrupt(void __iomem *sync_regs, u32 id)
-{
- writel(BIT(id), sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0);
-}
-
-
-/*** Wait list management ***/
-
-struct nvhost_waitlist {
- struct list_head list;
- struct kref refcount;
- u32 thresh;
- enum nvhost_intr_action action;
- atomic_t state;
- void *data;
- int count;
-};
-
-enum waitlist_state {
- WLS_PENDING,
- WLS_REMOVED,
- WLS_CANCELLED,
- WLS_HANDLED
-};
-
-static void waiter_release(struct kref *kref)
-{
- kfree(container_of(kref, struct nvhost_waitlist, refcount));
-}
-
-/*
- * add a waiter to a waiter queue, sorted by threshold
- * returns true if it was added at the head of the queue
- */
-static bool add_waiter_to_queue(struct nvhost_waitlist *waiter,
- struct list_head *queue)
-{
- struct nvhost_waitlist *pos;
- u32 thresh = waiter->thresh;
-
- list_for_each_entry_reverse(pos, queue, list)
- if ((s32)(pos->thresh - thresh) <= 0) {
- list_add(&waiter->list, &pos->list);
- return false;
- }
-
- list_add(&waiter->list, queue);
- return true;
-}
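-
-/*
- * The signed difference keeps the ordering correct across u32 wraparound:
- * e.g. (illustrative values) pos->thresh == 0xfffffffe and
- * thresh == 0x00000001 give (s32)(pos->thresh - thresh) == -3, so the
- * older threshold still sorts first although it is numerically larger.
- */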
-
-/*
- * run through a waiter queue for a single sync point ID
- * and gather all completed waiters into lists by actions
- */
-static void remove_completed_waiters(struct list_head *head, u32 sync,
- struct list_head completed[NVHOST_INTR_ACTION_COUNT])
-{
- struct list_head *dest;
- struct nvhost_waitlist *waiter, *next, *prev;
-
- list_for_each_entry_safe(waiter, next, head, list) {
- if ((s32)(waiter->thresh - sync) > 0)
- break;
-
- dest = completed + waiter->action;
-
- /* consolidate submit cleanups */
- if (waiter->action == NVHOST_INTR_ACTION_SUBMIT_COMPLETE
- && !list_empty(dest)) {
- prev = list_entry(dest->prev,
- struct nvhost_waitlist, list);
- if (prev->data == waiter->data) {
- prev->count++;
- dest = NULL;
- }
- }
-
- /* PENDING->REMOVED or CANCELLED->HANDLED */
- if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) {
- list_del(&waiter->list);
- kref_put(&waiter->refcount, waiter_release);
- } else {
- list_move_tail(&waiter->list, dest);
- }
- }
-}
-
-static void action_submit_complete(struct nvhost_waitlist *waiter)
-{
- struct nvhost_channel *channel = waiter->data;
- int nr_completed = waiter->count;
-
- nvhost_cdma_update(&channel->cdma);
- nvhost_module_idle_mult(&channel->mod, nr_completed);
-}
-
-static void action_ctxsave(struct nvhost_waitlist *waiter)
-{
- struct nvhost_hwctx *hwctx = waiter->data;
- struct nvhost_channel *channel = hwctx->channel;
-
- channel->ctxhandler.save_service(hwctx);
- channel->ctxhandler.put(hwctx);
-}
-
-static void action_wakeup(struct nvhost_waitlist *waiter)
-{
- wait_queue_head_t *wq = waiter->data;
-
- wake_up(wq);
-}
-
-static void action_wakeup_interruptible(struct nvhost_waitlist *waiter)
-{
- wait_queue_head_t *wq = waiter->data;
-
- wake_up_interruptible(wq);
-}
-
-typedef void (*action_handler)(struct nvhost_waitlist *waiter);
-
-static action_handler action_handlers[NVHOST_INTR_ACTION_COUNT] = {
- action_submit_complete,
- action_ctxsave,
- action_wakeup,
- action_wakeup_interruptible,
-};
-
-static void run_handlers(struct list_head completed[NVHOST_INTR_ACTION_COUNT])
-{
- struct list_head *head = completed;
- int i;
-
- for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i, ++head) {
- action_handler handler = action_handlers[i];
- struct nvhost_waitlist *waiter, *next;
-
- list_for_each_entry_safe(waiter, next, head, list) {
- list_del(&waiter->list);
- handler(waiter);
- atomic_set(&waiter->state, WLS_HANDLED);
- smp_wmb();
- kref_put(&waiter->refcount, waiter_release);
- }
- }
-}
-
-
-/*** Interrupt service functions ***/
-
-/**
- * Host1x interrupt service function
- * Handles read / write failures
- */
-static irqreturn_t host1x_isr(int irq, void *dev_id)
-{
- struct nvhost_intr *intr = dev_id;
- void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
- u32 stat;
- u32 ext_stat;
- u32 addr;
-
- stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS);
- ext_stat = readl(sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
-
- if (nvhost_sync_hintstatus_ext_ip_read_int(ext_stat)) {
- addr = readl(sync_regs + HOST1X_SYNC_IP_READ_TIMEOUT_ADDR);
- pr_err("Host read timeout at address %x\n", addr);
- }
-
- if (nvhost_sync_hintstatus_ext_ip_write_int(ext_stat)) {
- addr = readl(sync_regs + HOST1X_SYNC_IP_WRITE_TIMEOUT_ADDR);
- pr_err("Host write timeout at address %x\n", addr);
- }
-
- writel(ext_stat, sync_regs + HOST1X_SYNC_HINTSTATUS_EXT);
- writel(stat, sync_regs + HOST1X_SYNC_HINTSTATUS);
-
- return IRQ_HANDLED;
-}
-
-/**
- * Sync point threshold interrupt service function
- * Handles sync point threshold triggers, in interrupt context
- */
-static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id)
-{
- struct nvhost_intr_syncpt *syncpt = dev_id;
- unsigned int id = syncpt->id;
- struct nvhost_intr *intr = container_of(syncpt, struct nvhost_intr,
- syncpt[id]);
- void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
-
- writel(BIT(id),
- sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE);
- writel(BIT(id),
- sync_regs + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS);
-
- return IRQ_WAKE_THREAD;
-}
-
-
-/**
- * Sync point threshold interrupt service thread function
- * Handles sync point threshold triggers, in thread context
- */
-static irqreturn_t syncpt_thresh_fn(int irq, void *dev_id)
-{
- struct nvhost_intr_syncpt *syncpt = dev_id;
- unsigned int id = syncpt->id;
- struct nvhost_intr *intr = container_of(syncpt, struct nvhost_intr,
- syncpt[id]);
- struct nvhost_master *dev = intr_to_dev(intr);
- void __iomem *sync_regs = dev->sync_aperture;
-
- struct list_head completed[NVHOST_INTR_ACTION_COUNT];
- u32 sync;
- unsigned int i;
-
- for (i = 0; i < NVHOST_INTR_ACTION_COUNT; ++i)
- INIT_LIST_HEAD(completed + i);
-
- sync = nvhost_syncpt_update_min(&dev->syncpt, id);
-
- spin_lock(&syncpt->lock);
-
- remove_completed_waiters(&syncpt->wait_head, sync, completed);
-
- if (!list_empty(&syncpt->wait_head)) {
- u32 thresh = list_first_entry(&syncpt->wait_head,
- struct nvhost_waitlist, list)->thresh;
-
- set_syncpt_threshold(sync_regs, id, thresh);
- enable_syncpt_interrupt(sync_regs, id);
- }
-
- spin_unlock(&syncpt->lock);
-
- run_handlers(completed);
-
- return IRQ_HANDLED;
-}
-
-/*
- * lazily request a syncpt's irq
- */
-static int request_syncpt_irq(struct nvhost_intr_syncpt *syncpt)
-{
- static DEFINE_MUTEX(mutex);
-	int err = 0;
-
- mutex_lock(&mutex);
- if (!syncpt->irq_requested) {
- err = request_threaded_irq(syncpt->irq,
- syncpt_thresh_isr, syncpt_thresh_fn,
- 0, syncpt->thresh_irq_name, syncpt);
- if (!err)
- syncpt->irq_requested = 1;
- }
- mutex_unlock(&mutex);
- return err;
-}
-
-
-/*** Main API ***/
-
-int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
- enum nvhost_intr_action action, void *data,
- void **ref)
-{
- struct nvhost_waitlist *waiter;
- struct nvhost_intr_syncpt *syncpt;
- void __iomem *sync_regs;
- int queue_was_empty;
- int err;
-
- /* create and initialize a new waiter */
- waiter = kmalloc(sizeof(*waiter), GFP_KERNEL);
- if (!waiter)
- return -ENOMEM;
- INIT_LIST_HEAD(&waiter->list);
- kref_init(&waiter->refcount);
- if (ref)
- kref_get(&waiter->refcount);
- waiter->thresh = thresh;
- waiter->action = action;
- atomic_set(&waiter->state, WLS_PENDING);
- waiter->data = data;
- waiter->count = 1;
-
- BUG_ON(id >= NV_HOST1X_SYNCPT_NB_PTS);
- syncpt = intr->syncpt + id;
- sync_regs = intr_to_dev(intr)->sync_aperture;
-
- spin_lock(&syncpt->lock);
-
- /* lazily request irq for this sync point */
- if (!syncpt->irq_requested) {
- spin_unlock(&syncpt->lock);
-
- err = request_syncpt_irq(syncpt);
- if (err) {
- kfree(waiter);
- return err;
- }
-
- spin_lock(&syncpt->lock);
- }
-
- queue_was_empty = list_empty(&syncpt->wait_head);
-
- if (add_waiter_to_queue(waiter, &syncpt->wait_head)) {
- /* added at head of list - new threshold value */
- set_syncpt_threshold(sync_regs, id, thresh);
-
- /* added as first waiter - enable interrupt */
- if (queue_was_empty)
- enable_syncpt_interrupt(sync_regs, id);
- }
-
- spin_unlock(&syncpt->lock);
-
- if (ref)
- *ref = waiter;
- return 0;
-}
-
-void nvhost_intr_put_ref(struct nvhost_intr *intr, void *ref)
-{
- struct nvhost_waitlist *waiter = ref;
-
- while (atomic_cmpxchg(&waiter->state,
- WLS_PENDING, WLS_CANCELLED) == WLS_REMOVED)
- schedule();
-
- kref_put(&waiter->refcount, waiter_release);
-}
-
-
-/*** Init & shutdown ***/
-
-int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync)
-{
- unsigned int id;
- struct nvhost_intr_syncpt *syncpt;
- int err;
-
- err = request_irq(irq_gen, host1x_isr, 0, "host_status", intr);
- if (err)
- goto fail;
- intr->host1x_irq = irq_gen;
- intr->host1x_isr_started = true;
-
- for (id = 0, syncpt = intr->syncpt;
- id < NV_HOST1X_SYNCPT_NB_PTS;
- ++id, ++syncpt) {
- syncpt->id = id;
- syncpt->irq = irq_sync + id;
- syncpt->irq_requested = 0;
- spin_lock_init(&syncpt->lock);
- INIT_LIST_HEAD(&syncpt->wait_head);
- snprintf(syncpt->thresh_irq_name,
- sizeof(syncpt->thresh_irq_name),
- "host_sp_%02d", id);
- }
-
- return 0;
-
-fail:
- nvhost_intr_deinit(intr);
- return err;
-}
-
-void nvhost_intr_deinit(struct nvhost_intr *intr)
-{
- unsigned int id;
- struct nvhost_intr_syncpt *syncpt;
-
- for (id = 0, syncpt = intr->syncpt;
- id < NV_HOST1X_SYNCPT_NB_PTS;
- ++id, ++syncpt)
- if (syncpt->irq_requested)
- free_irq(syncpt->irq, syncpt);
-
- if (intr->host1x_isr_started) {
- free_irq(intr->host1x_irq, intr);
- intr->host1x_isr_started = false;
- }
-}
-
-void nvhost_intr_configure(struct nvhost_intr *intr, u32 hz)
-{
- void __iomem *sync_regs = intr_to_dev(intr)->sync_aperture;
-
-	/* write microsecond clock register */
- writel((hz + 1000000 - 1)/1000000, sync_regs + HOST1X_SYNC_USEC_CLK);
-
- /* disable the ip_busy_timeout. this prevents write drops, etc.
- * there's no real way to recover from a hung client anyway.
- */
- writel(0, sync_regs + HOST1X_SYNC_IP_BUSY_TIMEOUT);
-
-	/* increase the auto-ack timeout to the maximum value. 2d will hang
- * otherwise on ap20.
- */
- writel(0xff, sync_regs + HOST1X_SYNC_CTXSW_TIMEOUT_CFG);
-
- /* disable interrupts for both cpu's */
- writel(0, sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_MASK_0);
- writel(0, sync_regs + HOST1X_SYNC_SYNCPT_THRESH_INT_MASK_1);
-
- /* masking all of the interrupts actually means "enable" */
- writel(BIT(0), sync_regs + HOST1X_SYNC_INTMASK);
-
- /* enable HOST_INT_C0MASK */
- writel(BIT(0), sync_regs + HOST1X_SYNC_INTC0MASK);
-
- /* enable HINTMASK_EXT */
- writel(BIT(31), sync_regs + HOST1X_SYNC_HINTMASK);
-
- /* enable IP_READ_INT and IP_WRITE_INT */
- writel(BIT(30) | BIT(31), sync_regs + HOST1X_SYNC_HINTMASK_EXT);
-}
+++ /dev/null
-/*
- * drivers/video/tegra/host/nvhost_intr.h
- *
- * Tegra Graphics Host Interrupt Management
- *
- * Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __NVHOST_INTR_H
-#define __NVHOST_INTR_H
-
-#include <linux/kthread.h>
-#include <linux/semaphore.h>
-
-#include "nvhost_hardware.h"
-
-struct nvhost_channel;
-
-enum nvhost_intr_action {
- /**
- * Perform cleanup after a submit has completed.
- * 'data' points to a channel
- */
- NVHOST_INTR_ACTION_SUBMIT_COMPLETE = 0,
-
- /**
- * Save a HW context.
- * 'data' points to a context
- */
- NVHOST_INTR_ACTION_CTXSAVE,
-
- /**
- * Wake up a task.
- * 'data' points to a wait_queue_head_t
- */
- NVHOST_INTR_ACTION_WAKEUP,
-
- /**
- * Wake up an interruptible task.
- * 'data' points to a wait_queue_head_t
- */
- NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
-
- NVHOST_INTR_ACTION_COUNT
-};
-
-struct nvhost_intr_syncpt {
- u8 id;
- u8 irq_requested;
- u16 irq;
- spinlock_t lock;
- struct list_head wait_head;
- char thresh_irq_name[12];
-};
-
-struct nvhost_intr {
- struct nvhost_intr_syncpt syncpt[NV_HOST1X_SYNCPT_NB_PTS];
- int host1x_irq;
- bool host1x_isr_started;
-};
-
-/**
- * Schedule an action to be taken when a sync point reaches the given threshold.
- *
- * @id the sync point
- * @thresh the threshold
- * @action the action to take
- * @data a pointer to extra data depending on action, see above
- * @ref must be passed if cancellation is possible, else NULL
- *
- * This is a non-blocking API.
- */
-int nvhost_intr_add_action(struct nvhost_intr *intr, u32 id, u32 thresh,
- enum nvhost_intr_action action, void *data,
- void **ref);
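-
-/*
- * A minimal sketch of waiting on a threshold (it mirrors the 3D powerdown
- * path; 'intr', 'sp', 'id' and 'thresh' are assumed to be in scope):
- *
- *	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
- *	nvhost_intr_add_action(intr, id, thresh,
- *			       NVHOST_INTR_ACTION_WAKEUP, &wq, NULL);
- *	wait_event(wq, nvhost_syncpt_min_cmp(sp, id, thresh));
- */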
-
-/**
- * Unreference an action submitted to nvhost_intr_add_action().
- * You must call this if you passed non-NULL as ref.
- * @ref the ref returned from nvhost_intr_add_action()
- */
-void nvhost_intr_put_ref(struct nvhost_intr *intr, void *ref);
-
-int nvhost_intr_init(struct nvhost_intr *intr, u32 irq_gen, u32 irq_sync);
-void nvhost_intr_deinit(struct nvhost_intr *intr);
-void nvhost_intr_configure(struct nvhost_intr *intr, u32 hz);
-
-#endif
+++ /dev/null
-/*
- * drivers/video/tegra/host/nvhost_mpectx.c
- *
- * Tegra Graphics Host MPE HW Context
- *
- * Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-/* Placeholder */
+++ /dev/null
-/*
- * drivers/video/tegra/host/nvhost_syncpt.c
- *
- * Tegra Graphics Host Syncpoints
- *
- * Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include "nvhost_syncpt.h"
-#include "dev.h"
-
-#define client_managed(id) (BIT(id) & NVSYNCPTS_CLIENT_MANAGED)
-#define syncpt_to_dev(sp) container_of(sp, struct nvhost_master, syncpt)
-#define SYNCPT_CHECK_PERIOD (2 * HZ)
-
-static bool check_max(struct nvhost_syncpt *sp, u32 id, u32 real)
-{
- u32 max;
- if (client_managed(id))
- return true;
- smp_rmb();
- max = (u32)atomic_read(&sp->max_val[id]);
- return ((s32)(max - real) >= 0);
-}
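
The (s32)(max - real) >= 0 test above is the wrap-safe ordering idiom used
throughout this file: syncpoints are free-running 32-bit counters, so a plain
unsigned compare misbehaves once a counter wraps. A self-contained
illustration (assuming the kernel u32/s32 types):

/* Illustration only: "a has reached b" for free-running 32-bit counters.
 * Correct as long as the two values are within 2^31 of each other.
 */
static inline bool counter_reached(u32 a, u32 b)
{
	return (s32)(a - b) >= 0;
}

/* counter_reached(0x00000002, 0xfffffffe) is true: 2 is four increments
 * past 0xfffffffe after wrap-around, whereas the plain unsigned compare
 * (2 >= 0xfffffffe) would wrongly say false.
 */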
-
-/**
- * Write the current syncpoint value back to hw.
- */
-static void reset_syncpt(struct nvhost_syncpt *sp, u32 id)
-{
- struct nvhost_master *dev = syncpt_to_dev(sp);
- int min;
- smp_rmb();
- min = atomic_read(&sp->min_val[id]);
- writel(min, dev->sync_aperture + (HOST1X_SYNC_SYNCPT_0 + id * 4));
-}
-
-/**
- * Write the current waitbase value back to hw.
- */
-static void reset_syncpt_wait_base(struct nvhost_syncpt *sp, u32 id)
-{
- struct nvhost_master *dev = syncpt_to_dev(sp);
- writel(sp->base_val[id],
- dev->sync_aperture + (HOST1X_SYNC_SYNCPT_BASE_0 + id * 4));
-}
-
-/**
- * Read waitbase value from hw.
- */
-static void read_syncpt_wait_base(struct nvhost_syncpt *sp, u32 id)
-{
- struct nvhost_master *dev = syncpt_to_dev(sp);
- sp->base_val[id] = readl(dev->sync_aperture +
- (HOST1X_SYNC_SYNCPT_BASE_0 + id * 4));
-}
-
-/**
- * Resets the hardware syncpoint and waitbase registers from the SW shadows
- */
-void nvhost_syncpt_reset(struct nvhost_syncpt *sp)
-{
- u32 i;
- for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++)
- reset_syncpt(sp, i);
- for (i = 0; i < NV_HOST1X_SYNCPT_NB_BASES; i++)
- reset_syncpt_wait_base(sp, i);
- wmb();
-}
-
-/**
- * Updates the SW shadow state for client-managed syncpoints and waitbases
- */
-void nvhost_syncpt_save(struct nvhost_syncpt *sp)
-{
- u32 i;
-
- for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++) {
- if (client_managed(i))
- nvhost_syncpt_update_min(sp, i);
- else
- BUG_ON(!nvhost_syncpt_min_eq_max(sp, i));
- }
-
- for (i = 0; i < NV_HOST1X_SYNCPT_NB_BASES; i++)
- read_syncpt_wait_base(sp, i);
-}
-
-/**
- * Updates the last value read from hardware.
- */
-u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id)
-{
- struct nvhost_master *dev = syncpt_to_dev(sp);
- void __iomem *sync_regs = dev->sync_aperture;
- u32 old, live;
-
- do {
- smp_rmb();
- old = (u32)atomic_read(&sp->min_val[id]);
- live = readl(sync_regs + (HOST1X_SYNC_SYNCPT_0 + id * 4));
- } while ((u32)atomic_cmpxchg(&sp->min_val[id], old, live) != old);
-
- BUG_ON(!check_max(sp, id, live));
-
- return live;
-}
-
-/**
- * Get the current syncpoint value
- */
-u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id)
-{
- u32 val;
-
- nvhost_module_busy(&syncpt_to_dev(sp)->mod);
- val = nvhost_syncpt_update_min(sp, id);
- nvhost_module_idle(&syncpt_to_dev(sp)->mod);
- return val;
-}
-
-/**
- * Write a CPU syncpoint increment to the hardware, without touching
- * the cache. The caller is responsible for ensuring the host is powered.
- */
-void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id)
-{
- struct nvhost_master *dev = syncpt_to_dev(sp);
- BUG_ON(!nvhost_module_powered(&dev->mod));
- BUG_ON(!client_managed(id) && nvhost_syncpt_min_eq_max(sp, id));
- writel(BIT(id), dev->sync_aperture + HOST1X_SYNC_SYNCPT_CPU_INCR);
- wmb();
-}
-
-/**
- * Increment syncpoint value from cpu, updating cache
- */
-void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id)
-{
- nvhost_syncpt_incr_max(sp, id, 1);
- nvhost_module_busy(&syncpt_to_dev(sp)->mod);
- nvhost_syncpt_cpu_incr(sp, id);
- nvhost_module_idle(&syncpt_to_dev(sp)->mod);
-}
-
-/**
- * Main entrypoint for syncpoint value waits.
- */
-int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id,
- u32 thresh, u32 timeout)
-{
- DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
- void *ref;
- int err = 0;
-
- BUG_ON(!check_max(sp, id, thresh));
-
- /* first check cache */
- if (nvhost_syncpt_min_cmp(sp, id, thresh))
- return 0;
-
- /* keep host alive */
- nvhost_module_busy(&syncpt_to_dev(sp)->mod);
-
- if (client_managed(id) || !nvhost_syncpt_min_eq_max(sp, id)) {
- /* try to read from register */
- u32 val = nvhost_syncpt_update_min(sp, id);
- if ((s32)(val - thresh) >= 0)
- goto done;
- }
-
- if (!timeout) {
- err = -EAGAIN;
- goto done;
- }
-
- /* schedule a wakeup when the syncpoint value is reached */
- err = nvhost_intr_add_action(&(syncpt_to_dev(sp)->intr), id, thresh,
- NVHOST_INTR_ACTION_WAKEUP_INTERRUPTIBLE, &wq, &ref);
- if (err)
- goto done;
-
- err = -EAGAIN;
- /* wait for the syncpoint, or timeout, or signal */
- while (timeout) {
- u32 check = min_t(u32, SYNCPT_CHECK_PERIOD, timeout);
- int remain = wait_event_interruptible_timeout(wq,
- nvhost_syncpt_min_cmp(sp, id, thresh),
- check);
- if (remain > 0 || nvhost_syncpt_min_cmp(sp, id, thresh)) {
- err = 0;
- break;
- }
- if (remain < 0) {
- err = remain;
- break;
- }
- if (timeout != NVHOST_NO_TIMEOUT)
- timeout -= check;
- if (timeout) {
- dev_warn(&syncpt_to_dev(sp)->pdev->dev,
- "syncpoint id %d (%s) stuck waiting %d\n",
- id, nvhost_syncpt_name(id), thresh);
- nvhost_syncpt_debug(sp);
- }
-	}
- nvhost_intr_put_ref(&(syncpt_to_dev(sp)->intr), ref);
-
-done:
- nvhost_module_idle(&syncpt_to_dev(sp)->mod);
- return err;
-}
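
A hypothetical caller-side sketch (the names host and fence_value are
illustrative, not from this patch): waiting up to one second on a 3D job's
fence before reusing its buffers:

/* Hypothetical sketch: host is the nvhost_master instance and
 * fence_value the threshold returned at submit time.
 */
int err = nvhost_syncpt_wait_timeout(&host->syncpt, NVSYNCPT_3D,
				     fence_value, HZ);
if (err == -EAGAIN)
	pr_warn("3d job fence %u not signalled within 1s\n", fence_value);
else if (err)
	pr_warn("wait interrupted: %d\n", err);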
-
-static const char *s_syncpt_names[32] = {
- "", "", "", "", "", "", "", "", "", "", "", "",
- "vi_isp_0", "vi_isp_1", "vi_isp_2", "vi_isp_3", "vi_isp_4", "vi_isp_5",
- "2d_0", "2d_1",
- "", "",
- "3d", "mpe", "disp0", "disp1", "vblank0", "vblank1", "mpe_ebm_eof", "mpe_wr_safe",
- "2d_tinyblt", "dsi"
-};
-
-const char *nvhost_syncpt_name(u32 id)
-{
-	BUG_ON(id >= ARRAY_SIZE(s_syncpt_names));
- return s_syncpt_names[id];
-}
-
-void nvhost_syncpt_debug(struct nvhost_syncpt *sp)
-{
- u32 i;
- for (i = 0; i < NV_HOST1X_SYNCPT_NB_PTS; i++) {
- u32 max = nvhost_syncpt_read_max(sp, i);
- if (!max)
- continue;
- dev_info(&syncpt_to_dev(sp)->pdev->dev,
- "id %d (%s) min %d max %d\n",
- i, nvhost_syncpt_name(i),
- nvhost_syncpt_update_min(sp, i), max);
-
- }
-}
+++ /dev/null
-/*
- * drivers/video/tegra/host/nvhost_syncpt.h
- *
- * Tegra Graphics Host Syncpoints
- *
- * Copyright (c) 2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __NVHOST_SYNCPT_H
-#define __NVHOST_SYNCPT_H
-
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <asm/atomic.h>
-
-#include "nvhost_hardware.h"
-
-#define NVSYNCPT_VI_ISP_0 (12)
-#define NVSYNCPT_VI_ISP_1 (13)
-#define NVSYNCPT_VI_ISP_2 (14)
-#define NVSYNCPT_VI_ISP_3 (15)
-#define NVSYNCPT_VI_ISP_4 (16)
-#define NVSYNCPT_VI_ISP_5 (17)
-#define NVSYNCPT_2D_0 (18)
-#define NVSYNCPT_2D_1 (19)
-#define NVSYNCPT_3D (22)
-#define NVSYNCPT_MPE (23)
-#define NVSYNCPT_DISP0 (24)
-#define NVSYNCPT_DISP1 (25)
-#define NVSYNCPT_VBLANK0 (26)
-#define NVSYNCPT_VBLANK1 (27)
-#define NVSYNCPT_MPE_EBM_EOF (28)
-#define NVSYNCPT_MPE_WR_SAFE (29)
-#define NVSYNCPT_DSI (31)
-#define NVSYNCPT_INVALID (-1)
-
-/*#define NVSYNCPT_2D_CHANNEL2_0 (20) */
-/*#define NVSYNCPT_2D_CHANNEL2_1 (21) */
-/*#define NVSYNCPT_2D_TINYBLT_WAR (30)*/
-/*#define NVSYNCPT_2D_TINYBLT_RESTORE_CLASS_ID (30)*/
-
-/* sync points that are wholly managed by the client */
-#define NVSYNCPTS_CLIENT_MANAGED ( \
- BIT(NVSYNCPT_DISP0) | BIT(NVSYNCPT_DISP1) | BIT(NVSYNCPT_DSI) | \
- BIT(NVSYNCPT_VI_ISP_0) | BIT(NVSYNCPT_VI_ISP_2) | \
- BIT(NVSYNCPT_VI_ISP_3) | BIT(NVSYNCPT_VI_ISP_4) | BIT(NVSYNCPT_VI_ISP_5) | \
- BIT(NVSYNCPT_MPE_EBM_EOF) | BIT(NVSYNCPT_MPE_WR_SAFE) | \
- BIT(NVSYNCPT_2D_1))
-
-#define NVWAITBASE_2D_0 (1)
-#define NVWAITBASE_2D_1 (2)
-#define NVWAITBASE_3D (3)
-#define NVWAITBASE_MPE (4)
-
-struct nvhost_syncpt {
- atomic_t min_val[NV_HOST1X_SYNCPT_NB_PTS];
- atomic_t max_val[NV_HOST1X_SYNCPT_NB_PTS];
- u32 base_val[NV_HOST1X_SYNCPT_NB_BASES];
-};
-
-/**
- * Updates the value sent to hardware.
- */
-static inline u32 nvhost_syncpt_incr_max(struct nvhost_syncpt *sp,
- u32 id, u32 incrs)
-{
- return (u32)atomic_add_return(incrs, &sp->max_val[id]);
-}
-
-/**
- * Sets the value sent to hardware.
- */
-static inline u32 nvhost_syncpt_set_max(struct nvhost_syncpt *sp,
- u32 id, u32 val)
-{
- atomic_set(&sp->max_val[id], val);
- smp_wmb();
- return val;
-}
-
-static inline u32 nvhost_syncpt_read_max(struct nvhost_syncpt *sp, u32 id)
-{
- smp_rmb();
- return (u32)atomic_read(&sp->max_val[id]);
-}
-
-/**
- * Returns true if syncpoint has reached threshold
- */
-static inline bool nvhost_syncpt_min_cmp(struct nvhost_syncpt *sp,
- u32 id, u32 thresh)
-{
- u32 cur;
- smp_rmb();
- cur = (u32)atomic_read(&sp->min_val[id]);
- return ((s32)(cur - thresh) >= 0);
-}
-
-/**
- * Returns true if syncpoint min == max
- */
-static inline bool nvhost_syncpt_min_eq_max(struct nvhost_syncpt *sp, u32 id)
-{
- int min, max;
- smp_rmb();
- min = atomic_read(&sp->min_val[id]);
- max = atomic_read(&sp->max_val[id]);
- return (min == max);
-}
-
-void nvhost_syncpt_cpu_incr(struct nvhost_syncpt *sp, u32 id);
-
-u32 nvhost_syncpt_update_min(struct nvhost_syncpt *sp, u32 id);
-
-void nvhost_syncpt_save(struct nvhost_syncpt *sp);
-
-void nvhost_syncpt_reset(struct nvhost_syncpt *sp);
-
-u32 nvhost_syncpt_read(struct nvhost_syncpt *sp, u32 id);
-
-void nvhost_syncpt_incr(struct nvhost_syncpt *sp, u32 id);
-
-int nvhost_syncpt_wait_timeout(struct nvhost_syncpt *sp, u32 id, u32 thresh,
- u32 timeout);
-
-static inline int nvhost_syncpt_wait(struct nvhost_syncpt *sp, u32 id, u32 thresh)
-{
- return nvhost_syncpt_wait_timeout(sp, id, thresh, MAX_SCHEDULE_TIMEOUT);
-}
-
-
-const char *nvhost_syncpt_name(u32 id);
-
-void nvhost_syncpt_debug(struct nvhost_syncpt *sp);
-
-#endif
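
The min/max shadow pair in nvhost_syncpt is the core invariant here: min
mirrors the last value read from hardware, max is the value hardware will
eventually reach once all queued increments retire, and waits are only legal
against thresholds at or below max. A hypothetical submit-side sketch:

/* Hypothetical sketch, not from this patch: reserve incrs future
 * increments for a job and return the fence it completes at.
 */
static u32 example_submit_fence(struct nvhost_syncpt *sp, u32 id, u32 incrs)
{
	/* advance max before the commands are queued, so check_max()
	 * style sanity tests never see an "impossible" threshold */
	u32 fence = nvhost_syncpt_incr_max(sp, id, incrs);

	/* ... push a command stream that performs incrs increments ... */

	return fence;	/* pass to nvhost_syncpt_wait_timeout() later */
}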
+++ /dev/null
-/*
- * include/linux/nvhost.h
- *
- * Tegra graphics host driver
- *
- * Copyright (c) 2009-2010, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#ifndef __LINUX_NVHOST_H
-#define __LINUX_NVHOST_H
-
-#include <linux/device.h>
-#include <linux/ioctl.h>
-#include <linux/types.h>
-
-struct nvhost_master;
-
-struct nvhost_device {
- const char *name;
- struct device dev;
- int id;
- u32 num_resources;
- struct resource *resource;
-
- struct nvhost_master *host;
-};
-
-extern int nvhost_device_register(struct nvhost_device *);
-extern void nvhost_device_unregister(struct nvhost_device *);
-
-extern struct bus_type nvhost_bus_type;
-
-struct nvhost_driver {
- int (*probe)(struct nvhost_device *);
- int (*remove)(struct nvhost_device *);
- void (*shutdown)(struct nvhost_device *);
- int (*suspend)(struct nvhost_device *, pm_message_t state);
- int (*resume)(struct nvhost_device *);
- struct device_driver driver;
-};
-
-extern int nvhost_driver_register(struct nvhost_driver *);
-extern void nvhost_driver_unregister(struct nvhost_driver *);
-extern struct resource *nvhost_get_resource(struct nvhost_device *, unsigned int, unsigned int);
-extern int nvhost_get_irq(struct nvhost_device *, unsigned int);
-extern struct resource *nvhost_get_resource_byname(struct nvhost_device *, unsigned int, const char *);
-extern int nvhost_get_irq_byname(struct nvhost_device *, const char *);
-
-#define to_nvhost_device(x) container_of((x), struct nvhost_device, dev)
-#define to_nvhost_driver(drv) (container_of((drv), struct nvhost_driver, \
- driver))
-
-#define nvhost_get_drvdata(_dev) dev_get_drvdata(&(_dev)->dev)
-#define nvhost_set_drvdata(_dev,data) dev_set_drvdata(&(_dev)->dev, (data))
-
-int nvhost_bus_register(struct nvhost_master *host);
-
-#if !defined(__KERNEL__)
-#define __user
-#endif
-
-#define NVHOST_NO_TIMEOUT (-1)
-#define NVHOST_IOCTL_MAGIC 'H'
-
-struct nvhost_submit_hdr {
- __u32 syncpt_id;
- __u32 syncpt_incrs;
- __u32 num_cmdbufs;
- __u32 num_relocs;
-};
-
-struct nvhost_cmdbuf {
- __u32 mem;
- __u32 offset;
- __u32 words;
-};
-
-struct nvhost_reloc {
- __u32 cmdbuf_mem;
- __u32 cmdbuf_offset;
- __u32 target;
- __u32 target_offset;
-};
-
-struct nvhost_get_param_args {
- __u32 value;
-};
-
-struct nvhost_set_nvmap_fd_args {
- __u32 fd;
-};
-
-#define NVHOST_IOCTL_CHANNEL_FLUSH \
- _IOR(NVHOST_IOCTL_MAGIC, 1, struct nvhost_get_param_args)
-#define NVHOST_IOCTL_CHANNEL_GET_SYNCPOINTS \
- _IOR(NVHOST_IOCTL_MAGIC, 2, struct nvhost_get_param_args)
-#define NVHOST_IOCTL_CHANNEL_GET_WAITBASES \
- _IOR(NVHOST_IOCTL_MAGIC, 3, struct nvhost_get_param_args)
-#define NVHOST_IOCTL_CHANNEL_GET_MODMUTEXES \
- _IOR(NVHOST_IOCTL_MAGIC, 4, struct nvhost_get_param_args)
-#define NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD \
- _IOW(NVHOST_IOCTL_MAGIC, 5, struct nvhost_set_nvmap_fd_args)
-#define NVHOST_IOCTL_CHANNEL_LAST \
- _IOC_NR(NVHOST_IOCTL_CHANNEL_SET_NVMAP_FD)
-#define NVHOST_IOCTL_CHANNEL_MAX_ARG_SIZE sizeof(struct nvhost_get_param_args)
-
-struct nvhost_ctrl_syncpt_read_args {
- __u32 id;
- __u32 value;
-};
-
-struct nvhost_ctrl_syncpt_incr_args {
- __u32 id;
-};
-
-struct nvhost_ctrl_syncpt_wait_args {
- __u32 id;
- __u32 thresh;
- __s32 timeout;
-};
-
-struct nvhost_ctrl_module_mutex_args {
- __u32 id;
- __u32 lock;
-};
-
-struct nvhost_ctrl_module_regrdwr_args {
- __u32 id;
- __u32 num_offsets;
- __u32 block_size;
- __u32 *offsets;
- __u32 *values;
- __u32 write;
-};
-
-#define NVHOST_IOCTL_CTRL_SYNCPT_READ \
- _IOWR(NVHOST_IOCTL_MAGIC, 1, struct nvhost_ctrl_syncpt_read_args)
-#define NVHOST_IOCTL_CTRL_SYNCPT_INCR \
- _IOW(NVHOST_IOCTL_MAGIC, 2, struct nvhost_ctrl_syncpt_incr_args)
-#define NVHOST_IOCTL_CTRL_SYNCPT_WAIT \
- _IOW(NVHOST_IOCTL_MAGIC, 3, struct nvhost_ctrl_syncpt_wait_args)
-
-#define NVHOST_IOCTL_CTRL_MODULE_MUTEX \
- _IOWR(NVHOST_IOCTL_MAGIC, 4, struct nvhost_ctrl_module_mutex_args)
-#define NVHOST_IOCTL_CTRL_MODULE_REGRDWR \
- _IOWR(NVHOST_IOCTL_MAGIC, 5, struct nvhost_ctrl_module_regrdwr_args)
-
-#define NVHOST_IOCTL_CTRL_LAST \
- _IOC_NR(NVHOST_IOCTL_CTRL_MODULE_REGRDWR)
-#define NVHOST_IOCTL_CTRL_MAX_ARG_SIZE sizeof(struct nvhost_ctrl_module_regrdwr_args)
-
-#endif
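
A hypothetical user-space sketch of the ctrl syncpt-wait path; the
/dev/nvhost-ctrl node name and the timeout units are assumptions about the
platform, not something this header specifies:

/* Hypothetical user-space sketch, not from this patch. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvhost.h>

static int wait_syncpt(__u32 id, __u32 thresh, __s32 timeout)
{
	struct nvhost_ctrl_syncpt_wait_args args = {
		.id = id,
		.thresh = thresh,
		.timeout = timeout,	/* or NVHOST_NO_TIMEOUT to block */
	};
	int fd = open("/dev/nvhost-ctrl", O_RDWR);	/* node name assumed */
	int err;

	if (fd < 0)
		return -1;
	err = ioctl(fd, NVHOST_IOCTL_CTRL_SYNCPT_WAIT, &args);
	close(fd);
	return err;
}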
+++ /dev/null
-/*
- * include/linux/nvmap.h
- *
- * structure declarations for nvmem and nvmap user-space ioctls
- *
- * Copyright (c) 2009, NVIDIA Corporation.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
- */
-
-#include <linux/ioctl.h>
-#include <linux/file.h>
-
-#if !defined(__KERNEL__)
-#define __user
-#endif
-
-#ifndef __NVMAP_H
-#define __NVMAP_H
-
-struct nvmem_create_handle {
- union {
- __u32 key; /* ClaimPreservedHandle */
- __u32 id; /* FromId */
- __u32 size; /* CreateHandle */
- };
- __u32 handle;
-};
-
-#define NVMEM_HEAP_SYSMEM (1ul<<31)
-#define NVMEM_HEAP_IOVMM (1ul<<30)
-
-/* common carveout heaps */
-#define NVMEM_HEAP_CARVEOUT_IRAM (1ul<<29)
-#define NVMEM_HEAP_CARVEOUT_GENERIC (1ul<<0)
-
-#define NVMEM_HEAP_CARVEOUT_MASK (NVMEM_HEAP_IOVMM - 1)
-
-#define NVMEM_HANDLE_UNCACHEABLE (0x0ul << 0)
-#define NVMEM_HANDLE_WRITE_COMBINE (0x1ul << 0)
-#define NVMEM_HANDLE_INNER_CACHEABLE (0x2ul << 0)
-#define NVMEM_HANDLE_CACHEABLE (0x3ul << 0)
-
-#define NVMEM_HANDLE_SECURE (0x1ul << 2)
-
-struct nvmem_alloc_handle {
- __u32 handle;
- __u32 heap_mask;
- __u32 flags;
- __u32 align;
-};
-
-struct nvmem_map_caller {
- __u32 handle; /* hmem */
- __u32 offset; /* offset into hmem; should be page-aligned */
- __u32 length; /* number of bytes to map */
- __u32 flags;
- unsigned long addr; /* user pointer */
-};
-
-struct nvmem_rw_handle {
- unsigned long addr; /* user pointer */
- __u32 handle; /* hmem */
- __u32 offset; /* offset into hmem */
- __u32 elem_size; /* individual atom size */
- __u32 hmem_stride; /* delta in bytes between atoms in hmem */
- __u32 user_stride; /* delta in bytes between atoms in user */
- __u32 count; /* number of atoms to copy */
-};
-
-struct nvmem_pin_handle {
- unsigned long handles; /* array of handles to pin/unpin */
- unsigned long addr; /* array of addresses to return */
- __u32 count; /* number of entries in handles */
-};
-
-struct nvmem_handle_param {
- __u32 handle;
- __u32 param;
- unsigned long result;
-};
-
-enum {
- NVMEM_HANDLE_PARAM_SIZE = 1,
- NVMEM_HANDLE_PARAM_ALIGNMENT,
- NVMEM_HANDLE_PARAM_BASE,
- NVMEM_HANDLE_PARAM_HEAP,
-};
-
-enum {
- NVMEM_CACHE_OP_WB = 0,
- NVMEM_CACHE_OP_INV,
- NVMEM_CACHE_OP_WB_INV,
-};
-
-struct nvmem_cache_op {
- unsigned long addr;
- __u32 handle;
- __u32 len;
- __s32 op;
-};
-
-#define NVMEM_IOC_MAGIC 'N'
-
-/* Creates a new memory handle. On input, the argument is the size of the new
- * handle; on return, the argument is the name of the new handle
- */
-#define NVMEM_IOC_CREATE _IOWR(NVMEM_IOC_MAGIC, 0, struct nvmem_create_handle)
-#define NVMEM_IOC_CLAIM _IOWR(NVMEM_IOC_MAGIC, 1, struct nvmem_create_handle)
-#define NVMEM_IOC_FROM_ID _IOWR(NVMEM_IOC_MAGIC, 2, struct nvmem_create_handle)
-
-/* Actually allocates memory for the specified handle */
-#define NVMEM_IOC_ALLOC _IOW (NVMEM_IOC_MAGIC, 3, struct nvmem_alloc_handle)
-
-/* Frees a memory handle, unpinning any pinned pages and unmapping any mappings
- */
-#define NVMEM_IOC_FREE _IO (NVMEM_IOC_MAGIC, 4)
-
-/* Maps the region of the specified handle into a user-provided virtual address
- * that was previously created via an mmap syscall on this fd */
-#define NVMEM_IOC_MMAP _IOWR(NVMEM_IOC_MAGIC, 5, struct nvmem_map_caller)
-
-/* Reads/writes data (possibly strided) from a user-provided buffer into the
- * hmem at the specified offset */
-#define NVMEM_IOC_WRITE _IOW (NVMEM_IOC_MAGIC, 6, struct nvmem_rw_handle)
-#define NVMEM_IOC_READ _IOW (NVMEM_IOC_MAGIC, 7, struct nvmem_rw_handle)
-
-#define NVMEM_IOC_PARAM _IOWR(NVMEM_IOC_MAGIC, 8, struct nvmem_handle_param)
-
-/* Pins a list of memory handles into IO-addressable memory (either IOVMM
- * space or physical memory, depending on the allocation), and returns the
- * address. Handles may be pinned recursively. */
-#define NVMEM_IOC_PIN_MULT _IOWR(NVMEM_IOC_MAGIC, 10, struct nvmem_pin_handle)
-#define NVMEM_IOC_UNPIN_MULT _IOW (NVMEM_IOC_MAGIC, 11, struct nvmem_pin_handle)
-
-#define NVMEM_IOC_CACHE _IOW (NVMEM_IOC_MAGIC, 12, struct nvmem_cache_op)
-
-/* Returns a global ID usable to allow a remote process to create a handle
- * reference to the same handle */
-#define NVMEM_IOC_GET_ID _IOWR(NVMEM_IOC_MAGIC, 13, struct nvmem_create_handle)
-
-#define NVMEM_IOC_MAXNR (_IOC_NR(NVMEM_IOC_GET_ID))
-
-#if defined(__KERNEL__)
-
-struct nvmap_handle;
-
-struct nvmap_pinarray_elem {
- struct nvmap_handle *patch_mem;
- u32 patch_offset;
- struct nvmap_handle *pin_mem;
- u32 pin_offset;
-};
-
-int nvmap_validate_file(struct file *filep);
-struct nvmap_handle *nvmap_alloc(
- size_t size, size_t align,
- unsigned int flags, void **map);
-void nvmap_free(struct nvmap_handle *h, void *map);
-u32 nvmap_pin_single(struct nvmap_handle *h);
-int nvmap_pin_array(struct file *filp,
- struct nvmap_pinarray_elem *arr, int num_elems,
- struct nvmap_handle **unique_arr, int *num_unique, bool wait);
-void nvmap_unpin(struct nvmap_handle **h, int num_handles);
-
-int nvmap_add_carveout_heap(unsigned long base, size_t size,
- const char *name, unsigned int bitmask);
-
-#endif
-
-#endif
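
Tying the pieces together, the flow this header documents for getting CPU
access to a buffer is create, alloc, then mmap-and-bind. A hypothetical
user-space sketch; the caller is assumed to have opened /dev/nvmap (node name
assumed), and the heap, flags, and page-sized alignment below are illustrative
choices:

/* Hypothetical user-space sketch, not from this patch. */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/nvmap.h>

static int alloc_and_map(int fd, __u32 size, void **out)
{
	struct nvmem_create_handle ch;
	struct nvmem_alloc_handle ah;
	struct nvmem_map_caller mc;
	void *va;

	memset(&ch, 0, sizeof(ch));
	ch.size = size;				/* in: size, out: handle */
	if (ioctl(fd, NVMEM_IOC_CREATE, &ch) < 0)
		return -1;

	memset(&ah, 0, sizeof(ah));
	ah.handle = ch.handle;
	ah.heap_mask = NVMEM_HEAP_CARVEOUT_GENERIC;
	ah.flags = NVMEM_HANDLE_WRITE_COMBINE;
	ah.align = 4096;
	if (ioctl(fd, NVMEM_IOC_ALLOC, &ah) < 0)
		return -1;

	/* reserve a VA range on this fd, then bind the handle into it,
	 * per the NVMEM_IOC_MMAP comment above */
	va = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (va == MAP_FAILED)
		return -1;

	memset(&mc, 0, sizeof(mc));
	mc.handle = ch.handle;
	mc.offset = 0;
	mc.length = size;
	mc.addr = (unsigned long)va;
	if (ioctl(fd, NVMEM_IOC_MMAP, &mc) < 0)
		return -1;

	*out = va;
	return 0;
}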
+++ /dev/null
-/*
- * include/video/tegrafb.h
- *
- * Copyright (C) 2010 Google, Inc.
- * Author: Erik Gilling <konkers@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#ifndef _LINUX_TEGRAFB_H_
-#define _LINUX_TEGRAFB_H_
-
-#include <linux/types.h>
-#include <asm/ioctl.h>
-
-#define TEGRA_FB_WIN_FMT_P1 0
-#define TEGRA_FB_WIN_FMT_P2 1
-#define TEGRA_FB_WIN_FMT_P4 2
-#define TEGRA_FB_WIN_FMT_P8 3
-#define TEGRA_FB_WIN_FMT_B4G4R4A4 4
-#define TEGRA_FB_WIN_FMT_B5G5R5A 5
-#define TEGRA_FB_WIN_FMT_B5G6R5 6
-#define TEGRA_FB_WIN_FMT_AB5G5R5 7
-#define TEGRA_FB_WIN_FMT_B8G8R8A8 12
-#define TEGRA_FB_WIN_FMT_R8G8B8A8 13
-#define TEGRA_FB_WIN_FMT_B6x2G6x2R6x2A8 14
-#define TEGRA_FB_WIN_FMT_R6x2G6x2B6x2A8 15
-#define TEGRA_FB_WIN_FMT_YCbCr422 16
-#define TEGRA_FB_WIN_FMT_YUV422 17
-#define TEGRA_FB_WIN_FMT_YCbCr420P 18
-#define TEGRA_FB_WIN_FMT_YUV420P 19
-#define TEGRA_FB_WIN_FMT_YCbCr422P 20
-#define TEGRA_FB_WIN_FMT_YUV422P 21
-#define TEGRA_FB_WIN_FMT_YCbCr422R 22
-#define TEGRA_FB_WIN_FMT_YUV422R 23
-#define TEGRA_FB_WIN_FMT_YCbCr422RA 24
-#define TEGRA_FB_WIN_FMT_YUV422RA 25
-
-#define TEGRA_FB_WIN_BLEND_NONE 0
-#define TEGRA_FB_WIN_BLEND_PREMULT 1
-#define TEGRA_FB_WIN_BLEND_COVERAGE 2
-
-/* set index to -1 to ignore window data */
-struct tegra_fb_windowattr {
- __s32 index;
- __u32 buff_id;
- __u32 blend;
- __u32 offset;
- __u32 stride;
- __u32 pixformat;
- __u32 x;
- __u32 y;
- __u32 w;
- __u32 h;
- __u32 out_x;
- __u32 out_y;
- __u32 out_w;
- __u32 out_h;
- __u32 z;
- __u32 pre_syncpt_id;
- __u32 pre_syncpt_val;
-};
-
-#define TEGRA_FB_FLIP_N_WINDOWS 3
-
-struct tegra_fb_flip_args {
- struct tegra_fb_windowattr win[TEGRA_FB_FLIP_N_WINDOWS];
- __u32 post_syncpt_id;
- __u32 post_syncpt_val;
-};
-
-#define FBIO_TEGRA_SET_NVMAP_FD _IOW('F', 0x40, __u32)
-#define FBIO_TEGRA_FLIP _IOW('F', 0x41, struct tegra_fb_flip_args)
-
-#endif
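
A hypothetical user-space sketch of a single-window flip through this
interface; the framebuffer fd and the 4-bytes-per-pixel stride are
illustrative assumptions:

/* Hypothetical user-space sketch, not from this patch. */
#include <string.h>
#include <sys/ioctl.h>
#include <video/tegrafb.h>

static int flip_one_window(int fb_fd, __u32 buff_id, __u32 w, __u32 h)
{
	struct tegra_fb_flip_args flip;
	int i;

	memset(&flip, 0, sizeof(flip));
	for (i = 0; i < TEGRA_FB_FLIP_N_WINDOWS; i++)
		flip.win[i].index = -1;		/* -1 ignores a window */

	flip.win[0].index = 0;
	flip.win[0].buff_id = buff_id;
	flip.win[0].pixformat = TEGRA_FB_WIN_FMT_B8G8R8A8;
	flip.win[0].blend = TEGRA_FB_WIN_BLEND_NONE;
	flip.win[0].w = flip.win[0].out_w = w;
	flip.win[0].h = flip.win[0].out_h = h;
	flip.win[0].stride = w * 4;		/* assumes 32bpp */

	if (ioctl(fb_fd, FBIO_TEGRA_FLIP, &flip) < 0)
		return -1;

	/* flip.post_syncpt_id/val now name the fence for this flip */
	return 0;
}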