--- /dev/null
+/* arch/arm/mach-rk29/vpu_mem.c\r
+ *\r
+ * Copyright (C) 2010 ROCKCHIP, Inc.\r
+ *\r
+ * This software is licensed under the terms of the GNU General Public\r
+ * License version 2, as published by the Free Software Foundation, and\r
+ * may be copied, distributed, and modified under those terms.\r
+ *\r
+ * This program is distributed in the hope that it will be useful,\r
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of\r
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
+ * GNU General Public License for more details.\r
+ *\r
+ */\r
+\r
+#include <linux/miscdevice.h>\r
+#include <linux/platform_device.h>\r
+#include <linux/fs.h>\r
+#include <linux/file.h>\r
+#include <linux/mm.h>\r
+#include <linux/list.h>\r
+#include <linux/debugfs.h>\r
+#include <linux/mempolicy.h>\r
+#include <linux/sched.h>\r
+#include <asm/io.h>\r
+#include <asm/uaccess.h>\r
+#include <asm/cacheflush.h>\r
+\r
+#include <mach/vpu_mem.h>\r
+\r
+\r
+#define VPU_MEM_MAX_ORDER 128\r
+#define VPU_MEM_MIN_ALLOC PAGE_SIZE\r
+\r
+#define VPU_MEM_DEBUG 1\r
+\r
+#define VPU_MEM_BITMAP_ERR (-1)\r
+#define VPU_MEM_ERR_FREE_REFN_ERR (-5)\r
+\r
+struct vpu_mem_data {\r
+ /* protects this data field; if the mm mmap_sem will be held at the
+ * same time as this sem, the mm sem must be taken first (as this is
+ * the order for the vma_open and vma_close ops) */
+ struct rw_semaphore sem;\r
+ /* process id of the mapping process */
+ pid_t pid;\r
+ /* the list of regions currently held by this client */
+ struct list_head region_list;\r
+ /* a linked list of data so we can access them for debugging */\r
+ struct list_head list;\r
+};\r
+\r
+struct vpu_mem_bits {\r
+ unsigned short pfn; /* number of pages in this block - vpu_mem space max 256M */
+ signed refrn:7; /* reference number */\r
+ unsigned first:1; /* 1 if first, 0 if not first */\r
+ signed avail:7; /* available link number */\r
+ unsigned allocated:1; /* 1 if allocated, 0 if free */\r
+};\r
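+
+/* The bitmap is an array of vpu_mem_bits, one entry per page. Blocks are
+ * encoded implicitly: the first entry of each block carries the block
+ * length in .pfn, so the allocator walks the region with
+ * index += bitmap[index].pfn. A sketch, assuming a fresh 256-entry region
+ * after one 16-page allocation:
+ *   bitmap[0]  = { .pfn = 16,  .first = 1, .allocated = 1, .refrn = 1, .avail = 1 }
+ *   bitmap[16] = { .pfn = 240, .first = 1, .allocated = 0 }
+ * (entries 1..15 have .allocated = 1 but .first = 0) */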
+\r
+struct vpu_mem_region {\r
+ unsigned long offset;\r
+ unsigned long len;\r
+};\r
+\r
+struct vpu_mem_region_node {\r
+ struct vpu_mem_region region;\r
+ struct list_head list;\r
+};\r
+\r
+#define VPU_MEM_DEBUG_MSGS 0\r
+#if VPU_MEM_DEBUG_MSGS\r
+#define DLOG(fmt,args...) \\r
+ do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \\r
+ ##args); } \\r
+ while (0)\r
+#else\r
+#define DLOG(x...) do {} while (0)\r
+#endif\r
+\r
+struct vpu_mem_info {\r
+ struct miscdevice dev;\r
+ /* physical start address of the remapped vpu_mem space */
+ unsigned long base;\r
+ /* virtual start address of the remapped vpu_mem space */
+ unsigned char __iomem *vbase;\r
+ /* total size of the vpu_mem space */\r
+ unsigned long size;\r
+ /* number of entries in the vpu_mem space */\r
+ unsigned long num_entries;\r
+ /* indicates maps of this region should be cached, if a mix of\r
+ * cached and uncached is desired, set this and open the device with\r
+ * O_SYNC to get an uncached region */\r
+ unsigned cached;\r
+ unsigned buffered;\r
+\r
+ /* no master mode is needed; the data_list below exists only for
+ * debugging. It is a list of vpu_mem_data structs, one per open file;
+ * data_list_sem should be taken before vpu_mem_data->sem if both are
+ * needed */
+ struct semaphore data_list_sem;\r
+ struct list_head data_list;\r
+\r
+ /* the bitmap for the region indicating which entries are allocated\r
+ * and which are free */\r
+ struct vpu_mem_bits *bitmap;\r
+ /* vpu_mem_sem protects the bitmap array\r
+ * a write lock should be held when modifying entries in bitmap\r
+ * a read lock should be held when reading data from the bitmap or
+ * dereferencing a pointer into it
+ *\r
+ * vpu_mem_data->sem protects the vpu_mem data of a particular file\r
+ * Many of the functions that require the vpu_mem_data->sem have a non-
+ * locking version for when the caller is already holding that sem.\r
+ *\r
+ * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER:\r
+ * down(vpu_mem_data->sem) => down(bitmap_sem)\r
+ */\r
+ struct rw_semaphore bitmap_sem;\r
+};\r
+\r
+static struct vpu_mem_info vpu_mem;\r
+static int vpu_mem_count;\r
+\r
+#define VPU_MEM_IS_FREE(index) (!(vpu_mem.bitmap[index].allocated))
+#define VPU_MEM_IS_FIRST(index) (vpu_mem.bitmap[index].first)
+#define VPU_MEM_BIT(index) (&vpu_mem.bitmap[index])
+#define VPU_MEM_PFN(index) (vpu_mem.bitmap[index].pfn)
+#define VPU_MEM_NEXT_INDEX(index) ((index) + VPU_MEM_PFN(index))
+#define VPU_MEM_OFFSET(index) ((index) * VPU_MEM_MIN_ALLOC)
+#define VPU_MEM_START_ADDR(index) (VPU_MEM_OFFSET(index) + vpu_mem.base)
+#define VPU_MEM_SIZE(index) (VPU_MEM_PFN(index) * VPU_MEM_MIN_ALLOC)
+#define VPU_MEM_END_ADDR(index) (VPU_MEM_START_ADDR(index) + VPU_MEM_SIZE(index))
+#define VPU_MEM_START_VADDR(index) (VPU_MEM_OFFSET(index) + vpu_mem.vbase)
+#define VPU_MEM_END_VADDR(index) (VPU_MEM_START_VADDR(index) + VPU_MEM_SIZE(index))
+#define VPU_MEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
+\r
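+/* Worked example (a sketch: 0x70000000 is a hypothetical base address and
+ * 4 KiB pages are assumed). With bitmap[4].pfn = 8:
+ *   VPU_MEM_OFFSET(4)     = 4 * PAGE_SIZE = 0x4000
+ *   VPU_MEM_START_ADDR(4) = 0x70004000
+ *   VPU_MEM_SIZE(4)       = 8 * PAGE_SIZE = 0x8000
+ *   VPU_MEM_NEXT_INDEX(4) = 12, the first entry of the following block
+ */
+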
+static int vpu_mem_release(struct inode *, struct file *);\r
+static int vpu_mem_mmap(struct file *, struct vm_area_struct *);\r
+static int vpu_mem_open(struct inode *, struct file *);\r
+static long vpu_mem_ioctl(struct file *, unsigned int, unsigned long);\r
+\r
+struct file_operations vpu_mem_fops = {\r
+ .open = vpu_mem_open,\r
+ .mmap = vpu_mem_mmap,\r
+ .unlocked_ioctl = vpu_mem_ioctl,\r
+ .release = vpu_mem_release,\r
+};\r
+\r
+int is_vpu_mem_file(struct file *file)\r
+{\r
+ if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode))\r
+ return 0;\r
+ if (unlikely(file->f_dentry->d_inode->i_rdev !=\r
+ MKDEV(MISC_MAJOR, vpu_mem.dev.minor)))\r
+ return 0;\r
+ return 1;\r
+}\r
+\r
+\r
+static void vpu_mem_get_region(struct vpu_mem_data *data,\r
+ struct vpu_mem_region_node *region_node,\r
+ int index, int pfn)\r
+{\r
+ int curr, next = index + pfn;\r
+ struct vpu_mem_bits *pbits;\r
+\r
+ if (next < vpu_mem.num_entries) {
+ if (VPU_MEM_IS_FREE(next)) {
+ /* split off the remainder as a new free block; on an
+ * exact fit do not touch the following block */
+ if (VPU_MEM_PFN(index) > pfn) {
+ pbits = VPU_MEM_BIT(next);
+ pbits->first = 1;
+ pbits->pfn = VPU_MEM_PFN(index) - pfn;
+ }
+ } else if (!VPU_MEM_IS_FIRST(next)) {
+ DLOG("something wrong when get_region pfn %d at index %d\n", pfn, index);
+ }
+ }
+\r
+ pbits = VPU_MEM_BIT(index);\r
+\r
+ pbits->first = 1;\r
+ pbits->pfn = pfn;\r
+ pbits->refrn++;\r
+ pbits->avail++;\r
+\r
+ for (curr = 0; curr < pfn; curr++)\r
+ pbits[curr].allocated = 1;\r
+\r
+ region_node->region.offset = index;\r
+ region_node->region.len = pfn;\r
+\r
+ down_write(&data->sem);\r
+ list_add(&region_node->list, &data->region_list);
+ up_write(&data->sem);\r
+}\r
+\r
+static int vpu_mem_put_region_by_index(struct vpu_mem_data *data, int index)\r
+{\r
+ struct vpu_mem_bits *pbits = VPU_MEM_BIT(index);\r
+ pbits->refrn--;\r
+ pbits->avail--;\r
+\r
+ if (!pbits->avail)\r
+ {\r
+ int i;\r
+ for (i = 0; i < pbits->pfn; i++)\r
+ pbits[i].allocated = 0;\r
+\r
+ down_write(&data->sem);\r
+ {\r
+ struct vpu_mem_region_node *region_node;\r
+ struct list_head *elt, *elt2;\r
+ list_for_each_safe(elt, elt2, &data->region_list) {\r
+ region_node = list_entry(elt, struct vpu_mem_region_node, list);\r
+ if (region_node->region.offset == index)\r
+ {\r
+ if (pbits->pfn != region_node->region.len)\r
+ DLOG("something wrong when put_region at index %d\n", index);\r
+ list_del(elt);\r
+ kfree(region_node);\r
+ break;\r
+ }\r
+ }\r
+ }\r
+ up_write(&data->sem);\r
+ }\r
+ return 0;\r
+}\r
+\r
+static int vpu_mem_put_region_by_region(struct vpu_mem_region_node *region_node)\r
+{\r
+ int index = region_node->region.offset;\r
+ struct vpu_mem_bits *pbits = VPU_MEM_BIT(index);\r
+ pbits->refrn--;\r
+ pbits->avail--;\r
+\r
+ if (!pbits->avail)\r
+ {\r
+ int i;\r
+ for (i = 0; i < pbits->pfn; i++)\r
+ pbits[i].allocated = 0;\r
+\r
+ list_del(&region_node->list);
+ kfree(region_node);\r
+ }\r
+\r
+ return 0;\r
+}\r
+\r
+static long vpu_mem_allocate(struct file *file, unsigned int len)\r
+{\r
+ /* caller should hold the write lock on vpu_mem_sem! */\r
+ /* return the corresponding pdata[] entry */\r
+ int curr = 0;\r
+ int end = vpu_mem.num_entries;\r
+ int best_fit = -1;\r
+ unsigned int pfn = (len + VPU_MEM_MIN_ALLOC - 1)/VPU_MEM_MIN_ALLOC;\r
+ struct vpu_mem_data *data = (struct vpu_mem_data *)file->private_data;\r
+ struct vpu_mem_region_node *region_node;\r
+\r
+ if (!is_vpu_mem_file(file)) {\r
+#if VPU_MEM_DEBUG\r
+ printk(KERN_INFO "allocate vpu_mem data from invalid file.\n");\r
+#endif\r
+ return -ENODEV;\r
+ }\r
+\r
+ DLOG("vpu_mem_allocate pfn %x\n", pfn);\r
+\r
+ region_node = kmalloc(sizeof(struct vpu_mem_region_node),\r
+ GFP_KERNEL);\r
+ if (!region_node) {\r
+#if VPU_MEM_DEBUG\r
+ printk(KERN_INFO "No space to allocate metadata!");\r
+#endif\r
+ return -ENOMEM;\r
+ }\r
+\r
+ /* walk the block list and take the first free block that is
+ * large enough for the request (first fit)
+ */
+ while (curr < end) {\r
+ if (VPU_MEM_IS_FREE(curr)) {\r
+ if (VPU_MEM_PFN(curr) >= pfn) {
+ /* first fit found */
+ best_fit = curr;
+ DLOG("found fit size at index %d\n", curr);
+ break;
+ }\r
+ }\r
+ curr = VPU_MEM_NEXT_INDEX(curr);\r
+ }\r
+\r
+ /* if best_fit < 0, there are no suitable slots,\r
+ * return an error\r
+ */\r
+ if (best_fit < 0) {
+ printk("vpu_mem: no space left to allocate!\n");
+ kfree(region_node);
+ return -ENOMEM;
+ }
+\r
+ DLOG("best_fit: %d next: %u\n", best_fit, best_fit + pfn);\r
+\r
+ vpu_mem_get_region(data, region_node, best_fit, pfn);\r
+\r
+ return best_fit;\r
+}\r
+\r
+static int vpu_mem_free(struct file *file, int index)\r
+{\r
+ /* caller should hold the write lock on vpu_mem_sem! */\r
+ struct vpu_mem_bits *pbits = VPU_MEM_BIT(index);\r
+ struct vpu_mem_data *data = (struct vpu_mem_data *)file->private_data;\r
+\r
+ if (!is_vpu_mem_file(file)) {\r
+#if VPU_MEM_DEBUG\r
+ printk(KERN_INFO "free vpu_mem data from invalid file.\n");\r
+#endif\r
+ return -ENODEV;\r
+ }\r
+\r
+ DLOG("free index %d\n", index);\r
+\r
+ if ((!pbits->first) ||\r
+ (!pbits->allocated) ||\r
+ ((pbits->refrn - 1) < 0) ||\r
+ ((pbits->avail - 1) < 0))\r
+ {\r
+ DLOG("VPM ERR: found error in vpu_mem_free :\nvpu_mem.bitmap[%d].first %d, allocated %d, avail %d, refrn %d\n",\r
+ index, pbits->first, pbits->allocated, pbits->avail, pbits->refrn);\r
+ return VPU_MEM_BITMAP_ERR;\r
+ }\r
+\r
+ return vpu_mem_put_region_by_index(data, index);\r
+}\r
+\r
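+/* Buffers are shared in a two-step handshake: the current owner calls
+ * vpu_mem_duplicate() to bump .avail and reserve the share, then the
+ * receiving client calls vpu_mem_link() to bump .refrn and add the region
+ * to its own region_list (link requires avail > refrn, i.e. a pending
+ * duplicate). Each put decrements both counters; the pages go back to the
+ * free pool once .avail reaches zero. */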
+static long vpu_mem_duplicate(struct file *file, int index)\r
+{\r
+ /* caller should hold the write lock on vpu_mem_sem! */\r
+ struct vpu_mem_bits *pbits = VPU_MEM_BIT(index);\r
+\r
+ if (!is_vpu_mem_file(file)) {\r
+#if VPU_MEM_DEBUG\r
+ printk(KERN_INFO "duplicate vpu_mem data from invalid file.\n");\r
+#endif\r
+ return -ENODEV;\r
+ }\r
+\r
+ DLOG("duplicate index %d\n", index);\r
+\r
+ if ((!pbits->first) ||\r
+ (!pbits->allocated) ||\r
+ (!pbits->avail))\r
+ {\r
+ DLOG("VPM ERR: found error in vpu_mem_duplicate :\nvpu_mem.bitmap[%d].first %d, allocated %d, avail %d, refrn %d\n",\r
+ index, pbits->first, pbits->allocated, pbits->avail, pbits->refrn);\r
+ return VPU_MEM_BITMAP_ERR;\r
+ }\r
+\r
+ pbits->avail++;\r
+\r
+ return 0;\r
+}\r
+\r
+static long vpu_mem_link(struct file *file, int index)\r
+{\r
+ struct vpu_mem_bits *pbits = VPU_MEM_BIT(index);\r
+ struct vpu_mem_data *data = (struct vpu_mem_data *)file->private_data;\r
+ struct vpu_mem_region_node *region_node;\r
+\r
+ if (!is_vpu_mem_file(file)) {\r
+#if VPU_MEM_DEBUG\r
+ printk(KERN_INFO "link vpu_mem data from invalid file.\n");\r
+#endif\r
+ return -ENODEV;\r
+ }\r
+\r
+ region_node = kmalloc(sizeof(struct vpu_mem_region_node),\r
+ GFP_KERNEL);\r
+ if (!region_node) {\r
+#if VPU_MEM_DEBUG\r
+ printk(KERN_INFO "No space to allocate metadata!");\r
+#endif\r
+ return -ENOMEM;\r
+ }\r
+\r
+ /* caller should hold the write lock on vpu_mem_sem! */\r
+ DLOG("link index %d\n", index);\r
+\r
+ if ((!pbits->first) ||\r
+ (!pbits->allocated) ||\r
+ (!pbits->avail) ||\r
+ (pbits->avail <= pbits->refrn))\r
+ {\r
+ DLOG("VPM ERR: found error in vpu_mem_duplicate :\nvpu_mem.bitmap[%d].first %d, allocated %d, avail %d, refrn %d\n",\r
+ index, pbits->first, pbits->allocated, pbits->avail, pbits->refrn);\r
+ return VPU_MEM_BITMAP_ERR;\r
+ }\r
+\r
+ pbits->refrn++;\r
+\r
+ region_node->region.offset = index;\r
+ region_node->region.len = pbits->pfn;\r
+\r
+ down_write(&data->sem);\r
+ list_add(&region_node->list, &data->region_list);
+ up_write(&data->sem);\r
+\r
+ return 0;\r
+}\r
+\r
+void vpu_mem_flush(struct file *file, long index)\r
+{\r
+ struct vpu_mem_data *data;\r
+ void *flush_start, *flush_end;\r
+\r
+ if (!is_vpu_mem_file(file)) {\r
+ return;\r
+ }\r
+\r
+ data = (struct vpu_mem_data *)file->private_data;\r
+ if (!vpu_mem.cached || file->f_flags & O_SYNC)\r
+ return;\r
+\r
+ down_read(&data->sem);\r
+ flush_start = VPU_MEM_START_VADDR(index);\r
+ flush_end = VPU_MEM_END_VADDR(index);\r
+ dmac_flush_range(flush_start, flush_end);\r
+ up_read(&data->sem);\r
+}\r
+\r
+static pgprot_t phys_mem_access_prot(struct file *file, pgprot_t vma_prot)\r
+{\r
+#ifdef pgprot_noncached\r
+ if (vpu_mem.cached == 0 || file->f_flags & O_SYNC)\r
+ return pgprot_noncached(vma_prot);\r
+#endif\r
+#ifdef pgprot_ext_buffered\r
+ else if (vpu_mem.buffered)\r
+ return pgprot_ext_buffered(vma_prot);\r
+#endif\r
+ return vma_prot;\r
+}\r
+\r
+static int vpu_mem_map_pfn_range(struct vm_area_struct *vma, unsigned long len)\r
+{\r
+ DLOG("map len %lx\n", len);\r
+ BUG_ON(!VPU_MEM_IS_PAGE_ALIGNED(vma->vm_start));\r
+ BUG_ON(!VPU_MEM_IS_PAGE_ALIGNED(vma->vm_end));\r
+ BUG_ON(!VPU_MEM_IS_PAGE_ALIGNED(len));\r
+ if (io_remap_pfn_range(vma, vma->vm_start,\r
+ vpu_mem.base >> PAGE_SHIFT,\r
+ len, vma->vm_page_prot)) {\r
+ return -EAGAIN;\r
+ }\r
+ return 0;\r
+}\r
+\r
+static int vpu_mem_open(struct inode *inode, struct file *file)\r
+{\r
+ struct vpu_mem_data *data;\r
+ int ret = 0;\r
+\r
+ DLOG("current %u file %p(%d)\n", current->pid, file, (int)file_count(file));\r
+ /* setup file->private_data to indicate it's unmapped */
+ /* you can only open a vpu_mem device one time */
+ if (file->private_data != NULL)
+ return -EBUSY;
+ data = kmalloc(sizeof(struct vpu_mem_data), GFP_KERNEL);
+ if (!data) {
+ printk(KERN_ALERT "vpu_mem: unable to allocate memory for vpu_mem metadata.\n");
+ return -ENOMEM;
+ }\r
+ data->pid = 0;\r
+\r
+ INIT_LIST_HEAD(&data->region_list);\r
+ init_rwsem(&data->sem);\r
+\r
+ file->private_data = data;\r
+ INIT_LIST_HEAD(&data->list);\r
+\r
+ down(&vpu_mem.data_list_sem);\r
+ list_add(&data->list, &vpu_mem.data_list);\r
+ up(&vpu_mem.data_list_sem);\r
+ return ret;\r
+}\r
+\r
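+/* Each client mmaps the entire vpu_mem region at offset zero (any other
+ * size is rejected below); individual buffers are then reached through
+ * their entry index, at map_base + VPU_MEM_OFFSET(index). */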
+static int vpu_mem_mmap(struct file *file, struct vm_area_struct *vma)\r
+{\r
+ struct vpu_mem_data *data;\r
+ unsigned long vma_size = vma->vm_end - vma->vm_start;\r
+ int ret = 0;\r
+\r
+ if (vma->vm_pgoff || !VPU_MEM_IS_PAGE_ALIGNED(vma_size)) {\r
+#if VPU_MEM_DEBUG\r
+ printk(KERN_ERR "vpu_mem: mmaps must be at offset zero, aligned"\r
+ " and a multiple of pages_size.\n");\r
+#endif\r
+ return -EINVAL;\r
+ }\r
+\r
+ data = (struct vpu_mem_data *)file->private_data;\r
+\r
+ printk(KERN_ALERT "file->private_data : 0x%x\n", (unsigned int)data);\r
+\r
+ down_write(&data->sem);\r
+\r
+ /* assert: vma_size must be the total size of the vpu_mem */\r
+ if (vpu_mem.size != vma_size) {\r
+#if VPU_MEM_DEBUG\r
+ printk(KERN_WARNING "vpu_mem: mmap size [%lu] does not match"\r
+ "size of backing region [%lu].\n", vma_size, vpu_mem.size);\r
+#endif\r
+ ret = -EINVAL;\r
+ goto error;\r
+ }\r
+\r
+ vma->vm_pgoff = vpu_mem.base >> PAGE_SHIFT;\r
+ vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_page_prot);\r
+\r
+ if (vpu_mem_map_pfn_range(vma, vma_size)) {\r
+ printk(KERN_INFO "vpu_mem: mmap failed in kernel!\n");\r
+ ret = -EAGAIN;\r
+ goto error;\r
+ }\r
+\r
+ data->pid = current->pid;\r
+\r
+error:\r
+ up_write(&data->sem);\r
+ return ret;\r
+}\r
+\r
+static int vpu_mem_release(struct inode *inode, struct file *file)\r
+{\r
+ struct vpu_mem_data *data = (struct vpu_mem_data *)file->private_data;\r
+ struct list_head *elt, *elt2;\r
+\r
+ down(&vpu_mem.data_list_sem);\r
+ list_del(&data->list);\r
+ up(&vpu_mem.data_list_sem);\r
+\r
+ down_write(&data->sem);\r
+ file->private_data = NULL;\r
+ list_for_each_safe(elt, elt2, &data->region_list) {\r
+ struct vpu_mem_region_node *region_node = list_entry(elt, struct vpu_mem_region_node, list);\r
+ vpu_mem_put_region_by_region(region_node);\r
+ }\r
+ BUG_ON(!list_empty(&data->region_list));\r
+ up_write(&data->sem);\r
+ kfree(data);\r
+\r
+ return 0;\r
+}\r
+\r
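+/* A minimal user-space sketch of the ioctl protocol (the device node name
+ * is hypothetical, 4 KiB pages are assumed, and the VPU_MEM_* request
+ * codes come from <mach/vpu_mem.h>):
+ *
+ *   int fd = open("/dev/vpu_mem", O_RDWR);
+ *   unsigned long total = 0;
+ *   ioctl(fd, VPU_MEM_GET_TOTAL_SIZE, &total);
+ *   void *va = mmap(NULL, total, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *   unsigned int len = 64 * 4096;                     // bytes, rounded up to pages
+ *   long index = ioctl(fd, VPU_MEM_ALLOCATE, &len);   // >= 0 is an entry index
+ *   // ... use va + index * 4096 ...
+ *   ioctl(fd, VPU_MEM_FREE, &index);
+ *   munmap(va, total);
+ *   close(fd);
+ */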
+static long vpu_mem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)\r
+{\r
+ long index, ret = 0;\r
+\r
+ switch (cmd) {\r
+ case VPU_MEM_GET_PHYS:\r
+ DLOG("get_phys\n");\r
+ printk(KERN_INFO "vpu_mem: request for physical address of vpu_mem region "\r
+ "from process %d.\n", current->pid);\r
+ if (copy_to_user((void __user *)arg, &vpu_mem.base, sizeof(vpu_mem.base)))\r
+ return -EFAULT;\r
+ break;\r
+ case VPU_MEM_GET_TOTAL_SIZE:\r
+ DLOG("get total size\n");\r
+ if (copy_to_user((void __user *)arg, &vpu_mem.size, sizeof(vpu_mem.size)))\r
+ return -EFAULT;\r
+ break;\r
+ case VPU_MEM_ALLOCATE:\r
+ DLOG("allocate\n");\r
+ {\r
+ unsigned int size;\r
+ if (copy_from_user(&size, (void __user *)arg, sizeof(size)))\r
+ return -EFAULT;\r
+ down_write(&vpu_mem.bitmap_sem);\r
+ index = vpu_mem_allocate(file, size);\r
+ up_write(&vpu_mem.bitmap_sem);\r
+ DLOG("allocate at index %ld\n", index);\r
+ return index;
+ }\r
+ case VPU_MEM_FREE:\r
+ {\r
+ DLOG("mem free\n");\r
+ if (copy_from_user(&index, (void __user *)arg, sizeof(index)))\r
+ return -EFAULT;\r
+ if (index < 0 || (unsigned long)index >= vpu_mem.num_entries)
+ return -EACCES;
+ down_write(&vpu_mem.bitmap_sem);\r
+ ret = vpu_mem_free(file, index);\r
+ up_write(&vpu_mem.bitmap_sem);\r
+ return ret;
+ }\r
+ case VPU_MEM_CACHE_FLUSH:\r
+ {\r
+ DLOG("flush\n");\r
+ if (copy_from_user(&index, (void __user *)arg, sizeof(index)))\r
+ return -EFAULT;\r
+\r
+ down_write(&vpu_mem.bitmap_sem);\r
+ vpu_mem_flush(file, index);\r
+ up_write(&vpu_mem.bitmap_sem);\r
+ break;\r
+ }\r
+ case VPU_MEM_DUPLICATE:\r
+ {\r
+ DLOG("duplicate\n");\r
+ if (copy_from_user(&index, (void __user *)arg, sizeof(index)))\r
+ return -EFAULT;\r
+ down_write(&vpu_mem.bitmap_sem);\r
+ ret = vpu_mem_duplicate(file, index);\r
+ up_write(&vpu_mem.bitmap_sem);\r
+ return ret;
+ }\r
+ case VPU_MEM_LINK:\r
+ {\r
+ DLOG("link\n");\r
+ if (copy_from_user(&index, (void __user *)arg, sizeof(index)))\r
+ return -EFAULT;\r
+ down_write(&vpu_mem.bitmap_sem);\r
+ ret = vpu_mem_link(file, index);\r
+ up_write(&vpu_mem.bitmap_sem);\r
+ break;\r
+ }\r
+ case VPU_MEM_MAP:\r
+ DLOG("map\n");\r
+ break;\r
+ case VPU_MEM_CONNECT:\r
+ DLOG("connect\n");\r
+ break;\r
+ case VPU_MEM_GET_SIZE:\r
+ DLOG("get_size\n");\r
+ break;\r
+ case VPU_MEM_UNMAP:\r
+ DLOG("unmap\n");\r
+ break;\r
+ default:\r
+ return -EINVAL;\r
+ }\r
+ return ret;\r
+}\r
+\r
+#if VPU_MEM_DEBUG\r
+static int debug_open(struct inode *inode, struct file *file)
+{\r
+ file->private_data = inode->i_private;\r
+ return 0;\r
+}\r
+\r
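+/* Dumps, for each client, the regions it currently holds. Sample output
+ * (the pid and region values are illustrative; offset and length are the
+ * entry index and page count in hex):
+ *   pid #: mapped regions (offset, len) (offset,len)...
+ *   pid 1234:(0,10) (40,8)
+ */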
+static ssize_t debug_read(struct file *file, char __user *buf, size_t count,\r
+ loff_t *ppos)\r
+{\r
+ struct list_head *elt, *elt2;\r
+ struct vpu_mem_data *data;\r
+ struct vpu_mem_region_node *region_node;\r
+ const int debug_bufmax = 4096;\r
+ static char buffer[4096];\r
+ int n = 0;\r
+\r
+ DLOG("debug open\n");\r
+ n = scnprintf(buffer, debug_bufmax,\r
+ "pid #: mapped regions (offset, len) (offset,len)...\n");\r
+\r
+ down(&vpu_mem.data_list_sem);\r
+ list_for_each(elt, &vpu_mem.data_list) {\r
+ data = list_entry(elt, struct vpu_mem_data, list);\r
+ down_read(&data->sem);\r
+ n += scnprintf(buffer + n, debug_bufmax - n, "pid %u:",\r
+ data->pid);\r
+ list_for_each(elt2, &data->region_list) {\r
+ region_node = list_entry(elt2, struct vpu_mem_region_node,\r
+ list);\r
+ n += scnprintf(buffer + n, debug_bufmax - n,\r
+ "(%lx,%lx) ",\r
+ region_node->region.offset,\r
+ region_node->region.len);\r
+ }\r
+ n += scnprintf(buffer + n, debug_bufmax - n, "\n");\r
+ up_read(&data->sem);\r
+ }\r
+ up(&vpu_mem.data_list_sem);\r
+\r
+ buffer[n] = 0;
+ return simple_read_from_buffer(buf, count, ppos, buffer, n);\r
+}\r
+\r
+static struct file_operations debug_fops = {\r
+ .read = debug_read,\r
+ .open = debug_open,\r
+};\r
+#endif\r
+\r
+int vpu_mem_setup(struct vpu_mem_platform_data *pdata)\r
+{\r
+ int err = 0;\r
+\r
+ if (vpu_mem_count)\r
+ {\r
+ printk(KERN_ALERT "Only one vpu_mem driver can be register!\n");\r
+ goto err_cant_register_device;\r
+ }\r
+\r
+ memset(&vpu_mem, 0, sizeof(struct vpu_mem_info));\r
+\r
+ vpu_mem.cached = pdata->cached;\r
+ vpu_mem.buffered = pdata->buffered;\r
+ vpu_mem.base = pdata->start;\r
+ vpu_mem.size = pdata->size;\r
+ init_rwsem(&vpu_mem.bitmap_sem);\r
+ init_MUTEX(&vpu_mem.data_list_sem);\r
+ INIT_LIST_HEAD(&vpu_mem.data_list);\r
+ vpu_mem.dev.name = pdata->name;\r
+ vpu_mem.dev.minor = MISC_DYNAMIC_MINOR;\r
+ vpu_mem.dev.fops = &vpu_mem_fops;\r
+\r
+ err = misc_register(&vpu_mem.dev);\r
+ if (err) {\r
+ printk(KERN_ALERT "Unable to register vpu_mem driver!\n");\r
+ goto err_cant_register_device;\r
+ }\r
+ printk(KERN_ALERT "%s: %d init\n", pdata->name, vpu_mem.dev.minor);\r
+ vpu_mem_count++;\r
+\r
+ vpu_mem.num_entries = vpu_mem.size / VPU_MEM_MIN_ALLOC;\r
+ vpu_mem.bitmap = kzalloc(vpu_mem.num_entries *
+ sizeof(struct vpu_mem_bits), GFP_KERNEL);
+ if (!vpu_mem.bitmap)
+ goto err_no_mem_for_metadata;
+\r
+ /* record the total page number */\r
+ vpu_mem.bitmap[0].pfn = vpu_mem.num_entries;\r
+\r
+ if (vpu_mem.cached)\r
+ vpu_mem.vbase = ioremap_cached(vpu_mem.base,\r
+ vpu_mem.size);\r
+#ifdef ioremap_ext_buffered\r
+ else if (vpu_mem.buffered)\r
+ vpu_mem.vbase = ioremap_ext_buffered(vpu_mem.base,\r
+ vpu_mem.size);\r
+#endif\r
+ else\r
+ vpu_mem.vbase = ioremap(vpu_mem.base, vpu_mem.size);\r
+\r
+ if (!vpu_mem.vbase)
+ goto error_cant_remap;\r
+\r
+#if VPU_MEM_DEBUG\r
+ debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)vpu_mem.dev.minor,\r
+ &debug_fops);\r
+#endif\r
+ return 0;\r
+error_cant_remap:\r
+ kfree(vpu_mem.bitmap);\r
+err_no_mem_for_metadata:\r
+ misc_deregister(&vpu_mem.dev);\r
+err_cant_register_device:\r
+ return -ENODEV;
+}\r
+\r
+static int vpu_mem_probe(struct platform_device *pdev)\r
+{\r
+ struct vpu_mem_platform_data *pdata;\r
+\r
+ if (!pdev || !pdev->dev.platform_data) {\r
+ printk(KERN_ALERT "Unable to probe vpu_mem!\n");\r
+ return -ENODEV;
+ }\r
+ pdata = pdev->dev.platform_data;\r
+ return vpu_mem_setup(pdata);\r
+}\r
+\r
+static int vpu_mem_remove(struct platform_device *pdev)\r
+{\r
+ if (!pdev || !pdev->dev.platform_data) {\r
+ printk(KERN_ALERT "Unable to remove vpu_mem!\n");\r
+ return -ENODEV;
+ }\r
+ if (vpu_mem_count) {\r
+ misc_deregister(&vpu_mem.dev);\r
+ vpu_mem_count--;\r
+ } else {\r
+ printk(KERN_ALERT "no vpu_mem to remove!\n");\r
+ }\r
+ return 0;\r
+}\r
+\r
+static struct platform_driver vpu_mem_driver = {\r
+ .probe = vpu_mem_probe,\r
+ .remove = vpu_mem_remove,\r
+ .driver = { .name = "vpu_mem" }\r
+};\r
+\r
+\r
+static int __init vpu_mem_init(void)\r
+{\r
+ return platform_driver_register(&vpu_mem_driver);\r
+}\r
+\r
+static void __exit vpu_mem_exit(void)\r
+{\r
+ platform_driver_unregister(&vpu_mem_driver);\r
+}\r
+\r
+module_init(vpu_mem_init);\r
+module_exit(vpu_mem_exit);\r
+\r
+\r