\r
#include <mach/vpu_mem.h>\r
\r
+#define VPU_MEM_MAX_ORDER 128\r
+#define VPU_MEM_MIN_ALLOC PAGE_SIZE\r
\r
-#define VPU_MEM_MAX_ORDER 128\r
-#define VPU_MEM_MIN_ALLOC PAGE_SIZE\r
-\r
-#define VPU_MEM_DEBUG 0\r
+#define VPU_MEM_DEBUG 0\r
+#define VPU_MEM_DEBUG_MSGS 0\r
\r
#define VPU_MEM_SPLIT_ALLOC 0\r
#define VPU_MEM_SPLIT_LINK 1\r
\r
-struct vpu_mem_data {\r
- /* protects this data field, if the mm_mmap sem will be held at the\r
- * same time as this sem, the mm sem must be taken first (as this is\r
- * the order for vma_open and vma_close ops */\r
- struct rw_semaphore sem;\r
- /* process id of teh mapping process */\r
- pid_t pid;\r
- /* a list of currently available regions if this is a suballocation */\r
- struct list_head region_list;\r
- /* a linked list of data so we can access them for debugging */\r
- struct list_head list;\r
-};\r
-\r
-struct vpu_mem_bits {\r
- int pfn:16; /* page frame number - vpu_mem space max 256M */\r
- int refrc:7; /* reference number */\r
- int first:1; /* 1 if first, 0 if not first */\r
- int avail:7; /* available link number */\r
- int last:1; /* 1 if last, 0 if no last */\r
-};\r
-\r
-struct vpu_mem_region {\r
- int index;\r
- int ref_count;\r
-};\r
-\r
-struct vpu_mem_region_node {\r
- struct vpu_mem_region region;\r
- struct list_head list;\r
-};\r
-\r
-#define NODE_REGION_INDEX(p) (p->region.index)\r
-#define NODE_REGION_REFC(p) (p->region.ref_count)\r
-\r
-#define VPU_MEM_DEBUG_MSGS 0\r
#if VPU_MEM_DEBUG_MSGS\r
#define DLOG(fmt,args...) \\r
do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \\r
	    ##args); } while (0)\r
#else\r
#define DLOG(x...) do {} while (0)\r
#endif\r
\r
-struct vpu_mem_info {\r
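+/* lifecycle states of a vpu_mem region: free -> in use -> posted for sharing */\r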
+typedef enum vpu_mem_status {\r
+ VDM_FREE,\r
+ VDM_USED,\r
+ VDM_POST,\r
+ VDM_BUTT,\r
+} vdm_st;\r
+\r
+/**\r
+ * struct for a process session connected to vpu_mem\r
+ * \r
+ * @author ChenHengming (2011-4-11)\r
+ */\r
+typedef struct vpu_mem_session {\r
+	/* lists of memory regions used / posted by the current process */\r
+ struct list_head list_used;\r
+ struct list_head list_post;\r
+ /* a linked list of data so we can access them for debugging */\r
+ struct list_head list_session;\r
+	/* process id of the mapping process */\r
+ pid_t pid;\r
+} vdm_session;\r
+\r
+/**\r
+ * global region info\r
+ */\r
+typedef struct vpu_mem_region_info {\r
+	struct list_head index_list; /* linked into the global index list, used for searching */\r
+ int used;\r
+ int post;\r
+ int index;\r
+ int pfn;\r
+} vdm_region;\r
+\r
+/**\r
+ * struct for per-link region information\r
+ * must only be modified while holding the rw_sem lock\r
+ */\r
+typedef struct vpu_mem_link_info {\r
+ struct list_head session_link; /* link to vpu_mem_session list */\r
+	struct list_head status_link; /* linked into a vdm_info status list, used for searching */\r
+ vdm_region *region;\r
+ int link_post;\r
+ int link_used;\r
+ int index;\r
+ int pfn;\r
+} vdm_link;\r
+\r
+typedef struct vpu_mem_info {\r
struct miscdevice dev;\r
	/* physical start address of the remapped vpu_mem space */\r
	unsigned long base;\r
	/* virtual start address of the remapped vpu_mem space */\r
	unsigned char __iomem *vbase;\r
	/* total size of the vpu_mem space */\r
	unsigned long size;\r
	/* number of entries in the vpu_mem space */\r
	unsigned long num_entries;\r
	/* indicates maps of this region should be cached; if a mix of\r
	 * cached and uncached is desired, open the device with\r
	 * O_SYNC to get an uncached region */\r
unsigned cached;\r
unsigned buffered;\r
-\r
- /* no data_list is needed and no master mode is needed */\r
- /* for debugging, creates a list of vpu_mem file structs, the\r
- * data_list_sem should be taken before vpu_mem_data->sem if both are\r
- * needed */\r
- struct semaphore data_list_sem;\r
- struct list_head data_list;\r
-\r
- /* the bitmap for the region indicating which entries are allocated\r
- * and which are free */\r
- struct vpu_mem_bits *bitmap;\r
- /* vpu_mem_sem protects the bitmap array\r
- * a write lock should be held when modifying entries in bitmap\r
- * a read lock should be held when reading data from bits or\r
- * dereferencing a pointer into bitmap\r
- *\r
- * vpu_mem_data->sem protects the vpu_mem data of a particular file\r
- * Many of the function that require the vpu_mem_data->sem have a non-\r
- * locking version for when the caller is already holding that sem.\r
- *\r
- * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER:\r
- * down(vpu_mem_data->sem) => down(bitmap_sem)\r
+	/*\r
+	 * status holds only free regions at init time; a vdm_session is\r
+	 * reused for it purely for convenience\r
*/\r
- struct rw_semaphore bitmap_sem;\r
-};\r
-\r
-static struct vpu_mem_info vpu_mem;\r
+ vdm_session status;\r
+ struct list_head list_index; /* sort by index */\r
+ struct list_head list_free; /* free region list */\r
+ struct list_head list_session; /* session list */\r
+ struct rw_semaphore rw_sem;\r
+} vdm_info;\r
+\r
+static vdm_info vpu_mem;\r
static int vpu_mem_count;\r
+static int vpu_mem_over = 0;\r
\r
-#define VPU_MEM_IS_FREE(index) !(vpu_mem.bitmap[index].avail)\r
-#define VPU_MEM_FIRST(index) (vpu_mem.bitmap[index].first)\r
-#define VPU_MEM_LAST(index) (vpu_mem.bitmap[index].last)\r
-#define VPU_MEM_REFC(index) (vpu_mem.bitmap[index].refrc)\r
-#define VPU_MEM_AVAIL(index) (vpu_mem.bitmap[index].avail)\r
-#define VPU_MEM_BIT(index) (&vpu_mem.bitmap[index])\r
-#define VPU_MEM_PFN(index) (vpu_mem.bitmap[index].pfn)\r
-#define VPU_MEM_LAST_INDEX(index) (index - VPU_MEM_PFN(index - 1))\r
-#define VPU_MEM_NEXT_INDEX(index) (index + VPU_MEM_PFN(index))\r
-#define VPU_MEM_END_INDEX(index) (VPU_MEM_NEXT_INDEX(index) - 1)\r
-#define VPU_MEM_OFFSET(index) (index * VPU_MEM_MIN_ALLOC)\r
-#define VPU_MEM_START_ADDR(index) (VPU_MEM_OFFSET(index) + vpu_mem.base)\r
-#define VPU_MEM_SIZE(index) ((VPU_MEM_PFN(index)) * VPU_MEM_MIN_ALLOC)\r
-#define VPU_MEM_END_ADDR(index) (VPU_MEM_START_ADDR(index) + VPU_MEM_SIZE(index))\r
-#define VPU_MEM_START_VADDR(index) (VPU_MEM_OFFSET(index) + vpu_mem.vbase)\r
-#define VPU_MEM_END_VADDR(index) (VPU_MEM_START_VADDR(index) + VPU_MEM_SIZE(index))\r
-#define VPU_MEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))\r
+#define vdm_used (vpu_mem.status.list_used)\r
+#define vdm_post (vpu_mem.status.list_post)\r
+#define vdm_index (vpu_mem.list_index)\r
+#define vdm_free (vpu_mem.list_free)\r
+#define vdm_proc (vpu_mem.list_session)\r
+#define vdm_rwsem (vpu_mem.rw_sem)\r
+#define is_free_region(x) ((0 == (x)->used) && (0 == (x)->post))\r
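+\r
+/*\r
+ * Data model: a vdm_region describes one physically contiguous chunk and\r
+ * lives on the global index list; a vdm_link references a region from one\r
+ * of the status lists (free/used/post) and from a session's own lists.\r
+ * A region may be recycled once both its used and post counts reach zero.\r
+ */\r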
\r
-static int vpu_mem_release(struct inode *, struct file *);\r
-static int vpu_mem_mmap(struct file *, struct vm_area_struct *);\r
-static int vpu_mem_open(struct inode *, struct file *);\r
-static long vpu_mem_ioctl(struct file *, unsigned int, unsigned long);\r
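+/* dump all regions, the three status lists and every session to the kernel log */\r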
+static void dump_status(void)\r
+{\r
+ vdm_link *link, *tmp_link;\r
+ vdm_region *region, *tmp_region;\r
+ vdm_session *session, *tmp_session;\r
\r
-struct file_operations vpu_mem_fops = {\r
- .open = vpu_mem_open,\r
- .mmap = vpu_mem_mmap,\r
- .unlocked_ioctl = vpu_mem_ioctl,\r
- .release = vpu_mem_release,\r
-};\r
+ printk("vpu mem status dump :\n\n");\r
\r
-int is_vpu_mem_file(struct file *file)\r
-{\r
- if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode))\r
- return 0;\r
- if (unlikely(file->f_dentry->d_inode->i_rdev !=\r
- MKDEV(MISC_MAJOR, vpu_mem.dev.minor)))\r
- return 0;\r
- return 1;\r
+	// print all regions ordered by index\r
+ printk("region:\n");\r
+ list_for_each_entry_safe(region, tmp_region, &vdm_index, index_list) {\r
+ printk(" idx %6d pfn %6d used %3d post %3d\n",\r
+ region->index, region->pfn, region->used, region->post);\r
+ }\r
+ printk("free :\n");\r
+ list_for_each_entry_safe(link, tmp_link, &vdm_free, status_link) {\r
+ printk(" idx %6d pfn %6d used %3d post %3d\n",\r
+ link->index, link->pfn, link->link_used, link->link_post);\r
+ }\r
+ printk("used :\n");\r
+ list_for_each_entry_safe(link, tmp_link, &vdm_used, status_link) {\r
+ printk(" idx %6d pfn %6d used %3d post %3d\n",\r
+ link->index, link->pfn, link->link_used, link->link_post);\r
+ }\r
+ printk("post :\n");\r
+ list_for_each_entry_safe(link, tmp_link, &vdm_post, status_link) {\r
+ printk(" idx %6d pfn %6d used %3d post %3d\n",\r
+ link->index, link->pfn, link->link_used, link->link_post);\r
+ }\r
+\r
+	// print every session's region usage in vpu_mem_info\r
+ list_for_each_entry_safe(session, tmp_session, &vdm_proc, list_session) {\r
+ printk("pid: %d\n", session->pid);\r
+\r
+ list_for_each_entry_safe(link, tmp_link, &session->list_used, session_link) {\r
+ printk("used: idx %6d pfn %6d used %3d\n",\r
+ link->index, link->pfn, link->link_used);\r
+ }\r
+ list_for_each_entry_safe(link, tmp_link, &session->list_post, session_link) {\r
+ printk("post: idx %6d pfn %6d post %3d\n",\r
+ link->index, link->pfn, link->link_post);\r
+ }\r
+ }\r
}\r
\r
-static void region_set(int index, int pfn)\r
+/**\r
+ * find used link in a session\r
+ * \r
+ * @author ChenHengming (2011-4-18)\r
+ * \r
+ * @param session \r
+ * @param index \r
+ * \r
+ * @return vdm_link* \r
+ */\r
+static vdm_link *find_used_link(vdm_session *session, int index)\r
{\r
- WARN(pfn <= 0, "vpu_mem: region_set non-positive pfn\n");\r
- if (pfn > 0) {\r
- int first = index;\r
- int last = index + pfn - 1;\r
+ vdm_link *pos, *n;\r
+\r
+ list_for_each_entry_safe(pos, n, &session->list_used, session_link) {\r
+ if (index == pos->index) {\r
+ DLOG("found index %d ptr %x\n", index, pos);\r
+ return pos;\r
+ }\r
+ }\r
\r
- DLOG("region_set: first %d, last %d, size %d\n", first, last, pfn);\r
+ return NULL;\r
+}\r
\r
- VPU_MEM_FIRST(first) = 1;\r
- VPU_MEM_PFN(first) = pfn;\r
+/**\r
+ * find post link from vpu_mem's vdm_post list\r
+ * \r
+ * @author ChenHengming (2011-4-18)\r
+ * \r
+ * @param index \r
+ * \r
+ * @return vdm_link* \r
+ */\r
+static vdm_link *find_post_link(int index)\r
+{\r
+ vdm_link *pos, *n;\r
\r
- VPU_MEM_LAST(last) = 1;\r
- VPU_MEM_PFN(last) = pfn;\r
+ list_for_each_entry_safe(pos, n, &vdm_post, status_link) {\r
+ if (index == pos->index) {\r
+ return pos;\r
+ }\r
}\r
+\r
+ return NULL;\r
}\r
\r
-static void region_unset(int index, int pfn)\r
+/**\r
+ * find free link from vpu_mem's vdm_free list\r
+ * \r
+ * @author Administrator (2011-4-19)\r
+ * \r
+ * @param index \r
+ * \r
+ * @return vdm_link* \r
+ */\r
+static vdm_link *find_free_link(int index)\r
{\r
- WARN(pfn <= 0, "vpu_mem: region_unset non-positive pfn\n");\r
- if (pfn > 0) {\r
- int first = index;\r
- int last = index + pfn - 1;\r
+ vdm_link *pos, *n;\r
\r
- DLOG("region_unset: first %d, last %d, size %d\n", first, last, pfn);\r
+ list_for_each_entry_safe(pos, n, &vdm_free, status_link) {\r
+ if (index == pos->index) {\r
+ return pos;\r
+ }\r
+ }\r
\r
- VPU_MEM_FIRST(first) = 0;\r
- VPU_MEM_LAST(first) = 0;\r
- VPU_MEM_PFN(first) = 0;\r
+ return NULL;\r
+}\r
+\r
+/**\r
+ * insert a region into the index list for search\r
+ * \r
+ * @author ChenHengming (2011-4-18)\r
+ * \r
+ * @param region \r
+ * \r
+ * @return int \r
+ */\r
+static int _insert_region_index(vdm_region *region)\r
+{\r
+ int index = region->index;\r
+ int last = -1;\r
+ int next;\r
+ vdm_region *tmp, *n;\r
+\r
+ if (list_empty(&vdm_index)) {\r
+ DLOG("index list is empty, insert first region\n");\r
+		list_add_tail(&region->index_list, &vdm_index);\r
+ return 0;\r
+ }\r
\r
- VPU_MEM_FIRST(last) = 0;\r
- VPU_MEM_LAST(last) = 0;\r
- VPU_MEM_PFN(last) = 0;\r
+ list_for_each_entry_safe(tmp, n, &vdm_index, index_list) {\r
+ next = tmp->index;\r
+ DLOG("insert index %d pfn %d last %d next %d ptr %x\n", index, region->pfn, last, next, tmp);\r
+ if ((last < index) && (index < next)) {\r
+ DLOG("Done\n");\r
+			list_add_tail(&region->index_list, &tmp->index_list);\r
+ return 0;\r
+ }\r
+ last = next;\r
}\r
+\r
+	printk(KERN_ERR "_insert_region_index %d failed!\n", index);\r
+ dump_status();\r
+ return -1;\r
}\r
\r
-static void region_set_ref_count(int index, int ref_count)\r
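+/* insert a free link into vdm_free, kept sorted by index so that\r
+ * neighbouring free regions can be merged later */\r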
+static void _insert_region_status_free(vdm_link *link)\r
{\r
- DLOG("region_set_ref_count: index %d, ref_count %d\n", index, ref_count);\r
+ int index = link->index;\r
+ int last = -1;\r
+ int next;\r
+ vdm_link *tmp, *n;\r
+\r
+ if (list_empty(&vdm_free)) {\r
+ DLOG("free list is empty, list_add_tail first region\n");\r
+ list_add_tail(&link->status_link, &vdm_free);\r
+ return ;\r
+ }\r
\r
- VPU_MEM_REFC(index) = ref_count;\r
- VPU_MEM_REFC(VPU_MEM_END_INDEX(index)) = ref_count;\r
+ list_for_each_entry_safe(tmp, n, &vdm_free, status_link) {\r
+ next = tmp->index;\r
+ if ((last < index) && (index < next)) {\r
+ DLOG("list_add_tail index %d pfn %d last %d next %d ptr %x\n", index, link->pfn, last, next, tmp);\r
+ list_add_tail(&link->status_link, &tmp->status_link);\r
+ return ;\r
+ }\r
+ last = next;\r
+ }\r
+ list_add_tail(&link->status_link, &tmp->status_link);\r
+ DLOG("list_add index %d pfn %d last %d ptr %x\n", index, link->pfn, last, tmp);\r
+ return ;\r
}\r
\r
-static void region_set_avail(int index, int avail)\r
+static void _insert_link_status_post(vdm_link *link)\r
{\r
- DLOG("region_set_avail: index %d, avail %d\n", index, avail);\r
+ int index = link->index;\r
+ int last = -1;\r
+ int next;\r
+ vdm_link *tmp, *n;\r
+\r
+ if (list_empty(&vdm_post)) {\r
+ DLOG("post list is empty, list_add_tail first region\n");\r
+ list_add_tail(&link->status_link, &vdm_post);\r
+ return ;\r
+ }\r
\r
- VPU_MEM_AVAIL(index) = avail;\r
- VPU_MEM_AVAIL(VPU_MEM_END_INDEX(index)) = avail;\r
+ list_for_each_entry_safe(tmp, n, &vdm_post, status_link) {\r
+ next = tmp->index;\r
+ if ((last < index) && (index < next)) {\r
+ DLOG("list_add_tail index %d pfn %d last %d next %d ptr %x\n", index, link->pfn, last, next, tmp);\r
+ list_add_tail(&link->status_link, &tmp->status_link);\r
+ return ;\r
+ }\r
+ last = next;\r
+ }\r
+\r
+ list_add_tail(&link->status_link, &tmp->status_link);\r
+ DLOG("list_add index %d pfn %d last %d ptr %x\n", index, link->pfn, last, tmp);\r
+ return ;\r
}\r
\r
-static int index_avail(int index)\r
+static void _insert_link_status_used(vdm_link *link)\r
{\r
- return ((0 <= index) && (index < vpu_mem.num_entries));\r
+ int index = link->index;\r
+ int last = -1;\r
+ int next;\r
+ vdm_link *tmp, *n;\r
+\r
+ if (list_empty(&vdm_used)) {\r
+ DLOG("used list is empty, list_add_tail first region\n");\r
+ list_add_tail(&link->status_link, &vdm_used);\r
+ return ;\r
+ }\r
+\r
+ list_for_each_entry_safe(tmp, n, &vdm_used, status_link) {\r
+ next = tmp->index;\r
+ if ((last < index) && (index < next)) {\r
+ DLOG("list_add_tail index %d pfn %d last %d next %d ptr %x\n", index, link->pfn, last, next, tmp);\r
+ list_add_tail(&link->status_link, &tmp->status_link);\r
+ return ;\r
+ }\r
+ last = next;\r
+ }\r
+\r
+ list_add_tail(&link->status_link, &tmp->status_link);\r
+ DLOG("list_add index %d pfn %d last %d ptr %x\n", index, link->pfn, last, tmp);\r
+ return ;\r
}\r
\r
-static int region_check(int index)\r
+static void _insert_link_session_used(vdm_link *link, vdm_session *session)\r
{\r
- int end = VPU_MEM_END_INDEX(index);\r
-\r
- DLOG("region_check: index %d val 0x%.8x, end %d val 0x%.8x\n",\r
- index, *((unsigned int *)VPU_MEM_BIT(index)),\r
- end, *((unsigned int *)VPU_MEM_BIT(end)));\r
-\r
- WARN(index < 0,\r
- "vpu_mem: region_check fail: negative first %d\n", index);\r
- WARN(index >= vpu_mem.num_entries,\r
- "vpu_mem: region_check fail: too large first %d\n", index);\r
- WARN(end < 0,\r
- "vpu_mem: region_check fail: negative end %d\n", end);\r
- WARN(end >= vpu_mem.num_entries,\r
- "vpu_mem: region_check fail: too large end %d\n", end);\r
- WARN(!VPU_MEM_FIRST(index),\r
- "vpu_mem: region_check fail: index %d is not first\n", index);\r
- WARN(!VPU_MEM_LAST(end),\r
- "vpu_mem: region_check fail: index %d is not end\n", end);\r
- WARN((VPU_MEM_PFN(index) != VPU_MEM_PFN(end)),\r
- "vpu_mem: region_check fail: first %d and end %d pfn is not equal\n", index, end);\r
- WARN(VPU_MEM_REFC(index) != VPU_MEM_REFC(end),\r
- "vpu_mem: region_check fail: first %d and end %d ref count is not equal\n", index, end);\r
- WARN(VPU_MEM_AVAIL(index) != VPU_MEM_AVAIL(end),\r
- "vpu_mem: region_check fail: first %d and end %d avail count is not equal\n", index, end);\r
- return 0;\r
+ int index = link->index;\r
+ int last = -1;\r
+ int next;\r
+ vdm_link *tmp, *n;\r
+\r
+ if (list_empty(&session->list_used)) {\r
+ DLOG("session used list is empty, list_add_tail first region\n");\r
+ list_add_tail(&link->session_link, &session->list_used);\r
+ return ;\r
+ }\r
+\r
+ list_for_each_entry_safe(tmp, n, &session->list_used, session_link) {\r
+ next = tmp->index;\r
+ if ((last < index) && (index < next)) {\r
+ list_add_tail(&link->session_link, &tmp->session_link);\r
+ DLOG("list_add_tail index %d pfn %d last %d next %d ptr %x\n", index, link->pfn, last, next, tmp);\r
+ return ;\r
+ }\r
+ last = next;\r
+ }\r
+\r
+ list_add_tail(&link->session_link, &tmp->session_link);\r
+ DLOG("list_add index %d pfn %d last %d ptr %x\n", index, link->pfn, last, tmp);\r
+ return ;\r
}\r
\r
-/*\r
- * split new allocated block from free block\r
- * the bitmap_sem and region_list_sem must be hold together\r
- * the pnode is a ouput region node\r
- */\r
-static int region_new(struct list_head *region_list, int index, int pfn)\r
+static void _insert_link_session_post(vdm_link *link, vdm_session *session)\r
{\r
- int pfn_free = VPU_MEM_PFN(index);\r
- // check pfn is smaller then target index region\r
- if ((pfn > pfn_free) || (pfn <= 0)) {\r
-#if VPU_MEM_DEBUG\r
- printk(KERN_INFO "unable to split region %d of size %d, while is smaller than %d!", index, pfn_free, pfn);\r
-#endif\r
- return -1;\r
- }\r
- // check region data coherence\r
- if (region_check(index)) {\r
-#if VPU_MEM_DEBUG\r
- printk(KERN_INFO "region %d unable to pass coherence check!", index);\r
-#endif\r
- return -EINVAL;\r
+ int index = link->index;\r
+ int last = -1;\r
+ int next;\r
+ vdm_link *tmp, *n;\r
+\r
+ if (list_empty(&session->list_post)) {\r
+ DLOG("session post list is empty, list_add_tail first region\n");\r
+ list_add_tail(&link->session_link, &session->list_post);\r
+ return ;\r
}\r
\r
- {\r
- struct list_head *last;\r
- struct vpu_mem_region_node *node;\r
- // check target index region first\r
- if (!VPU_MEM_IS_FREE(index)) {\r
-#if VPU_MEM_DEBUG\r
- printk(KERN_INFO "try to split not free region %d!", index);\r
-#endif\r
- return -2;\r
- }\r
- // malloc vpu_mem_region_node\r
- node = kmalloc(sizeof(struct vpu_mem_region_node), GFP_KERNEL);\r
- if (NULL == node) {\r
-#if VPU_MEM_DEBUG\r
- printk(KERN_INFO "No space to allocate struct vpu_mem_region_node!");\r
-#endif\r
- return -ENOMEM;\r
+ list_for_each_entry_safe(tmp, n, &session->list_post, session_link) {\r
+ next = tmp->index;\r
+ if ((last < index) && (index < next)) {\r
+ list_add_tail(&link->session_link, &tmp->session_link);\r
+ DLOG("list_add_tail index %d pfn %d last %d next %d ptr %x\n", index, link->pfn, last, next, tmp);\r
+ return ;\r
}\r
+ last = next;\r
+ }\r
\r
- // search the last node\r
- DLOG("search the last node\n");\r
- for (last = region_list; !list_is_last(last, region_list);)\r
- last = last->next;\r
+ list_add_tail(&link->session_link, &tmp->session_link);\r
+ DLOG("list_add index %d pfn %d last %d ptr %x\n", index, link->pfn, last, tmp);\r
+ return ;\r
+}\r
\r
- DLOG("list_add_tail\n");\r
- list_add_tail(&node->list, last);\r
+static void _remove_free_region(vdm_region *region)\r
+{\r
+	list_del_init(&region->index_list);\r
+ kfree(region);\r
+}\r
\r
- DLOG("start region_set index %d pfn %u\n", index, pfn);\r
- region_set(index, pfn);\r
+static void _remove_free_link(vdm_link *link)\r
+{\r
+ list_del_init(&link->session_link);\r
+ list_del_init(&link->status_link);\r
+ kfree(link);\r
+}\r
\r
- if (pfn_free - pfn) {\r
- DLOG("start region_set index %d pfn %u\n", index + pfn, pfn_free - pfn);\r
- region_set(index + pfn, pfn_free - pfn);\r
- }\r
+static void _merge_two_region(vdm_region *dst, vdm_region *src)\r
+{\r
+ vdm_link *dst_link = find_free_link(dst->index);\r
+ vdm_link *src_link = find_free_link(src->index);\r
+ dst->pfn += src->pfn;\r
+ dst_link->pfn += src_link->pfn;\r
+ _remove_free_link(src_link);\r
+ _remove_free_region(src);\r
+}\r
\r
- region_set_avail(index, VPU_MEM_AVAIL(index) + 1);\r
- region_set_ref_count(index, VPU_MEM_REFC(index) + 1);\r
- node->region.index = index;\r
- node->region.ref_count = 1;\r
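+/* merge a newly freed region with free neighbours on either side */\r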
+static void merge_free_region_and_link(vdm_region *region)\r
+{\r
+ if (region->used || region->post) {\r
+		printk(KERN_ALERT "attempt to merge a region that is still in use!\n");\r
+ return ;\r
+ } else {\r
+ vdm_region *neighbor;\r
+ struct list_head *tmp = region->index_list.next;\r
+ if (tmp != &vdm_index) {\r
+ neighbor = (vdm_region *)list_entry(tmp, vdm_region, index_list);\r
+ if (is_free_region(neighbor)) {\r
+ DLOG("merge next\n");\r
+ _merge_two_region(region, neighbor);\r
+ }\r
+ }\r
+ tmp = region->index_list.prev;\r
+ if (tmp != &vdm_index) {\r
+ neighbor = (vdm_region *)list_entry(tmp, vdm_region, index_list);\r
+ if (is_free_region(neighbor)) {\r
+ DLOG("merge prev\n");\r
+ _merge_two_region(neighbor, region);\r
+ }\r
+ }\r
}\r
+}\r
\r
- return 0;\r
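+/* the put_*_link() helpers detach a link from its current lists and\r
+ * requeue it on the matching status (and session) lists */\r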
+static void put_free_link(vdm_link *link)\r
+{\r
+ list_del_init(&link->session_link);\r
+ list_del_init(&link->status_link);\r
+ _insert_region_status_free(link);\r
}\r
\r
-/*\r
- * link allocated block from free block\r
- * the bitmap_sem and region_list_sem must be hold together\r
- * the pnode is a ouput region node\r
- */\r
-static int region_link(struct list_head *region_list, int index)\r
+static void put_used_link(vdm_link *link, vdm_session *session)\r
{\r
- struct vpu_mem_region_node *node = NULL;\r
- struct list_head *list, *tmp;\r
- list_for_each_safe(list, tmp, region_list) {\r
- struct vpu_mem_region_node *p = list_entry(list, struct vpu_mem_region_node, list);\r
- if (index == NODE_REGION_INDEX(p)) {\r
- node = p;\r
- break;\r
+ list_del_init(&link->session_link);\r
+ list_del_init(&link->status_link);\r
+ _insert_link_status_used(link);\r
+ _insert_link_session_used(link, session);\r
+}\r
+\r
+static void put_post_link(vdm_link *link, vdm_session *session)\r
+{\r
+ list_del_init(&link->session_link);\r
+ list_del_init(&link->status_link);\r
+ _insert_link_status_post(link);\r
+ _insert_link_session_post(link, session);\r
+}\r
+\r
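+/* allocate a new region covering pfn pages at index, plus its first link */\r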
+static vdm_link *new_link_by_index(int index, int pfn)\r
+{\r
+ vdm_region *region = (vdm_region *)kmalloc(sizeof(vdm_region), GFP_KERNEL);\r
+ vdm_link *link = (vdm_link *)kmalloc(sizeof(vdm_link ), GFP_KERNEL);\r
+\r
+ if ((NULL == region) || (NULL == link)) {\r
+		printk(KERN_ALERT "cannot kmalloc vdm_region and vdm_link in %s\n", __FUNCTION__);\r
+ if (region) {\r
+ kfree(region);\r
}\r
+ if (link) {\r
+ kfree(link);\r
+ }\r
+ return NULL;\r
}\r
\r
- if (NULL == node) {\r
- struct list_head *last;\r
- DLOG("link non-exists index %d\n", index);\r
+ region->post = 0;\r
+ region->used = 0;\r
+ region->index = index;\r
+ region->pfn = pfn;\r
\r
- // malloc vpu_mem_region_node\r
- node = kmalloc(sizeof(struct vpu_mem_region_node), GFP_KERNEL);\r
- if (NULL == node) {\r
-#if VPU_MEM_DEBUG\r
- printk(KERN_INFO "No space to allocate struct vpu_mem_region_node!");\r
-#endif\r
- return -ENOMEM;\r
- }\r
+ INIT_LIST_HEAD(®ion->index_list);\r
\r
- // search the last node\r
- DLOG("search the last node\n");\r
- for (last = region_list; !list_is_last(last, region_list);)\r
- last = last->next;\r
+ link->link_post = 0;\r
+ link->link_used = 0;\r
+ link->region = region;\r
+ link->index = region->index;\r
+ link->pfn = region->pfn;\r
+ INIT_LIST_HEAD(&link->session_link);\r
+ INIT_LIST_HEAD(&link->status_link);\r
\r
- DLOG("list_add_tail\n");\r
- list_add_tail(&node->list, last);\r
+ return link;\r
+}\r
\r
- node->region.index = index;\r
- node->region.ref_count = 1;\r
- } else {\r
- DLOG("link existed index %d\n", index);\r
- node->region.ref_count++;\r
+static vdm_link *new_link_by_region(vdm_region *region)\r
+{\r
+ vdm_link *link = (vdm_link *)kmalloc(sizeof(vdm_link), GFP_KERNEL);\r
+ if (NULL == link) {\r
+		printk(KERN_ALERT "cannot kmalloc vdm_link in %s\n", __FUNCTION__);\r
+ return NULL;\r
}\r
- region_set_ref_count(index, VPU_MEM_REFC(index) + 1);\r
\r
- return 0;\r
+ link->link_post = 0;\r
+ link->link_used = 0;\r
+ link->region = region;\r
+ link->index = region->index;\r
+ link->pfn = region->pfn;\r
+ INIT_LIST_HEAD(&link->session_link);\r
+ INIT_LIST_HEAD(&link->status_link);\r
+\r
+ return link;\r
}\r
\r
-static int region_merge(struct list_head *node)\r
+static void link_del(vdm_link *link)\r
{\r
- struct vpu_mem_region_node *pnode = list_entry(node, struct vpu_mem_region_node, list);\r
- int index = pnode->region.index;\r
- int target;\r
+ list_del_init(&link->session_link);\r
+ list_del_init(&link->status_link);\r
+ kfree(link);\r
+}\r
\r
- if (VPU_MEM_AVAIL(index))\r
- return 0;\r
- if (region_check(index))\r
- return -EINVAL;\r
-\r
- target = VPU_MEM_NEXT_INDEX(index);\r
- if (index_avail(target) && VPU_MEM_IS_FREE(target)) {\r
- int pfn_target = VPU_MEM_PFN(target);\r
- int pfn_index = VPU_MEM_PFN(index);\r
- int pfn_total = pfn_target + pfn_index;\r
- region_unset(index, pfn_index);\r
- region_unset(target, pfn_target);\r
- region_set(index, pfn_total);\r
- } else {\r
- DLOG("region_merge: merge NEXT_INDEX fail index_avail(%d) = %d IS_FREE = %d\n",\r
- target, index_avail(target), VPU_MEM_IS_FREE(target));\r
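+/*\r
+ * carve a used allocation of pfn pages out of a free link: take the whole\r
+ * link when the size matches exactly, otherwise split a new region off the\r
+ * front of the free one\r
+ */\r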
+static vdm_link *get_used_link_from_free_link(vdm_link *link, vdm_session *session, int pfn)\r
+{\r
+ if (pfn > link->pfn) {\r
+ return NULL;\r
}\r
- target = index - 1;\r
- if (index_avail(target) && VPU_MEM_IS_FREE(target)) {\r
- int pfn_target = VPU_MEM_PFN(target);\r
- int pfn_index = VPU_MEM_PFN(index);\r
- int pfn_total = pfn_target + pfn_index;\r
- target = VPU_MEM_LAST_INDEX(index);\r
- region_unset(index, pfn_index);\r
- region_unset(target, pfn_target);\r
- region_set(target, pfn_total);\r
+ if (pfn == link->pfn) {\r
+ DLOG("pfn == link->pfn %d\n", pfn);\r
+ link->link_used = 1;\r
+ link->region->used = 1;\r
+ put_used_link(link, session);\r
+ return link;\r
} else {\r
- DLOG("region_merge: merge LAST_INDEX fail index_avail(%d) = %d IS_FREE = %d\n",\r
- target, index_avail(target), VPU_MEM_IS_FREE(target));\r
+ vdm_link *used = new_link_by_index(link->index, pfn);\r
+ if (NULL == used)\r
+ return NULL;\r
+\r
+ link->index += pfn;\r
+ link->pfn -= pfn;\r
+ link->region->index += pfn;\r
+ link->region->pfn -= pfn;\r
+ used->link_used = 1;\r
+ used->region->used = 1;\r
+\r
+ DLOG("used: index %d pfn %d ptr %x\n", used->index, used->pfn, used->region);\r
+ if (_insert_region_index(used->region)) {\r
+			printk(KERN_ALERT "failed to insert allocated region index %d pfn %d\n", used->index, used->pfn);\r
+			/* roll the split back, then free the new region and link */\r
+			link->index -= pfn;\r
+			link->pfn += pfn;\r
+			link->region->index -= pfn;\r
+			link->region->pfn += pfn;\r
+			_remove_free_region(used->region);\r
+			_remove_free_link(used);\r
+ return NULL;\r
+ }\r
+ put_used_link(used, session);\r
+ return used;\r
}\r
- return 0;\r
+}\r
+\r
+#define VPU_MEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))\r
+\r
+static int vpu_mem_release(struct inode *, struct file *);\r
+static int vpu_mem_mmap(struct file *, struct vm_area_struct *);\r
+static int vpu_mem_open(struct inode *, struct file *);\r
+static long vpu_mem_ioctl(struct file *, unsigned int, unsigned long);\r
+\r
+struct file_operations vpu_mem_fops = {\r
+ .open = vpu_mem_open,\r
+ .mmap = vpu_mem_mmap,\r
+ .unlocked_ioctl = vpu_mem_ioctl,\r
+ .release = vpu_mem_release,\r
+};\r
+\r
+int is_vpu_mem_file(struct file *file)\r
+{\r
+ if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode))\r
+ return 0;\r
+ if (unlikely(file->f_dentry->d_inode->i_rdev !=\r
+ MKDEV(MISC_MAJOR, vpu_mem.dev.minor)))\r
+ return 0;\r
+ return 1;\r
}\r
\r
static long vpu_mem_allocate(struct file *file, unsigned int len)\r
{\r
- /* caller should hold the write lock on vpu_mem_sem! */\r
- /* return the corresponding pdata[] entry */\r
- int curr = 0;\r
- int best_fit = -1;\r
+ vdm_link *free, *n;\r
unsigned int pfn = (len + VPU_MEM_MIN_ALLOC - 1)/VPU_MEM_MIN_ALLOC;\r
- struct vpu_mem_data *data = (struct vpu_mem_data *)file->private_data;\r
+ vdm_session *session = (vdm_session *)file->private_data;\r
\r
if (!is_vpu_mem_file(file)) {\r
-#if VPU_MEM_DEBUG\r
- printk(KERN_INFO "allocate vpu_mem data from invalid file.\n");\r
-#endif\r
+ printk(KERN_INFO "allocate vpu_mem session from invalid file\n");\r
return -ENODEV;\r
}\r
\r
- /* look through the bitmap:\r
- * if you find a free slot of the correct order use it\r
- * otherwise, use the best fit (smallest with size > order) slot\r
- */\r
- while (curr < vpu_mem.num_entries) {\r
- if (VPU_MEM_IS_FREE(curr)) {\r
- if (VPU_MEM_PFN(curr) >= pfn) {\r
- /* set the not free bit and clear others */\r
- best_fit = curr;\r
-#if VPU_MEM_DEBUG\r
- printk("vpu_mem: find fit size at index %d\n", curr);\r
-#endif\r
- break;\r
- }\r
- }\r
-#if VPU_MEM_DEBUG\r
- //printk(KERN_INFO "vpu_mem: search curr %d\n!", curr);\r
-#endif\r
- curr = VPU_MEM_NEXT_INDEX(curr);\r
-#if VPU_MEM_DEBUG\r
- //printk(KERN_INFO "vpu_mem: search next %d\n!", curr);\r
-#endif\r
- }\r
-\r
- /* if best_fit < 0, there are no suitable slots,\r
- * return an error\r
- */\r
- if (best_fit < 0) {\r
-#if VPU_MEM_DEBUG\r
- printk("vpu_mem: no space left to allocate!\n");\r
-#endif\r
- return -1;\r
- }\r
-\r
- DLOG("best_fit: %d next: %u\n", best_fit, best_fit + pfn);\r
-\r
- down_write(&data->sem);\r
- {\r
- int ret = region_new(&data->region_list, best_fit, pfn);\r
- if (ret)\r
- best_fit = -1;\r
+ list_for_each_entry_safe(free, n, &vdm_free, status_link) {\r
+		/* use the first free buffer large enough for the request */\r
+		vdm_link *used = get_used_link_from_free_link(free, session, pfn);\r
+		DLOG("search free buffer at index %d pfn %d for len %d\n", free->index, free->pfn, pfn);\r
+		if (NULL == used)\r
+			continue;\r
+		DLOG("found buffer at index %d pfn %d for ptr %x\n", used->index, used->pfn, used);\r
+		return used->index;\r
}\r
- up_write(&data->sem);\r
\r
- DLOG("best_fit result: %d next: %u\n", best_fit, best_fit + pfn);\r
-\r
- return best_fit;\r
-}\r
-\r
-static int vpu_mem_free_by_region(struct vpu_mem_region_node *node)\r
-{\r
- int ret = 0;\r
- int index = node->region.index;\r
- int avail = VPU_MEM_AVAIL(index);\r
- int refc = VPU_MEM_REFC(index);\r
-\r
- WARN((NODE_REGION_REFC(node) <= 0),\r
- "vpu_mem: vpu_mem_free: non-positive ref count\n");\r
- WARN((!VPU_MEM_FIRST(index)),\r
- "vpu_mem: vpu_mem_free: index %d is not first\n", index);\r
- WARN((avail <= 0),\r
- "vpu_mem: vpu_mem_free: avail of index %d is non-positive\n", index);\r
- WARN((refc <= 0),\r
- "vpu_mem: vpu_mem_free: refc of index %d is non-positive\n", index);\r
-\r
- NODE_REGION_REFC(node) -= 1;\r
- region_set_avail(index, avail - 1);\r
- region_set_ref_count(index, refc - 1);\r
- if (0 == NODE_REGION_REFC(node))\r
- {\r
- avail = VPU_MEM_AVAIL(index);\r
- if (0 == avail)\r
- {\r
- refc = VPU_MEM_REFC(index);\r
- WARN((0 != refc),\r
- "vpu_mem: vpu_mem_free: refc of index %d after free is non-zero\n", index);\r
- ret = region_merge(&node->list);\r
- }\r
- list_del(&node->list);\r
- kfree(node);\r
+ if (!vpu_mem_over) {\r
+ printk(KERN_INFO "vpu_mem: no space left to allocate!\n");\r
+ dump_status();\r
+ vpu_mem_over = 1;\r
}\r
- return ret;\r
+ return -1;\r
}\r
\r
static int vpu_mem_free(struct file *file, int index)\r
{\r
- /* caller should hold the write lock on vpu_mem_sem! */\r
- struct vpu_mem_data *data = (struct vpu_mem_data *)file->private_data;\r
+ vdm_session *session = (vdm_session *)file->private_data;\r
\r
if (!is_vpu_mem_file(file)) {\r
-#if VPU_MEM_DEBUG\r
- printk(KERN_INFO "free vpu_mem data from invalid file.\n");\r
-#endif\r
+ printk(KERN_INFO "free vpu_mem session from invalid file.\n");\r
return -ENODEV;\r
}\r
\r
- DLOG("search for index %d\n", index);\r
-\r
- down_write(&data->sem);\r
+ DLOG("searching for index %d\n", index);\r
{\r
- struct list_head *list, *tmp;\r
- list_for_each_safe(list, tmp, &data->region_list) {\r
- struct vpu_mem_region_node *node = list_entry(list, struct vpu_mem_region_node, list);\r
- if (index == NODE_REGION_INDEX(node)) {\r
- int ret = vpu_mem_free_by_region(node);\r
- up_write(&data->sem);\r
- return ret;\r
+ vdm_link *link = find_used_link(session, index);\r
+ if (NULL == link) {\r
+ DLOG("no link of index %d searched\n", index);\r
+ return -1;\r
+ }\r
+ link->link_used--;\r
+ link->region->used--;\r
+ if (0 == link->link_used) {\r
+ if (is_free_region(link->region)) {\r
+ put_free_link(link);\r
+ merge_free_region_and_link(link->region);\r
+ } else {\r
+ link_del(link);\r
}\r
}\r
}\r
- up_write(&data->sem);\r
-\r
- DLOG("no region of index %d searched\n", index);\r
-\r
- return -1;\r
+ return 0;\r
}\r
\r
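+/*\r
+ * post a buffer so another session can take it over with VPU_MEM_LINK:\r
+ * adds (or bumps) a post reference on a region this session already uses\r
+ */\r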
static int vpu_mem_duplicate(struct file *file, int index)\r
{\r
+ vdm_session *session = (vdm_session *)file->private_data;\r
	/* caller must hold vdm_rwsem for writing! */\r
if (!is_vpu_mem_file(file)) {\r
-#if VPU_MEM_DEBUG\r
- printk(KERN_INFO "duplicate vpu_mem data from invalid file.\n");\r
-#endif\r
+ printk(KERN_INFO "duplicate vpu_mem session from invalid file.\n");\r
return -ENODEV;\r
}\r
\r
DLOG("duplicate index %d\n", index);\r
-\r
- if (region_check(index)) {\r
-#if VPU_MEM_DEBUG\r
- printk(KERN_INFO "region %d unable to pass coherence check!", index);\r
-#endif\r
- return -EINVAL;\r
+ {\r
+ vdm_link *post = find_post_link(index);\r
+ if (NULL == post) {\r
+ vdm_link *used = find_used_link(session, index);\r
+ if (NULL == used) {\r
+ printk(KERN_ERR "try to duplicate unknown index %d\n", index);\r
+ dump_status();\r
+ return -1;\r
+ }\r
+			post = new_link_by_region(used->region);\r
+			if (NULL == post)\r
+				return -ENOMEM;\r
+			post->link_post = 1;\r
+ post->region->post++;\r
+ put_post_link(post, session);\r
+ } else {\r
+ DLOG("duplicate posted index %d\n", index);\r
+ post->link_post++;\r
+ post->region->post++;\r
+ }\r
}\r
\r
- region_set_avail(index, VPU_MEM_AVAIL(index) + 1);\r
-\r
return 0;\r
}\r
\r
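+/*\r
+ * take over a posted buffer: consume one post reference on index and turn\r
+ * it into a used reference owned by the calling session\r
+ */\r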
static int vpu_mem_link(struct file *file, int index)\r
{\r
- int err;\r
- struct vpu_mem_data *data = (struct vpu_mem_data *)file->private_data;\r
+ vdm_session *session = (vdm_session *)file->private_data;\r
\r
if (!is_vpu_mem_file(file)) {\r
-#if VPU_MEM_DEBUG\r
- printk(KERN_INFO "link vpu_mem data from invalid file.\n");\r
-#endif\r
+ printk(KERN_INFO "link vpu_mem session from invalid file.\n");\r
return -ENODEV;\r
}\r
\r
- if (region_check(index)) {\r
-#if VPU_MEM_DEBUG\r
- printk(KERN_INFO "region %d unable to pass coherence check!", index);\r
-#endif\r
- return -EINVAL;\r
- }\r
-\r
- // check target index region first\r
- if (VPU_MEM_IS_FREE(index)) {\r
-#if VPU_MEM_DEBUG\r
- printk(KERN_INFO "try to link free region %d!", index);\r
-#endif\r
- return -1;\r
+ DLOG("link index %d\n", index);\r
+ {\r
+ vdm_link *post = find_post_link(index);\r
+ if (NULL == post) {\r
+ printk(KERN_ERR "try to link unknown index %d\n", index);\r
+ dump_status();\r
+ return -1;\r
+ } else {\r
+ vdm_link *used = find_used_link(session, index);\r
+ post->link_post--;\r
+ post->region->post--;\r
+ if (0 == post->link_post) {\r
+ if (NULL == used) {\r
+ post->link_used++;\r
+ post->region->used++;\r
+ put_used_link(post, session);\r
+ } else {\r
+ used->link_used++;\r
+ used->region->used++;\r
+ link_del(post);\r
+ }\r
+ } else {\r
+ if (NULL == used) {\r
+				used = new_link_by_region(post->region);\r
+				if (NULL == used) {\r
+					post->link_post++;\r
+					post->region->post++;\r
+					return -ENOMEM;\r
+				}\r
+ used->link_used++;\r
+ used->region->used++;\r
+ put_used_link(used, session);\r
+ } else {\r
+ used->link_used++;\r
+ used->region->used++;\r
+ }\r
+ }\r
+ }\r
}\r
\r
- /* caller should hold the write lock on vpu_mem_sem! */\r
- down_write(&data->sem);\r
- err = region_link(&data->region_list, index);\r
- up_write(&data->sem);\r
- DLOG("link index %d ret %d\n", index, err);\r
-\r
- return err;\r
+ return 0;\r
}\r
\r
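+/* flush / clean / invalidate the CPU data cache over one allocated buffer */\r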
void vpu_mem_cache_opt(struct file *file, long index, unsigned int cmd)\r
{\r
- struct vpu_mem_data *data;\r
+ vdm_session *session = (vdm_session *)file->private_data;\r
void *start, *end;\r
\r
if (!is_vpu_mem_file(file)) {\r
return;\r
}\r
\r
- data = (struct vpu_mem_data *)file->private_data;\r
if (!vpu_mem.cached || file->f_flags & O_SYNC)\r
return;\r
\r
- down_read(&data->sem);\r
- start = VPU_MEM_START_VADDR(index);\r
- end = VPU_MEM_END_VADDR(index);\r
- switch (cmd) {\r
- case VPU_MEM_CACHE_FLUSH : {\r
- dmac_flush_range(start, end);\r
- break;\r
- }\r
- case VPU_MEM_CACHE_CLEAN : {\r
- dmac_clean_range(start, end);\r
- break;\r
- }\r
- case VPU_MEM_CACHE_INVALID : {\r
- dmac_inv_range(start, end);\r
- break;\r
- }\r
- default :\r
- break;\r
+ down_read(&vdm_rwsem);\r
+ {\r
+		vdm_link *link = find_used_link(session, index);\r
+		if (NULL == link) {\r
+			up_read(&vdm_rwsem);\r
+			return;\r
+		}\r
+		start = vpu_mem.vbase + index * VPU_MEM_MIN_ALLOC;\r
+		end = start + link->pfn * VPU_MEM_MIN_ALLOC;\r
+ switch (cmd) {\r
+ case VPU_MEM_CACHE_FLUSH : {\r
+ dmac_flush_range(start, end);\r
+ break;\r
+ }\r
+ case VPU_MEM_CACHE_CLEAN : {\r
+ dmac_clean_range(start, end);\r
+ break;\r
+ }\r
+ case VPU_MEM_CACHE_INVALID : {\r
+ dmac_inv_range(start, end);\r
+ break;\r
+ }\r
+ default :\r
+ break;\r
+ }\r
}\r
- up_read(&data->sem);\r
+ up_read(&vdm_rwsem);\r
}\r
\r
static pgprot_t phys_mem_access_prot(struct file *file, pgprot_t vma_prot)\r
\r
static int vpu_mem_open(struct inode *inode, struct file *file)\r
{\r
- struct vpu_mem_data *data;\r
- int ret = 0;\r
-\r
- DLOG("current %u file %p(%d)\n", current->pid, file, (int)file_count(file));\r
- /* setup file->private_data to indicate its unmapped */\r
- /* you can only open a vpu_mem device one time */\r
- if (file->private_data != NULL)\r
- return -1;\r
- data = kmalloc(sizeof(struct vpu_mem_data), GFP_KERNEL);\r
- if (!data) {\r
-#if VPU_MEM_DEBUG\r
- printk("vpu_mem: unable to allocate memory for vpu_mem metadata.");\r
-#endif\r
- return -1;\r
- }\r
- data->pid = 0;\r
-\r
- INIT_LIST_HEAD(&data->region_list);\r
- init_rwsem(&data->sem);\r
-\r
- file->private_data = data;\r
- INIT_LIST_HEAD(&data->list);\r
-\r
- down(&vpu_mem.data_list_sem);\r
- list_add(&data->list, &vpu_mem.data_list);\r
- up(&vpu_mem.data_list_sem);\r
- return ret;\r
+ vdm_session *session;\r
+ int ret = 0;\r
+ \r
+ DLOG("current %u file %p(%d)\n", current->pid, file, (int)file_count(file));\r
+ /* setup file->private_data to indicate its unmapped */\r
+ /* you can only open a vpu_mem device one time */\r
+ if (file->private_data != NULL)\r
+ return -1;\r
+ session = kmalloc(sizeof(vdm_session), GFP_KERNEL);\r
+ if (!session) {\r
+ printk(KERN_ALERT "vpu_mem: unable to allocate memory for vpu_mem metadata.");\r
+ return -1;\r
+ }\r
+ session->pid = current->pid;\r
+ INIT_LIST_HEAD(&session->list_post);\r
+ INIT_LIST_HEAD(&session->list_used);\r
+ \r
+ file->private_data = session;\r
+ \r
+ down_write(&vdm_rwsem);\r
+ list_add_tail(&session->list_session, &vdm_proc);\r
+ up_write(&vdm_rwsem);\r
+ return ret;\r
}\r
\r
static int vpu_mem_mmap(struct file *file, struct vm_area_struct *vma)\r
{\r
- struct vpu_mem_data *data;\r
+ vdm_session *session;\r
unsigned long vma_size = vma->vm_end - vma->vm_start;\r
int ret = 0;\r
\r
if (vma->vm_pgoff || !VPU_MEM_IS_PAGE_ALIGNED(vma_size)) {\r
-#if VPU_MEM_DEBUG\r
- printk(KERN_ERR "vpu_mem: mmaps must be at offset zero, aligned"\r
+ printk(KERN_ALERT "vpu_mem: mmaps must be at offset zero, aligned"\r
" and a multiple of pages_size.\n");\r
-#endif\r
return -EINVAL;\r
}\r
\r
- data = (struct vpu_mem_data *)file->private_data;\r
-\r
-#if VPU_MEM_DEBUG\r
- printk(KERN_ALERT "vpu_mem: file->private_data : 0x%x\n", (unsigned int)data);\r
-#endif\r
-\r
- down_write(&data->sem);\r
+ session = (vdm_session *)file->private_data;\r
\r
/* assert: vma_size must be the total size of the vpu_mem */\r
if (vpu_mem.size != vma_size) {\r
-#if VPU_MEM_DEBUG\r
printk(KERN_WARNING "vpu_mem: mmap size [%lu] does not match"\r
"size of backing region [%lu].\n", vma_size, vpu_mem.size);\r
-#endif\r
ret = -EINVAL;\r
goto error;\r
}\r
goto error;\r
}\r
\r
- data->pid = current->pid;\r
+ session->pid = current->pid;\r
\r
error:\r
- up_write(&data->sem);\r
return ret;\r
}\r
\r
static int vpu_mem_release(struct inode *inode, struct file *file)\r
{\r
- struct vpu_mem_data *data = (struct vpu_mem_data *)file->private_data;\r
- struct list_head *elt, *elt2;\r
+ vdm_session *session = (vdm_session *)file->private_data;\r
\r
- down(&vpu_mem.data_list_sem);\r
- list_del(&data->list);\r
- up(&vpu_mem.data_list_sem);\r
-\r
-	// TODO: when the last file is released\r
- down_write(&data->sem);\r
- file->private_data = NULL;\r
- list_for_each_safe(elt, elt2, &data->region_list) {\r
- struct vpu_mem_region_node *node = list_entry(elt, struct vpu_mem_region_node, list);\r
- if (vpu_mem_free_by_region(node))\r
- printk(KERN_INFO "vpu_mem: err on vpu_mem_free_by_region when vpu_mem_release\n");\r
- }\r
- BUG_ON(!list_empty(&data->region_list));\r
- up_write(&data->sem);\r
- kfree(data);\r
+ down_write(&vdm_rwsem);\r
+ {\r
+ vdm_link *link, *tmp_link;\r
+ list_del(&session->list_session);\r
+ file->private_data = NULL;\r
+\r
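+		/* drop every post/used reference this session still holds and\r
+		 * free or recycle the underlying regions */\r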
+		list_for_each_entry_safe(link, tmp_link, &session->list_post, session_link) {\r
+			vdm_region *region = link->region;\r
+			do {\r
+				link->link_post--;\r
+				region->post--;\r
+			} while (link->link_post);\r
+			if (find_free_link(link->index)) {\r
+				link_del(link);\r
+			} else {\r
+				put_free_link(link);\r
+			}\r
+			if (is_free_region(region)) {\r
+				merge_free_region_and_link(region);\r
+			}\r
+		}\r
+		list_for_each_entry_safe(link, tmp_link, &session->list_used, session_link) {\r
+			vdm_region *region = link->region;\r
+			do {\r
+				link->link_used--;\r
+				region->used--;\r
+			} while (link->link_used);\r
+			if (find_free_link(link->index)) {\r
+				link_del(link);\r
+			} else {\r
+				put_free_link(link);\r
+			}\r
+			if (is_free_region(region)) {\r
+				merge_free_region_and_link(region);\r
+			}\r
+		}\r
+ }\r
+ up_write(&vdm_rwsem);\r
+ kfree(session);\r
\r
- return 0;\r
+ return 0;\r
}\r
\r
static long vpu_mem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)\r
{\r
long index, ret = 0;\r
-\r
+ \r
switch (cmd) {\r
case VPU_MEM_GET_PHYS:\r
DLOG("get_phys\n");\r
unsigned int size;\r
if (copy_from_user(&size, (void __user *)arg, sizeof(size)))\r
return -EFAULT;\r
- down_write(&vpu_mem.bitmap_sem);\r
- index = vpu_mem_allocate(file, size);\r
- up_write(&vpu_mem.bitmap_sem);\r
- DLOG("allocate at index %ld\n", index);\r
- return index;\r
+ down_write(&vdm_rwsem);\r
+ ret = vpu_mem_allocate(file, size);\r
+ up_write(&vdm_rwsem);\r
+ DLOG("allocate at index %ld\n", ret);\r
break;\r
}\r
case VPU_MEM_FREE:\r
+ DLOG("mem free\n");\r
{\r
- DLOG("mem free\n");\r
if (copy_from_user(&index, (void __user *)arg, sizeof(index)))\r
return -EFAULT;\r
if (index >= vpu_mem.size)\r
return -EACCES;\r
- down_write(&vpu_mem.bitmap_sem);\r
+ down_write(&vdm_rwsem);\r
ret = vpu_mem_free(file, index);\r
- up_write(&vpu_mem.bitmap_sem);\r
- return ret;\r
+ up_write(&vdm_rwsem);\r
break;\r
}\r
case VPU_MEM_CACHE_FLUSH:\r
case VPU_MEM_CACHE_CLEAN:\r
case VPU_MEM_CACHE_INVALID:\r
+ DLOG("flush\n");\r
{\r
- DLOG("flush\n");\r
if (copy_from_user(&index, (void __user *)arg, sizeof(index)))\r
return -EFAULT;\r
-\r
- down_write(&vpu_mem.bitmap_sem);\r
vpu_mem_cache_opt(file, index, cmd);\r
- up_write(&vpu_mem.bitmap_sem);\r
break;\r
}\r
case VPU_MEM_DUPLICATE:\r
+ DLOG("duplicate\n");\r
{\r
- DLOG("duplicate\n");\r
if (copy_from_user(&index, (void __user *)arg, sizeof(index)))\r
return -EFAULT;\r
- down_write(&vpu_mem.bitmap_sem);\r
+ down_write(&vdm_rwsem);\r
ret = vpu_mem_duplicate(file, index);\r
- up_write(&vpu_mem.bitmap_sem);\r
- return ret;\r
+ up_write(&vdm_rwsem);\r
break;\r
}\r
case VPU_MEM_LINK:\r
+ DLOG("link\n");\r
{\r
- DLOG("link\n");\r
if (copy_from_user(&index, (void __user *)arg, sizeof(index)))\r
return -EFAULT;\r
- down_write(&vpu_mem.bitmap_sem);\r
+ down_write(&vdm_rwsem);\r
ret = vpu_mem_link(file, index);\r
- up_write(&vpu_mem.bitmap_sem);\r
+ up_write(&vdm_rwsem);\r
break;\r
}\r
default:\r
static ssize_t debug_read(struct file *file, char __user *buf, size_t count,\r
loff_t *ppos)\r
{\r
+#if 0\r
struct list_head *elt, *elt2;\r
- struct vpu_mem_data *data;\r
- struct vpu_mem_region_node *region_node;\r
+ struct vpu_mem_info *data;\r
const int debug_bufmax = 4096;\r
static char buffer[4096];\r
int n = 0;\r
DLOG("debug open\n");\r
n = scnprintf(buffer, debug_bufmax,\r
"pid #: mapped regions (offset, len) (offset,len)...\n");\r
-\r
- down(&vpu_mem.data_list_sem);\r
+ down_read(&vdm_rwsem);\r
list_for_each(elt, &vpu_mem.data_list) {\r
- data = list_entry(elt, struct vpu_mem_data, list);\r
+ data = list_entry(elt, struct vpu_mem_info, list);\r
down_read(&data->sem);\r
n += scnprintf(buffer + n, debug_bufmax - n, "pid %u:",\r
data->pid);\r
n += scnprintf(buffer + n, debug_bufmax - n, "\n");\r
up_read(&data->sem);\r
}\r
- up(&vpu_mem.data_list_sem);\r
\r
+ up_read(&vdm_rwsem);\r
n++;\r
buffer[n] = 0;\r
return simple_read_from_buffer(buf, count, ppos, buffer, n);\r
+#else\r
+ return 0;\r
+#endif\r
}\r
\r
static struct file_operations debug_fops = {\r
\r
int vpu_mem_setup(struct vpu_mem_platform_data *pdata)\r
{\r
+ vdm_link *tmp = NULL;\r
int err = 0;\r
\r
- if (vpu_mem_count)\r
- {\r
+ if (vpu_mem_count) {\r
printk(KERN_ALERT "Only one vpu_mem driver can be register!\n");\r
goto err_cant_register_device;\r
}\r
\r
memset(&vpu_mem, 0, sizeof(struct vpu_mem_info));\r
\r
- vpu_mem.cached = pdata->cached;\r
- vpu_mem.buffered = pdata->buffered;\r
- vpu_mem.base = pdata->start;\r
- vpu_mem.size = pdata->size;\r
- init_rwsem(&vpu_mem.bitmap_sem);\r
- init_MUTEX(&vpu_mem.data_list_sem);\r
- INIT_LIST_HEAD(&vpu_mem.data_list);\r
- vpu_mem.dev.name = pdata->name;\r
- vpu_mem.dev.minor = MISC_DYNAMIC_MINOR;\r
- vpu_mem.dev.fops = &vpu_mem_fops;\r
-\r
- err = misc_register(&vpu_mem.dev);\r
- if (err) {\r
- printk(KERN_ALERT "Unable to register vpu_mem driver!\n");\r
- goto err_cant_register_device;\r
- }\r
- vpu_mem_count++;\r
-\r
- vpu_mem.num_entries = vpu_mem.size / VPU_MEM_MIN_ALLOC;\r
- vpu_mem.bitmap = kzalloc(vpu_mem.num_entries *\r
- sizeof(struct vpu_mem_bits), GFP_KERNEL);\r
- if (!vpu_mem.bitmap)\r
- goto err_no_mem_for_metadata;\r
-\r
- region_set(0, vpu_mem.num_entries);\r
-\r
- if (vpu_mem.cached)\r
- vpu_mem.vbase = ioremap_cached(vpu_mem.base,\r
- vpu_mem.size);\r
-#ifdef ioremap_ext_buffered\r
- else if (vpu_mem.buffered)\r
- vpu_mem.vbase = ioremap_ext_buffered(vpu_mem.base,\r
- vpu_mem.size);\r
-#endif\r
- else\r
- vpu_mem.vbase = ioremap(vpu_mem.base, vpu_mem.size);\r
-\r
- if (vpu_mem.vbase == 0)\r
- goto error_cant_remap;\r
+ vpu_mem.cached = pdata->cached;\r
+ vpu_mem.buffered = pdata->buffered;\r
+ vpu_mem.base = pdata->start;\r
+ vpu_mem.size = pdata->size;\r
+ init_rwsem(&vdm_rwsem);\r
+ INIT_LIST_HEAD(&vdm_proc);\r
+ INIT_LIST_HEAD(&vdm_used);\r
+ INIT_LIST_HEAD(&vdm_post);\r
+ INIT_LIST_HEAD(&vdm_free);\r
+ INIT_LIST_HEAD(&vdm_index);\r
+ vpu_mem.dev.name = pdata->name;\r
+ vpu_mem.dev.minor = MISC_DYNAMIC_MINOR;\r
+ vpu_mem.dev.fops = &vpu_mem_fops;\r
+ \r
+ err = misc_register(&vpu_mem.dev);\r
+ if (err) {\r
+ printk(KERN_ALERT "Unable to register vpu_mem driver!\n");\r
+ goto err_cant_register_device;\r
+ }\r
+ \r
+ vpu_mem.num_entries = vpu_mem.size / VPU_MEM_MIN_ALLOC;\r
\r
-#if VPU_MEM_DEBUG\r
- debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)vpu_mem.dev.minor,\r
- &debug_fops);\r
-#endif\r
- printk("%s: %d initialized\n", pdata->name, vpu_mem.dev.minor);\r
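+	/* seed the allocator with a single free region that spans the whole\r
+	 * vpu_mem space */\r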
+ tmp = new_link_by_index(0, vpu_mem.num_entries);\r
+ if (NULL == tmp) {\r
+ printk(KERN_ALERT "init free region failed\n");\r
+ goto err_no_mem_for_metadata;\r
+ }\r
+ put_free_link(tmp);\r
+ _insert_region_index(tmp->region);\r
+\r
+ if (vpu_mem.cached)\r
+ vpu_mem.vbase = ioremap_cached(vpu_mem.base, vpu_mem.size);\r
+ #ifdef ioremap_ext_buffered\r
+ else if (vpu_mem.buffered)\r
+ vpu_mem.vbase = ioremap_ext_buffered(vpu_mem.base, vpu_mem.size);\r
+ #endif\r
+ else\r
+ vpu_mem.vbase = ioremap(vpu_mem.base, vpu_mem.size);\r
+ \r
+ if (vpu_mem.vbase == 0)\r
+ goto error_cant_remap;\r
+ \r
+ #if VPU_MEM_DEBUG\r
+ debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)vpu_mem.dev.minor,\r
+ &debug_fops);\r
+ #endif\r
+ printk("%s: %d initialized\n", pdata->name, vpu_mem.dev.minor);\r
+ vpu_mem_count++;\r
return 0;\r
error_cant_remap:\r
- kfree(vpu_mem.bitmap);\r
+	if (tmp) {\r
+		_remove_free_region(tmp->region);\r
+		_remove_free_link(tmp);\r
+	}\r
err_no_mem_for_metadata:\r
misc_deregister(&vpu_mem.dev);\r
err_cant_register_device:\r
return -1;\r
}\r
if (vpu_mem_count) {\r
- if (vpu_mem.bitmap) {\r
- kfree(vpu_mem.bitmap);\r
- vpu_mem.bitmap = NULL;\r
- }\r
misc_deregister(&vpu_mem.dev);\r
vpu_mem_count--;\r
} else {\r
\r
static int proc_vpu_mem_show(struct seq_file *s, void *v)\r
{\r
- unsigned int i;\r
-\r
- if (vpu_mem.bitmap) {\r
+ if (vpu_mem_count) {\r
seq_printf(s, "vpu mem opened\n");\r
} else {\r
seq_printf(s, "vpu mem closed\n");\r
return 0;\r
}\r
\r
- down_read(&vpu_mem.bitmap_sem);\r
+ down_read(&vdm_rwsem);\r
{\r
-		// print all regions in the bitmap\r
- for (i = 0; i < vpu_mem.num_entries; i = VPU_MEM_NEXT_INDEX(i)) {\r
- region_check(i);\r
- seq_printf(s, "vpu_mem: idx %6d pfn %6d refc %3d avail %3d\n",\r
- i, VPU_MEM_PFN(i), VPU_MEM_REFC(i), VPU_MEM_AVAIL(i));\r
+ vdm_link *link, *tmp_link;\r
+ vdm_region *region, *tmp_region;\r
+ vdm_session *session, *tmp_session;\r
+		// print all regions ordered by index\r
+ seq_printf(s, "index:\n");\r
+ list_for_each_entry_safe(region, tmp_region, &vdm_index, index_list) {\r
+ seq_printf(s, " idx %6d pfn %6d used %3d post %3d\n",\r
+ region->index, region->pfn, region->used, region->post);\r
}\r
-\r
-		// print all regions in vpu_mem_data\r
- down(&vpu_mem.data_list_sem);\r
- { // search exists index\r
- struct list_head *list, *tmp_list;\r
- list_for_each_safe(list, tmp_list, &vpu_mem.data_list) {\r
- struct list_head *region, *tmp_data;\r
- struct vpu_mem_data *data = list_entry(list, struct vpu_mem_data, list);\r
-\r
- seq_printf(s, "pid: %d\n", data->pid);\r
-\r
- down_read(&data->sem);\r
- list_for_each_safe(region, tmp_data, &data->region_list) {\r
- struct vpu_mem_region_node *node = list_entry(region, struct vpu_mem_region_node, list);\r
- i = node->region.index;\r
- seq_printf(s, " region: idx %6d pfn %6d refc %3d avail %3d ref by %d\n",\r
- i, VPU_MEM_PFN(i), VPU_MEM_REFC(i), VPU_MEM_AVAIL(i), node->region.ref_count);\r
+ if (list_empty(&vdm_free)) {\r
+ seq_printf(s, "free : empty\n");\r
+ } else {\r
+ seq_printf(s, "free :\n");\r
+ list_for_each_entry_safe(link, tmp_link, &vdm_free, status_link) {\r
+ seq_printf(s, " idx %6d pfn %6d used %3d post %3d\n",\r
+ link->index, link->pfn, link->link_used, link->link_post);\r
+ }\r
+ }\r
+ if (list_empty(&vdm_used)) {\r
+ seq_printf(s, "used : empty\n");\r
+ } else {\r
+ seq_printf(s, "used :\n");\r
+ list_for_each_entry_safe(link, tmp_link, &vdm_used, status_link) {\r
+ seq_printf(s, " idx %6d pfn %6d used %3d post %3d\n",\r
+ link->index, link->pfn, link->link_used, link->link_post);\r
+ }\r
+ }\r
+ if (list_empty(&vdm_post)) {\r
+ seq_printf(s, "post : empty\n");\r
+ } else {\r
+ seq_printf(s, "post :\n");\r
+ list_for_each_entry_safe(link, tmp_link, &vdm_post, status_link) {\r
+ seq_printf(s, " idx %6d pfn %6d used %3d post %3d\n",\r
+ link->index, link->pfn, link->link_used, link->link_post);\r
+ }\r
+ }\r
+ \r
+		// print every session's region usage in vpu_mem_info\r
+ list_for_each_entry_safe(session, tmp_session, &vdm_proc, list_session) {\r
+ seq_printf(s, "\npid: %d\n", session->pid);\r
+ if (list_empty(&session->list_used)) {\r
+ seq_printf(s, "used : empty\n");\r
+ } else {\r
+ seq_printf(s, "used :\n");\r
+ list_for_each_entry_safe(link, tmp_link, &session->list_used, session_link) {\r
+ seq_printf(s, " idx %6d pfn %6d used %3d\n",\r
+ link->index, link->pfn, link->link_used);\r
+ }\r
+ }\r
+ if (list_empty(&session->list_post)) {\r
+ seq_printf(s, "post : empty\n");\r
+ } else {\r
+ seq_printf(s, "post :\n");\r
+ list_for_each_entry_safe(link, tmp_link, &session->list_post, session_link) {\r
+ seq_printf(s, " idx %6d pfn %6d post %3d\n",\r
+ link->index, link->pfn, link->link_post);\r
}\r
- up_read(&data->sem);\r
}\r
}\r
- up(&vpu_mem.data_list_sem);\r
}\r
- up_read(&vpu_mem.bitmap_sem);\r
- return 0;\r
+\r
+ up_read(&vdm_rwsem);\r
+ return 0;\r
}\r
\r
static int proc_vpu_mem_open(struct inode *inode, struct file *file)\r