\r
#define VPU_MEM_DEBUG 1\r
\r
-#define VPU_MEM_BITMAP_ERR (-1)\r
-#define VPU_MEM_ERR_FREE_REFN_ERR (-5)\r
+#define VPU_MEM_SPLIT_ALLOC 0\r
+#define VPU_MEM_SPLIT_LINK 1\r
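+/*\r
+ * VPU_MEM_SPLIT_ALLOC/LINK mark how a region enters a client's\r
+ * region_list: split off the free pool on allocate, or linked to an\r
+ * already allocated region (see region_split below)\r
+ */\r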
\r
struct vpu_mem_data {\r
/* protects this data field, if the mm_mmap sem will be held at the\r
};\r
\r
struct vpu_mem_bits {\r
- unsigned short pfn; /* page frame number - vpu_mem space max 256M */\r
- signed refrn:7; /* reference number */\r
- unsigned first:1; /* 1 if first, 0 if not first */\r
- signed avail:7; /* available link number */\r
- unsigned allocated:1; /* 1 if allocated, 0 if free */\r
+ int pfn:16; /* page frame number - vpu_mem space max 256M */\r
+ int refrc:7; /* reference number */\r
+ int first:1; /* 1 if first, 0 if not first */\r
+ int avail:7; /* available link number */\r
+ int last:1; /* 1 if last, 0 if no last */\r
};\r
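+\r
+/*\r
+ * boundary-tag bookkeeping: region_set() mirrors pfn into both the first\r
+ * and the last bitmap entry of a region, and refrc/avail are kept equal\r
+ * at both ends, so the preceding region can be found in O(1) by reading\r
+ * the entry just before a region's first index (see VPU_MEM_LAST_INDEX)\r
+ */\r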
\r
struct vpu_mem_region {\r
- unsigned long offset;\r
- unsigned long len;\r
+ int index;\r
+ int ref_count;\r
};\r
\r
struct vpu_mem_region_node {\r
struct list_head list;\r
};\r
\r
+#define NODE_REGION_INDEX(p) ((p)->region.index)\r
+#define NODE_REGION_REFC(p) ((p)->region.ref_count)\r
+\r
#define VPU_MEM_DEBUG_MSGS 0\r
#if VPU_MEM_DEBUG_MSGS\r
#define DLOG(fmt,args...) \\r
static struct vpu_mem_info vpu_mem;\r
static int vpu_mem_count;\r
\r
-#define VPU_MEM_IS_FREE(index) !(vpu_mem.bitmap[index].allocated)\r
-#define VPU_MEM_IS_FIRST(index) (vpu_mem.bitmap[index].first)\r
-#define VPU_MEM_BIT(index) &vpu_mem.bitmap[index]\r
-#define VPU_MEM_PFN(index) vpu_mem.bitmap[index].pfn\r
+#define VPU_MEM_IS_FREE(index) !(vpu_mem.bitmap[index].avail)\r
+#define VPU_MEM_FIRST(index) (vpu_mem.bitmap[index].first)\r
+#define VPU_MEM_LAST(index) (vpu_mem.bitmap[index].last)\r
+#define VPU_MEM_REFC(index) (vpu_mem.bitmap[index].refrc)\r
+#define VPU_MEM_AVAIL(index) (vpu_mem.bitmap[index].avail)\r
+#define VPU_MEM_BIT(index) (&vpu_mem.bitmap[index])\r
+#define VPU_MEM_PFN(index) (vpu_mem.bitmap[index].pfn)\r
+#define VPU_MEM_LAST_INDEX(index) (index - VPU_MEM_PFN(index - 1))\r
#define VPU_MEM_NEXT_INDEX(index) (index + VPU_MEM_PFN(index))\r
+#define VPU_MEM_END_INDEX(index) (VPU_MEM_NEXT_INDEX(index) - 1)\r
#define VPU_MEM_OFFSET(index) (index * VPU_MEM_MIN_ALLOC)\r
#define VPU_MEM_START_ADDR(index) (VPU_MEM_OFFSET(index) + vpu_mem.base)\r
#define VPU_MEM_SIZE(index) ((VPU_MEM_PFN(index)) * VPU_MEM_MIN_ALLOC)\r
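+\r
+/*\r
+ * worked example: a region of pfn 4 starting at index 8 stores pfn 4 at\r
+ * entries 8 and 11, so VPU_MEM_NEXT_INDEX(8) = 12, VPU_MEM_END_INDEX(8) = 11\r
+ * and, from the following region, VPU_MEM_LAST_INDEX(12) = 12 - 4 = 8\r
+ */\r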
return 1;\r
}\r
\r
-\r
-static void vpu_mem_get_region(struct vpu_mem_data *data,\r
- struct vpu_mem_region_node *region_node,\r
- int index, int pfn)\r
+static void region_set(int index, int pfn)\r
{\r
- int curr, next = index + pfn;\r
- struct vpu_mem_bits *pbits;\r
+ WARN(pfn <= 0, "vpu_mem: region_set non-positive pfn\n");\r
+ if (pfn > 0) {\r
+ int first = index;\r
+ int last = index + pfn - 1;\r
\r
- if (VPU_MEM_IS_FREE(next)) {\r
- pbits = VPU_MEM_BIT(next);\r
- pbits->first = 1;\r
- pbits->pfn = VPU_MEM_PFN(index) - pfn;\r
- } else {\r
- if (!VPU_MEM_IS_FIRST(next))\r
- DLOG("something wrong when get_region pfn %d at index %d\n", pfn, index);\r
+ DLOG("region_set: first %d, last %d, size %d\n", first, last, pfn);\r
+\r
+ VPU_MEM_FIRST(first) = 1;\r
+ VPU_MEM_PFN(first) = pfn;\r
+\r
+ VPU_MEM_LAST(last) = 1;\r
+ VPU_MEM_PFN(last) = pfn;\r
}\r
+}\r
\r
- pbits = VPU_MEM_BIT(index);\r
+static void region_unset(int index, int pfn)\r
+{\r
+ WARN(pfn <= 0, "vpu_mem: region_unset non-positive pfn\n");\r
+ if (pfn > 0) {\r
+ int first = index;\r
+ int last = index + pfn - 1;\r
\r
- pbits->first = 1;\r
- pbits->pfn = pfn;\r
- pbits->refrn++;\r
- pbits->avail++;\r
+ DLOG("region_unset: first %d, last %d, size %d\n", first, last, pfn);\r
\r
- for (curr = 0; curr < pfn; curr++)\r
- pbits[curr].allocated = 1;\r
+ VPU_MEM_FIRST(first) = 0;\r
+ VPU_MEM_LAST(first) = 0;\r
+ VPU_MEM_PFN(first) = 0;\r
\r
- region_node->region.offset = index;\r
- region_node->region.len = pfn;\r
+ VPU_MEM_FIRST(last) = 0;\r
+ VPU_MEM_LAST(last) = 0;\r
+ VPU_MEM_PFN(last) = 0;\r
+ }\r
+}\r
\r
- down_write(&data->sem);\r
- list_add(&region_node->list, &data->region_list);\r
- up_write(&data->sem);\r
+static void region_set_ref_count(int index, int ref_count)\r
+{\r
+ DLOG("region_set_ref_count: index %d, ref_count %d\n", index, ref_count);\r
\r
- return ;\r
+ VPU_MEM_REFC(index) = ref_count;\r
+ VPU_MEM_REFC(VPU_MEM_END_INDEX(index)) = ref_count;\r
}\r
\r
-static int vpu_mem_put_region_by_index(struct vpu_mem_data *data, int index)\r
+static void region_set_avail(int index, int avail)\r
{\r
- struct vpu_mem_bits *pbits = VPU_MEM_BIT(index);\r
- pbits->refrn--;\r
- pbits->avail--;\r
+ DLOG("region_set_avail: index %d, avail %d\n", index, avail);\r
\r
- if (!pbits->avail)\r
- {\r
- int i;\r
- for (i = 0; i < pbits->pfn; i++)\r
- pbits[i].allocated = 0;\r
+ VPU_MEM_AVAIL(index) = avail;\r
+ VPU_MEM_AVAIL(VPU_MEM_END_INDEX(index)) = avail;\r
+}\r
\r
- down_write(&data->sem);\r
- {\r
- struct vpu_mem_region_node *region_node;\r
- struct list_head *elt, *elt2;\r
- list_for_each_safe(elt, elt2, &data->region_list) {\r
- region_node = list_entry(elt, struct vpu_mem_region_node, list);\r
- if (region_node->region.offset == index)\r
- {\r
- if (pbits->pfn != region_node->region.len)\r
- DLOG("something wrong when put_region at index %d\n", index);\r
- list_del(elt);\r
- kfree(region_node);\r
- break;\r
- }\r
- }\r
- }\r
- up_write(&data->sem);\r
- }\r
+static int index_avail(int index)\r
+{\r
+ return ((0 <= index) && (index < vpu_mem.num_entries));\r
+}\r
+\r
+static int region_check(int index)\r
+{\r
+ int end = VPU_MEM_END_INDEX(index);\r
+\r
+ DLOG("region_check: index %d val 0x%.8x, end %d val 0x%.8x\n",\r
+ index, *((unsigned int *)VPU_MEM_BIT(index)),\r
+ end, *((unsigned int *)VPU_MEM_BIT(end)));\r
+\r
+ if (WARN(index < 0,\r
+ "vpu_mem: region_check fail: negative first %d\n", index))\r
+ return -1;\r
+ if (WARN(index >= vpu_mem.num_entries,\r
+ "vpu_mem: region_check fail: too large first %d\n", index))\r
+ return -1;\r
+ if (WARN(end < index,\r
+ "vpu_mem: region_check fail: end %d is before first %d\n", end, index))\r
+ return -1;\r
+ if (WARN(end >= vpu_mem.num_entries,\r
+ "vpu_mem: region_check fail: too large end %d\n", end))\r
+ return -1;\r
+ if (WARN(!VPU_MEM_FIRST(index),\r
+ "vpu_mem: region_check fail: index %d is not first\n", index))\r
+ return -1;\r
+ if (WARN(!VPU_MEM_LAST(end),\r
+ "vpu_mem: region_check fail: index %d is not end\n", end))\r
+ return -1;\r
+ if (WARN(VPU_MEM_PFN(index) != VPU_MEM_PFN(end),\r
+ "vpu_mem: region_check fail: pfn of first %d and end %d are not equal\n", index, end))\r
+ return -1;\r
+ if (WARN(VPU_MEM_REFC(index) != VPU_MEM_REFC(end),\r
+ "vpu_mem: region_check fail: ref counts of first %d and end %d are not equal\n", index, end))\r
+ return -1;\r
+ if (WARN(VPU_MEM_AVAIL(index) != VPU_MEM_AVAIL(end),\r
+ "vpu_mem: region_check fail: avail counts of first %d and end %d are not equal\n", index, end))\r
+ return -1;\r
return 0;\r
}\r
\r
-static int vpu_mem_put_region_by_region(struct vpu_mem_region_node *region_node)\r
+/*\r
+ * split an allocated block out of a free block\r
+ * the bitmap_sem and region_list_sem must be held together\r
+ * node is an output region node\r
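+ * e.g. splitting pfn 3 out of a free region of size 10 at index 20 yields\r
+ * an allocated region [20..22] and a free remainder [23..29]\r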
+ */\r
+static int region_split(struct list_head *region_list, struct vpu_mem_region_node *node, int index, int pfn)\r
{\r
- int index = region_node->region.offset;\r
- struct vpu_mem_bits *pbits = VPU_MEM_BIT(index);\r
- pbits->refrn--;\r
- pbits->avail--;\r
+ int pfn_free = VPU_MEM_PFN(index);\r
+ // check that pfn does not exceed the size of the free region at index\r
+ if ((pfn > pfn_free) || (pfn <= 0)) {\r
+#if VPU_MEM_DEBUG\r
+ printk(KERN_INFO "unable to split region %d of size %d, while is smaller than %d!", index, pfn_free, pfn);\r
+#endif\r
+ return -EINVAL;\r
+ }\r
+ // check region data coherence\r
+ if (region_check(index)) {\r
+#if VPU_MEM_DEBUG\r
+ printk(KERN_INFO "region %d unable to pass coherence check!", index);\r
+#endif\r
+ return -EINVAL;\r
+ }\r
\r
- if (!pbits->avail)\r
- {\r
- int i;\r
- for (i = 0; i < pbits->pfn; i++)\r
- pbits[i].allocated = 0;\r
+ if (NULL == node) {\r
+ // check target index region first\r
+ if (!VPU_MEM_IS_FREE(index)) {\r
+#if VPU_MEM_DEBUG\r
+ printk(KERN_INFO "try to split not free region %d!", index);\r
+#endif\r
+ return -EBUSY;\r
+ }\r
+ // malloc vpu_mem_region_node\r
+ node = kmalloc(sizeof(struct vpu_mem_region_node), GFP_KERNEL);\r
+ if (NULL == node) {\r
+#if VPU_MEM_DEBUG\r
+ printk(KERN_INFO "No space to allocate struct vpu_mem_region_node!");\r
+#endif\r
+ return -ENOMEM;\r
+ }\r
+\r
+ // list_add_tail() on the list head appends at the tail, so no walk\r
+ // to the last node is needed\r
+ DLOG("list_add_tail\n");\r
+ list_add_tail(&node->list, region_list);\r
+\r
+ DLOG("start region_set index %d pfn %u\n", index, pfn);\r
+ region_set(index, pfn);\r
\r
- list_del(&region_node->list);\r
- kfree(region_node);\r
+ DLOG("start region_set index %d pfn %u\n", index + pfn, pfn_free - pfn);\r
+ region_set(index + pfn, pfn_free - pfn);\r
+\r
+ region_set_avail(index, VPU_MEM_AVAIL(index) + 1);\r
+ region_set_ref_count(index, VPU_MEM_REFC(index) + 1);\r
+ node->region.index = index;\r
+ node->region.ref_count = 1;\r
+ } else {\r
+ region_set_ref_count(index, VPU_MEM_REFC(index) + 1);\r
+ node->region.ref_count++;\r
}\r
\r
return 0;\r
}\r
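\r
+/*\r
+ * merge a fully released region with free neighbours on both sides,\r
+ * e.g. adjacent free regions [8..11] and [12..15] collapse into one\r
+ * region [8..15] with pfn 8 written at entries 8 and 15\r
+ */\r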
\r
+static int region_merge(struct list_head *node)\r
+{\r
+ struct vpu_mem_region_node *pnode = list_entry(node, struct vpu_mem_region_node, list);\r
+ int index = pnode->region.index;\r
+ int target;\r
+\r
+ if (VPU_MEM_AVAIL(index))\r
+ return 0;\r
+ if (region_check(index))\r
+ return -EINVAL;\r
+\r
+ target = VPU_MEM_NEXT_INDEX(index);\r
+ if (index_avail(target) && VPU_MEM_IS_FREE(target)) {\r
+ int pfn_target = VPU_MEM_PFN(target);\r
+ int pfn_index = VPU_MEM_PFN(index);\r
+ int pfn_total = pfn_target + pfn_index;\r
+ region_unset(index, pfn_index);\r
+ region_unset(target, pfn_target);\r
+ region_set(index, pfn_total);\r
+ }\r
+ if (index > 0) {\r
+ target = VPU_MEM_LAST_INDEX(index);\r
+ if (index_avail(target) && VPU_MEM_IS_FREE(target)) {\r
+ int pfn_target = VPU_MEM_PFN(target);\r
+ int pfn_index = VPU_MEM_PFN(index);\r
+ int pfn_total = pfn_target + pfn_index;\r
+ region_unset(index, pfn_index);\r
+ region_unset(target, pfn_target);\r
+ // the merged region starts at the previous region's first index\r
+ region_set(target, pfn_total);\r
+ }\r
+ }\r
+ return 0;\r
+}\r
+\r
static long vpu_mem_allocate(struct file *file, unsigned int len)\r
{\r
/* caller should hold the write lock on vpu_mem_sem! */\r
/* return the corresponding pdata[] entry */\r
int curr = 0;\r
- int end = vpu_mem.num_entries;\r
int best_fit = -1;\r
unsigned int pfn = (len + VPU_MEM_MIN_ALLOC - 1)/VPU_MEM_MIN_ALLOC;\r
struct vpu_mem_data *data = (struct vpu_mem_data *)file->private_data;\r
- struct vpu_mem_region_node *region_node;\r
\r
if (!is_vpu_mem_file(file)) {\r
#if VPU_MEM_DEBUG\r
return -ENODEV;\r
}\r
\r
- DLOG("vpu_mem_allocate pfn %x\n", pfn);\r
-\r
- region_node = kmalloc(sizeof(struct vpu_mem_region_node),\r
- GFP_KERNEL);\r
- if (!region_node) {\r
-#if VPU_MEM_DEBUG\r
- printk(KERN_INFO "No space to allocate metadata!");\r
-#endif\r
- return -ENOMEM;\r
- }\r
-\r
/* look through the bitmap:\r
* if you find a free slot of the correct order use it\r
* otherwise, use the best fit (smallest with size > order) slot\r
*/\r
- while (curr < end) {\r
+ while (curr < vpu_mem.num_entries) {\r
if (VPU_MEM_IS_FREE(curr)) {\r
if (VPU_MEM_PFN(curr) >= (unsigned char)pfn) {\r
/* set the not free bit and clear others */\r
break;\r
}\r
}\r
+#if VPU_MEM_DEBUG\r
+ printk(KERN_INFO "curr %d\n!", curr);\r
+#endif\r
curr = VPU_MEM_NEXT_INDEX(curr);\r
+#if VPU_MEM_DEBUG\r
+ printk(KERN_INFO "next %d\n!", curr);\r
+#endif\r
}\r
\r
/* if best_fit < 0, there are no suitable slots,\r
* return an error\r
*/\r
if (best_fit < 0) {\r
+#if VPU_MEM_DEBUG\r
printk("vpu_mem: no space left to allocate!\n");\r
+#endif\r
return -1;\r
}\r
\r
DLOG("best_fit: %d next: %u\n", best_fit, best_fit + pfn);\r
\r
- vpu_mem_get_region(data, region_node, best_fit, pfn);\r
+ down_write(&data->sem);\r
+ {\r
+ int ret = region_split(&data->region_list, NULL, best_fit, pfn);\r
+ if (ret)\r
+ best_fit = ret;\r
+ }\r
+ up_write(&data->sem);\r
+\r
+ DLOG("best_fit result: %d next: %u\n", best_fit, best_fit + pfn);\r
\r
return best_fit;\r
}\r
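+\r
+/*\r
+ * drop one reference held by this client's region node; when the node's\r
+ * own count reaches zero the node is removed, and when the region's\r
+ * global avail count reaches zero it is merged back into the free pool\r
+ */\r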
\r
+static int vpu_mem_free_by_region(struct vpu_mem_region_node *node)\r
+{\r
+ int ret = 0;\r
+ int index = node->region.index;\r
+ int avail = VPU_MEM_AVAIL(index);\r
+ int refc = VPU_MEM_REFC(index);\r
+\r
+ WARN((NODE_REGION_REFC(node) <= 0),\r
+ "vpu_mem: vpu_mem_free: non-positive ref count\n");\r
+ WARN((!VPU_MEM_FIRST(index)),\r
+ "vpu_mem: vpu_mem_free: index %d is not first\n", index);\r
+ WARN((avail <= 0),\r
+ "vpu_mem: vpu_mem_free: avail of index %d is non-positive\n", index);\r
+ WARN((refc <= 0),\r
+ "vpu_mem: vpu_mem_free: refc of index %d is non-positive\n", index);\r
+\r
+ NODE_REGION_REFC(node) -= 1;\r
+ region_set_avail(index, avail - 1);\r
+ region_set_ref_count(index, refc - 1);\r
+ if (0 == NODE_REGION_REFC(node))\r
+ {\r
+ avail = VPU_MEM_AVAIL(index);\r
+ if (0 == avail)\r
+ {\r
+ refc = VPU_MEM_REFC(index);\r
+ WARN((0 != refc),\r
+ "vpu_mem: vpu_mem_free: refc of index %d after free is non-zero\n", index);\r
+ ret = region_merge(&node->list);\r
+ }\r
+ list_del(&node->list);\r
+ kfree(node);\r
+ }\r
+ return ret;\r
+}\r
+\r
static int vpu_mem_free(struct file *file, int index)\r
{\r
/* caller should hold the write lock on vpu_mem_sem! */\r
- struct vpu_mem_bits *pbits = VPU_MEM_BIT(index);\r
struct vpu_mem_data *data = (struct vpu_mem_data *)file->private_data;\r
\r
if (!is_vpu_mem_file(file)) {\r
return -ENODEV;\r
}\r
\r
- DLOG("free index %d\n", index);\r
+ DLOG("search for index %d\n", index);\r
\r
- if ((!pbits->first) ||\r
- (!pbits->allocated) ||\r
- ((pbits->refrn - 1) < 0) ||\r
- ((pbits->avail - 1) < 0))\r
+ down_write(&data->sem);\r
{\r
- DLOG("VPM ERR: found error in vpu_mem_free :\nvpu_mem.bitmap[%d].first %d, allocated %d, avail %d, refrn %d\n",\r
- index, pbits->first, pbits->allocated, pbits->avail, pbits->refrn);\r
- return VPU_MEM_BITMAP_ERR;\r
- }\r
+ struct list_head *list, *tmp;\r
+ list_for_each_safe(list, tmp, &data->region_list) {\r
+ struct vpu_mem_region_node *node = list_entry(list, struct vpu_mem_region_node, list);\r
+ if (index == NODE_REGION_INDEX(node)) {\r
+ int ret = vpu_mem_free_by_region(node);\r
+ up_write(&data->sem);\r
+ return ret;\r
+ }\r
+ }\r
+ }\r
+ up_write(&data->sem);\r
\r
- return vpu_mem_put_region_by_index(data, index);\r
+ DLOG("no region of index %d searched\n", index);\r
+\r
+ return -1;\r
}\r
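+\r
+/*\r
+ * raise the region's global avail count without touching any client's\r
+ * region_list; inferred intent: keep the region alive across a hand-off\r
+ * until the receiving client attaches it with vpu_mem_link\r
+ */\r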
\r
-static long vpu_mem_duplicate(struct file *file, int index)\r
+static int vpu_mem_duplicate(struct file *file, int index)\r
{\r
/* caller should hold the write lock on vpu_mem_sem! */\r
- struct vpu_mem_bits *pbits = VPU_MEM_BIT(index);\r
-\r
if (!is_vpu_mem_file(file)) {\r
#if VPU_MEM_DEBUG\r
printk(KERN_INFO "duplicate vpu_mem data from invalid file.\n");\r
\r
DLOG("duplicate index %d\n", index);\r
\r
- if ((!pbits->first) ||\r
- (!pbits->allocated) ||\r
- (!pbits->avail))\r
- {\r
- DLOG("VPM ERR: found error in vpu_mem_duplicate :\nvpu_mem.bitmap[%d].first %d, allocated %d, avail %d, refrn %d\n",\r
- index, pbits->first, pbits->allocated, pbits->avail, pbits->refrn);\r
- return VPU_MEM_BITMAP_ERR;\r
+ if (region_check(index)) {\r
+#if VPU_MEM_DEBUG\r
+ printk(KERN_INFO "region %d unable to pass coherence check!", index);\r
+#endif\r
+ return -EINVAL;\r
}\r
\r
- pbits->avail++;\r
+ region_set_avail(index, VPU_MEM_AVAIL(index) + 1);\r
\r
return 0;\r
}\r
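+\r
+/*\r
+ * attach the region at index to this client's region_list: reuse the\r
+ * client's existing node when one already references this index,\r
+ * otherwise create a new node through region_split\r
+ */\r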
\r
-static long vpu_mem_link(struct file *file, int index)\r
+static int vpu_mem_link(struct file *file, int index)\r
{\r
- struct vpu_mem_bits *pbits = VPU_MEM_BIT(index);\r
struct vpu_mem_data *data = (struct vpu_mem_data *)file->private_data;\r
- struct vpu_mem_region_node *region_node;\r
\r
if (!is_vpu_mem_file(file)) {\r
#if VPU_MEM_DEBUG\r
return -ENODEV;\r
}\r
\r
- region_node = kmalloc(sizeof(struct vpu_mem_region_node),\r
- GFP_KERNEL);\r
- if (!region_node) {\r
+ if (region_check(index)) {\r
#if VPU_MEM_DEBUG\r
- printk(KERN_INFO "No space to allocate metadata!");\r
+ printk(KERN_INFO "region %d unable to pass coherence check!", index);\r
#endif\r
- return -ENOMEM;\r
+ return -EINVAL;\r
}\r
\r
/* caller should hold the write lock on vpu_mem_sem! */\r
DLOG("link index %d\n", index);\r
\r
- if ((!pbits->first) ||\r
- (!pbits->allocated) ||\r
- (!pbits->avail) ||\r
- (pbits->avail <= pbits->refrn))\r
- {\r
- DLOG("VPM ERR: found error in vpu_mem_duplicate :\nvpu_mem.bitmap[%d].first %d, allocated %d, avail %d, refrn %d\n",\r
- index, pbits->first, pbits->allocated, pbits->avail, pbits->refrn);\r
- return VPU_MEM_BITMAP_ERR;\r
- }\r
-\r
- pbits->refrn++;\r
-\r
- region_node->region.offset = index;\r
- region_node->region.len = pbits->pfn;\r
-\r
- down_write(&data->sem);\r
- list_add(&region_node->list, &data->region_list);\r
- up_write(&data->sem);\r
+ down_write(&data->sem);\r
+ { // search for an existing region node with this index\r
+ struct list_head *list, *tmp;\r
+ list_for_each_safe(list, tmp, &data->region_list) {\r
+ struct vpu_mem_region_node *node = list_entry(list, struct vpu_mem_region_node, list);\r
+ if (index == NODE_REGION_INDEX(node)) {\r
+ int ret = region_split(&data->region_list, node, index, VPU_MEM_PFN(index));\r
+ up_write(&data->sem);\r
+ return ret;\r
+ }\r
+ }\r
+ // no node references this index yet, create a new one\r
+ if (region_split(&data->region_list, NULL, index, VPU_MEM_PFN(index))) {\r
+ up_write(&data->sem);\r
+ return -EINVAL;\r
+ }\r
+ }\r
+ up_write(&data->sem);\r
\r
return 0;\r
}\r
return -1;\r
data = kmalloc(sizeof(struct vpu_mem_data), GFP_KERNEL);\r
if (!data) {\r
+#if VPU_MEM_DEBUG\r
printk("vpu_mem: unable to allocate memory for vpu_mem metadata.");\r
+#endif\r
return -1;\r
}\r
data->pid = 0;\r
\r
data = (struct vpu_mem_data *)file->private_data;\r
\r
+#if VPU_MEM_DEBUG\r
printk(KERN_ALERT "file->private_data : 0x%x\n", (unsigned int)data);\r
+#endif\r
\r
down_write(&data->sem);\r
\r
list_del(&data->list);\r
up(&vpu_mem.data_list_sem);\r
\r
+ // TODO: handle the case when the last file is released\r
down_write(&data->sem);\r
file->private_data = NULL;\r
list_for_each_safe(elt, elt2, &data->region_list) {\r
- struct vpu_mem_region_node *region_node = list_entry(elt, struct vpu_mem_region_node, list);\r
- vpu_mem_put_region_by_region(region_node);\r
+ struct vpu_mem_region_node *node = list_entry(elt, struct vpu_mem_region_node, list);\r
+ if (vpu_mem_free_by_region(node))\r
+ printk(KERN_INFO "vpu_mem: err on vpu_mem_free_by_region when vpu_mem_release\n");\r
}\r
BUG_ON(!list_empty(&data->region_list));\r
up_write(&data->sem);\r
up_write(&vpu_mem.bitmap_sem);\r
break;\r
}\r
- case VPU_MEM_MAP:\r
- DLOG("map\n");\r
- break;\r
- case VPU_MEM_CONNECT:\r
- DLOG("connect\n");\r
- break;\r
- case VPU_MEM_GET_SIZE:\r
- DLOG("get_size\n");\r
- break;\r
- case VPU_MEM_UNMAP:\r
- DLOG("unmap\n");\r
- break;\r
default:\r
return -EINVAL;\r
}\r
region_node = list_entry(elt2, struct vpu_mem_region_node,\r
list);\r
n += scnprintf(buffer + n, debug_bufmax - n,\r
- "(%lx,%lx) ",\r
- region_node->region.offset,\r
- region_node->region.len);\r
+ "(%d,%d) ",\r
+ region_node->region.index,\r
+ region_node->region.ref_count);\r
}\r
n += scnprintf(buffer + n, debug_bufmax - n, "\n");\r
up_read(&data->sem);\r
memset(vpu_mem.bitmap, 0, sizeof(struct vpu_mem_bits) *\r
vpu_mem.num_entries);\r
\r
- /* record the total page number */\r
- vpu_mem.bitmap[0].pfn = vpu_mem.num_entries;\r
+ region_set(0, vpu_mem.num_entries);\r
\r
if (vpu_mem.cached)\r
vpu_mem.vbase = ioremap_cached(vpu_mem.base,\r
return -1;\r
}\r
if (vpu_mem_count) {\r
+ if (vpu_mem.bitmap) {\r
+ kfree(vpu_mem.bitmap);\r
+ vpu_mem.bitmap = NULL;\r
+ }\r
misc_deregister(&vpu_mem.dev);\r
vpu_mem_count--;\r
} else {\r