- mali_mem_block_mali_map(descriptor, phys_addr, mali_addr + offset, current_mapping_size);
- if (mali_mem_block_cpu_map(descriptor, vma, phys_addr, offset, current_mapping_size, info->cpu_usage_adjust)) {
- /* release all memory back to the pool */
- while (last_allocated) {
- /* This relinks every block we've just allocated back into the free-list */
- block = last_allocated->next;
- last_allocated->next = info->first_free;
- info->first_free = last_allocated;
- last_allocated = block;
- }
-
- mutex_unlock(&info->mutex);
- _mali_osk_mutex_signal(session->memory_lock);
-
- mali_mem_mali_map_free(descriptor);
- mali_mem_descriptor_destroy(descriptor);
+/* Unref the node, but do not free it. */
+_mali_osk_errcode_t mali_mem_block_unref_node(struct mali_page_node *node)
+{
+ mali_block_allocator *info = mali_mem_block_gobal_allocator;
+ mali_page_node *new_node;
+
+ /* only handle BLOCK node */
+ if (node->type == MALI_PAGE_NODE_BLOCK && info) {
+		/* NOTE: need to make this atomic? The ref-count read below and the
+		 * unref are not a single atomic operation. */
+ if (1 == _mali_page_node_get_ref_count(node)) {
+			/* Allocate a new node, add it to the free list, and keep the old node. */
+ _mali_page_node_unref(node);
+ new_node = _mali_page_node_allocate(MALI_PAGE_NODE_BLOCK);
+ if (new_node) {
+ memcpy(new_node, node, sizeof(mali_page_node));
+ list_add(&new_node->list, &info->free);
+ atomic_add(1, &info->free_num);
+ } else
+ return _MALI_OSK_ERR_FAULT;