static mali_bool mali_executor_virtual_group_is_usable(void);
static void mali_executor_schedule(void);
static void mali_executor_wq_schedule(void *arg);
-static void mali_executor_send_gp_oom_to_user(struct mali_gp_job *job, u32 added_size);
+static void mali_executor_send_gp_oom_to_user(struct mali_gp_job *job);
static void mali_executor_complete_group(struct mali_group *group,
mali_bool success,
struct mali_gp_job **gp_job_done,
return _MALI_OSK_ERR_OK;
}
} else if (MALI_INTERRUPT_RESULT_OOM == int_result) {
+ struct mali_gp_job *job = mali_group_get_running_gp_job(group);
- mali_executor_unlock();
+ /* PLBU out of mem */
+ MALI_DEBUG_PRINT(3, ("Executor: PLBU needs more heap memory\n"));
+
+#if defined(CONFIG_MALI400_PROFILING)
+ /* Give group a chance to generate a SUSPEND event */
+ mali_group_oom(group);
+#endif
- mali_group_schedule_oom_work_handler(group);
+ /*
+ * no need to hold interrupt raised while
+ * waiting for more memory.
+ */
+ mali_executor_send_gp_oom_to_user(job);
+
+ mali_executor_unlock();
return _MALI_OSK_ERR_OK;
}
return _MALI_OSK_ERR_OK;
}
-void mali_executor_group_oom(struct mali_group *group)
-{
- struct mali_gp_job *job = NULL;
- MALI_DEBUG_ASSERT_POINTER(group);
- MALI_DEBUG_ASSERT_POINTER(group->gp_core);
- MALI_DEBUG_ASSERT_POINTER(group->mmu);
-
- mali_executor_lock();
-
- job = mali_group_get_running_gp_job(group);
-
- MALI_DEBUG_ASSERT_POINTER(job);
-
-#if defined(CONFIG_MALI400_PROFILING)
- /* Give group a chance to generate a SUSPEND event */
- mali_group_oom(group);
-#endif
-
- mali_gp_job_set_current_heap_addr(job, mali_gp_read_plbu_alloc_start_addr(group->gp_core));
-
- mali_executor_unlock();
-
- if (_MALI_OSK_ERR_OK == mali_mem_add_mem_size(job->session, job->heap_base_addr, job->heap_grow_size)) {
- _mali_osk_notification_t *new_notification = NULL;
-
- new_notification = _mali_osk_notification_create(
- _MALI_NOTIFICATION_GP_STALLED,
- sizeof(_mali_uk_gp_job_suspended_s));
-
- /* resume job with new heap,
- * This will also re-enable interrupts
- */
- mali_executor_lock();
-
- mali_executor_send_gp_oom_to_user(job, job->heap_grow_size);
-
- if (NULL != new_notification) {
-
- mali_gp_job_set_oom_notification(job, new_notification);
-
- mali_group_resume_gp_with_new_heap(group, mali_gp_job_get_id(job),
- job->heap_current_addr,
- job->heap_current_addr + job->heap_grow_size);
- }
- mali_executor_unlock();
- } else {
- mali_executor_lock();
- mali_executor_send_gp_oom_to_user(job, 0);
- mali_executor_unlock();
- }
-
-}
-
void mali_executor_group_power_up(struct mali_group *groups[], u32 num_groups)
{
u32 i;
args->arguments[0],
args->arguments[1]);
- job->heap_base_addr = args->arguments[0];
- job->heap_current_addr = args->arguments[0];
-
mali_executor_unlock();
return _MALI_OSK_ERR_OK;
} else {
mali_executor_unlock();
}
-static void mali_executor_send_gp_oom_to_user(struct mali_gp_job *job, u32 added_size)
+static void mali_executor_send_gp_oom_to_user(struct mali_gp_job *job)
{
_mali_uk_gp_job_suspended_s *jobres;
_mali_osk_notification_t *notification;
jobres = (_mali_uk_gp_job_suspended_s *)notification->result_buffer;
jobres->user_job_ptr = mali_gp_job_get_user_id(job);
jobres->cookie = gp_returned_cookie;
- jobres->heap_added_size = added_size;
+
mali_session_send_notification(mali_gp_job_get_session(job),
notification);
}
_mali_osk_errcode_t mali_executor_interrupt_gp(struct mali_group *group, mali_bool in_upper_half);
_mali_osk_errcode_t mali_executor_interrupt_pp(struct mali_group *group, mali_bool in_upper_half);
_mali_osk_errcode_t mali_executor_interrupt_mmu(struct mali_group *group, mali_bool in_upper_half);
-
-void mali_executor_group_oom(struct mali_group *group);
void mali_executor_group_power_up(struct mali_group *groups[], u32 num_groups);
void mali_executor_group_power_down(struct mali_group *groups[], u32 num_groups);
MALI_DEBUG_ASSERT(alloc[i] == mali_vma_node->vm_node.start);
} else {
MALI_DEBUG_PRINT(1, ("ERROE!_mali_gp_add_varying_allocations,can't find allocation %d by address =0x%x, num=%d\n", i, alloc[i], num));
- MALI_DEBUG_ASSERT(0);
+ _mali_osk_free(alloc_node);
+ goto fail;
}
alloc_node->alloc = mali_alloc;
/* add to gp job varying alloc list*/
_mali_osk_list_init(&job->list);
job->session = session;
job->id = id;
- job->heap_base_addr = job->uargs.frame_registers[4];
job->heap_current_addr = job->uargs.frame_registers[4];
- job->heap_grow_size = job->uargs.heap_grow_size;
job->perf_counter_value0 = 0;
job->perf_counter_value1 = 0;
job->pid = _mali_osk_get_pid();
INIT_LIST_HEAD(&job->varying_alloc);
INIT_LIST_HEAD(&job->vary_todo);
job->dmem = NULL;
+
+ if (job->uargs.varying_alloc_num > session->allocation_mgr.mali_allocation_num) {
+ MALI_PRINT_ERROR(("Mali GP job: The number of varying buffer to defer bind is invalid !\n"));
+ goto fail1;
+ }
+
/* add varying allocation list*/
- if (uargs->varying_alloc_num) {
+ if (job->uargs.varying_alloc_num > 0) {
/* copy varying list from user space*/
- job->varying_list = _mali_osk_calloc(1, sizeof(u32) * uargs->varying_alloc_num);
+ job->varying_list = _mali_osk_calloc(1, sizeof(u32) * job->uargs.varying_alloc_num);
if (!job->varying_list) {
- MALI_PRINT_ERROR(("Mali GP job: allocate varying_list failed varying_alloc_num = %d !\n", uargs->varying_alloc_num));
+ MALI_PRINT_ERROR(("Mali GP job: allocate varying_list failed varying_alloc_num = %d !\n", job->uargs.varying_alloc_num));
goto fail1;
}
memory_list = (u32 __user *)(uintptr_t)uargs->varying_alloc_list;
- if (0 != _mali_osk_copy_from_user(job->varying_list, memory_list, sizeof(u32)*uargs->varying_alloc_num)) {
+ if (0 != _mali_osk_copy_from_user(job->varying_list, memory_list, sizeof(u32) * job->uargs.varying_alloc_num)) {
MALI_PRINT_ERROR(("Mali GP job: Failed to copy varying list from user space!\n"));
goto fail;
}
if (unlikely(_mali_gp_add_varying_allocations(session, job, job->varying_list,
- uargs->varying_alloc_num))) {
+ job->uargs.varying_alloc_num))) {
MALI_PRINT_ERROR(("Mali GP job: _mali_gp_add_varying_allocations failed!\n"));
goto fail;
}
/* do preparetion for each allocation */
list_for_each_entry_safe(alloc_node, tmp_node, &job->varying_alloc, node) {
- if (unlikely(_MALI_OSK_ERR_OK != mali_mem_defer_bind_allocation_prepare(alloc_node->alloc, &job->vary_todo))) {
+ if (unlikely(_MALI_OSK_ERR_OK != mali_mem_defer_bind_allocation_prepare(alloc_node->alloc, &job->vary_todo, &job->required_varying_memsize))) {
MALI_PRINT_ERROR(("Mali GP job: mali_mem_defer_bind_allocation_prepare failed!\n"));
goto fail;
}
MALI_PRINT_ERROR(("Mali GP job: mali_mem_prepare_mem_for_job failed!\n"));
goto fail;
}
- if (_MALI_OSK_ERR_OK != mali_mem_defer_bind(job->uargs.varying_memsize / _MALI_OSK_MALI_PAGE_SIZE, job, &dmem_block)) {
+ if (_MALI_OSK_ERR_OK != mali_mem_defer_bind(job, &dmem_block)) {
MALI_PRINT_ERROR(("gp job create, mali_mem_defer_bind failed! GP %x fail!", job));
goto fail;
}
_mali_osk_free(bkn);
}
- if (!list_empty(&job->vary_todo)) {
- MALI_DEBUG_ASSERT(0);
- }
-
mali_mem_defer_dmem_free(job);
/* de-allocate the pre-allocated oom notifications */
* returning job to user. Hold executor lock when setting,
* no lock needed when reading
*/
- u32 heap_base_addr; /** < Holds the base mali addr of mem handle which is used for new heap*/
u32 heap_current_addr; /**< Holds the current HEAP address when the job has completed */
- u32 heap_grow_size; /** < Holds the HEAP grow size when HEAP oom */
u32 perf_counter_value0; /**< Value of performance counter 0 (to be returned to user space) */
u32 perf_counter_value1; /**< Value of performance counter 1 (to be returned to user space) */
struct mali_defer_mem *dmem; /** < used for defer bind to store dmem info */
u32 bind_flag; /** < flag for deferbind*/
u32 *varying_list; /**< varying memory list need to to defer bind*/
struct list_head vary_todo; /**< list of backend list need to do defer bind*/
+ u32 required_varying_memsize; /** < size of varying memory to reallocate*/
u32 big_job; /** < if the gp job have large varying output and may take long time*/
};
static void mali_group_bottom_half_gp(void *data);
static void mali_group_bottom_half_pp(void *data);
static void mali_group_timeout(void *data);
-static void mali_group_out_of_memory(void *data);
-
static void mali_group_reset_pp(struct mali_group *group);
static void mali_group_reset_mmu(struct mali_group *group);
return _MALI_OSK_ERR_FAULT;
}
- group->oom_work_handler = _mali_osk_wq_create_work(mali_group_out_of_memory, group);
- if (NULL == group->oom_work_handler) {
- _mali_osk_wq_delete_work(group->bottom_half_work_gp);
- }
return _MALI_OSK_ERR_OK;
}
if (NULL != group->bottom_half_work_gp) {
_mali_osk_wq_delete_work(group->bottom_half_work_gp);
}
-
- if (NULL != group->oom_work_handler) {
- _mali_osk_wq_delete_work(group->oom_work_handler);
- }
}
_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core *pp_core)
#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
mali_executor_lock();
- if (!mali_group_is_working(group)) {
- /* Not working, so nothing to do */
+ if (!mali_group_is_working(group) && (!mali_group_power_is_on(group))) {
+ /* Group has completed, no job is scheduled on it, and it has already been powered off */
+ if (NULL != group->gp_core) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+ 0xFFFFFFFF, 0);
+ } else {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+ mali_pp_core_get_id(group->pp_core)),
+ 0xFFFFFFFF, 0);
+ }
+
mali_executor_unlock();
- return _MALI_OSK_ERR_FAULT;
+ return ret;
}
#endif
#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
mali_executor_lock();
- if (!mali_group_is_working(group)) {
- /* Not working, so nothing to do */
+ if (!mali_group_is_working(group) && (!mali_group_power_is_on(group))) {
+ /* Group has completed, no job is scheduled on it, and it has already been powered off */
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+ 0xFFFFFFFF, 0);
mali_executor_unlock();
- return _MALI_OSK_ERR_FAULT;
+ return ret;
}
#endif
_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
#if defined(CONFIG_MALI400_PROFILING) && defined (CONFIG_TRACEPOINTS)
#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
mali_executor_lock();
- if (!mali_group_is_working(group)) {
- /* Not working, so nothing to do */
+ if (!mali_group_is_working(group) && (!mali_group_power_is_on(group))) {
+ /* Group has completed, no job is scheduled on it, and it has already been powered off */
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+ mali_pp_core_get_id(group->pp_core)),
+ 0xFFFFFFFF, 0);
mali_executor_unlock();
- return _MALI_OSK_ERR_FAULT;
+ return ret;
}
#endif
_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
}
}
-static void mali_group_out_of_memory(void *data)
-{
- struct mali_group *group = (struct mali_group *)data;
-
- MALI_DEBUG_ASSERT_POINTER(group);
- MALI_DEBUG_ASSERT_POINTER(group->gp_core);
- MALI_DEBUG_ASSERT_POINTER(group->mmu);
-
- mali_executor_group_oom(group);
-}
-
mali_bool mali_group_zap_session(struct mali_group *group,
struct mali_session_data *session)
{
_mali_osk_wq_work_t *bottom_half_work_gp;
_mali_osk_wq_work_t *bottom_half_work_pp;
- _mali_osk_wq_work_t *oom_work_handler;
_mali_osk_timer_t *timeout_timer;
};
_mali_osk_wq_schedule_work(group->bottom_half_work_gp);
}
-MALI_STATIC_INLINE void mali_group_schedule_oom_work_handler(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT_POINTER(group);
- MALI_DEBUG_ASSERT_POINTER(group->gp_core);
- _mali_osk_wq_schedule_work(group->oom_work_handler);
-}
MALI_STATIC_INLINE void mali_group_schedule_bottom_half_pp(struct mali_group *group)
{
_mali_osk_notification_queue_t *ioctl_queue;
_mali_osk_mutex_t *memory_lock; /**< Lock protecting the vm manipulation */
+ _mali_osk_mutex_t *cow_lock; /** < Lock protecting the cow memory free manipulation */
#if 0
_mali_osk_list_t memory_head; /**< Track all the memory allocated in this session, for freeing on abnormal termination */
#endif
MALI_DEBUG_ASSERT(NULL != timeline->system);
MALI_DEBUG_ASSERT(MALI_TIMELINE_MAX > timeline->id);
+ if (NULL != timeline->delayed_work) {
+ _mali_osk_wq_delayed_cancel_work_sync(timeline->delayed_work);
+ _mali_osk_wq_delayed_delete_work_nonflush(timeline->delayed_work);
+ }
+
#if defined(CONFIG_SYNC)
if (NULL != timeline->sync_tl) {
sync_timeline_destroy(timeline->sync_tl);
}
#endif /* defined(CONFIG_SYNC) */
- if (NULL != timeline->delayed_work) {
- _mali_osk_wq_delayed_cancel_work_sync(timeline->delayed_work);
- _mali_osk_wq_delayed_delete_work_nonflush(timeline->delayed_work);
- }
-
#ifndef CONFIG_SYNC
_mali_osk_free(timeline);
#endif
u64 user_job_ptr; /**< [in] identifier for the job in user space, a @c mali_gp_job_info* */
u32 priority; /**< [in] job priority. A lower number means higher priority */
u32 frame_registers[MALIGP2_NUM_REGS_FRAME]; /**< [in] core specific registers associated with this job */
- u32 heap_grow_size; /** <[in] the grow size of the plbu heap when out of memory */
u32 perf_counter_flag; /**< [in] bitmask indicating which performance counters to enable, see \ref _MALI_PERFORMANCE_COUNTER_FLAG_SRC0_ENABLE and related macro definitions */
u32 perf_counter_src0; /**< [in] source id for performance counter 0 (see ARM DDI0415A, Table 3-60) */
u32 perf_counter_src1; /**< [in] source id for performance counter 1 (see ARM DDI0415A, Table 3-60) */
typedef struct {
u64 user_job_ptr; /**< [out] identifier for the job in user space */
u32 cookie; /**< [out] identifier for the core in kernel space on which the job stalled */
- u32 heap_added_size;
} _mali_uk_gp_job_suspended_s;
/** @} */ /* end group _mali_uk_gp */
* The 16bit integer is stored twice in a 32bit integer
* For example, for version 1 the value would be 0x00010001
*/
-#define _MALI_API_VERSION 800
+#define _MALI_API_VERSION 850
#define _MALI_UK_API_VERSION _MAKE_VERSION_ID(_MALI_API_VERSION)
/**
#include <linux/rbtree.h>
#include "mali_kernel_license.h"
#include "mali_osk_types.h"
+#include <linux/version.h>
extern struct platform_device *mali_platform_device;
+/* Kernels after 3.19.0 dropped the CONFIG_PM_RUNTIME define, so define it ourselves */
+#if defined(CONFIG_PM) && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
+#define CONFIG_PM_RUNTIME 1
+#endif
+
#ifdef __cplusplus
}
#endif
return VM_FAULT_LOCKED;
}
} else {
- MALI_DEBUG_ASSERT(0);
- /*NOT support yet*/
+ MALI_PRINT_ERROR(("Mali vma fault! It never happen, indicating some logic errors in caller.\n"));
}
return VM_FAULT_NOPAGE;
}
ret = 0;
} else {
/* Not support yet*/
- MALI_DEBUG_ASSERT(0);
+ MALI_DEBUG_PRINT_ERROR(("Invalid type of backend memory! \n"));
+ return -EFAULT;
}
if (ret != 0) {
MALI_ERROR(_MALI_OSK_ERR_FAULT);
}
+ session_data->cow_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_UNORDERED, 0);
+ if (NULL == session_data->cow_lock) {
+ _mali_osk_mutex_term(session_data->memory_lock);
+ _mali_osk_free(session_data);
+ MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ }
+
mali_memory_manager_init(&session_data->allocation_mgr);
MALI_DEBUG_PRINT(5, ("MMU session begin: success\n"));
/* Free the lock */
_mali_osk_mutex_term(session->memory_lock);
-
+ _mali_osk_mutex_term(session->cow_lock);
return;
}
mali_mem_block_add_ref(node);
} else if (node->type == MALI_PAGE_NODE_SWAP) {
atomic_inc(&node->swap_it->ref_count);
- } else
- MALI_DEBUG_ASSERT(0);
+ } else {
+ MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n"));
+ }
}
void _mali_page_node_unref(struct mali_page_node *node)
put_page(node->page);
} else if (node->type == MALI_PAGE_NODE_BLOCK) {
mali_mem_block_dec_ref(node);
- } else
- MALI_DEBUG_ASSERT(0);
+ } else {
+ MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n"));
+ }
}
} else if (node->type == MALI_PAGE_NODE_SWAP) {
return atomic_read(&node->swap_it->ref_count);
} else {
- MALI_DEBUG_ASSERT(0);
+ MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n"));
}
return -1;
}
} else if (node->type == MALI_PAGE_NODE_SWAP) {
return node->swap_it->dma_addr;
} else {
- MALI_DEBUG_ASSERT(0);
+ MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n"));
}
return 0;
}
} else if (node->type == MALI_PAGE_NODE_SWAP) {
return page_to_pfn(node->swap_it->page);
} else {
- MALI_DEBUG_ASSERT(0);
+ MALI_DEBUG_PRINT_ERROR(("Invalid type of mali page node! \n"));
}
return 0;
}
pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size);\r
\r
if (NULL == pages) {\r
- MALI_DEBUG_ASSERT(0);\r
+ MALI_DEBUG_PRINT_ERROR(("No memory page need to cow ! \n"));\r
return _MALI_OSK_ERR_FAULT;\r
}\r
\r
\r
pages = _mali_memory_cow_get_node_list(target_bk, target_offset, target_size);\r
if (NULL == pages) {\r
- MALI_DEBUG_ASSERT(0);\r
+ MALI_DEBUG_PRINT_ERROR(("No swap memory page need to cow ! \n"));\r
return _MALI_OSK_ERR_FAULT;\r
}\r
\r
u32 range_size)\r
{\r
mali_mem_allocation *alloc = NULL;\r
+ struct mali_session_data *session;\r
mali_mem_cow *cow = &backend->cow_mem;\r
struct mali_page_node *m_page, *m_tmp;\r
LIST_HEAD(pages);\r
alloc = backend->mali_allocation;\r
MALI_DEBUG_ASSERT_POINTER(alloc);\r
\r
+ session = alloc->session;\r
+ MALI_DEBUG_ASSERT_POINTER(session);\r
+\r
MALI_DEBUG_ASSERT(MALI_MEM_COW == backend->type);\r
MALI_DEBUG_ASSERT(((range_start + range_size) / _MALI_OSK_MALI_PAGE_SIZE) <= cow->count);\r
\r
if (1 != _mali_page_node_get_ref_count(m_page))\r
change_pages_nr++;\r
/* unref old page*/\r
+ _mali_osk_mutex_wait(session->cow_lock);\r
if (_mali_mem_put_page_node(m_page)) {\r
__free_page(new_page);\r
+ _mali_osk_mutex_signal(session->cow_lock);\r
goto error;\r
}\r
+ _mali_osk_mutex_signal(session->cow_lock);\r
/* add new page*/\r
/* always use OS for COW*/\r
m_page->type = MALI_PAGE_NODE_OS;\r
break;\r
case MALI_MEM_EXTERNAL:\r
/*NOT support yet*/\r
- MALI_DEBUG_ASSERT(0);\r
+ MALI_DEBUG_PRINT_ERROR(("External physical memory not supported ! \n"));\r
+ return _MALI_OSK_ERR_UNSUPPORTED;\r
break;\r
case MALI_MEM_DMA_BUF:\r
/*NOT support yet*/\r
- MALI_DEBUG_ASSERT(0);\r
+ MALI_DEBUG_PRINT_ERROR(("DMA buffer not supported ! \n"));\r
+ return _MALI_OSK_ERR_UNSUPPORTED;\r
break;\r
case MALI_MEM_UMP:\r
/*NOT support yet*/\r
- MALI_DEBUG_ASSERT(0);\r
+ MALI_DEBUG_PRINT_ERROR(("UMP buffer not supported ! \n"));\r
+ return _MALI_OSK_ERR_UNSUPPORTED;\r
break;\r
default:\r
/*Not support yet*/\r
- MALI_DEBUG_ASSERT(0);\r
+ MALI_DEBUG_PRINT_ERROR(("Invalid memory type not supported ! \n"));\r
+ return _MALI_OSK_ERR_UNSUPPORTED;\r
break;\r
}\r
return _MALI_OSK_ERR_OK;\r
u32 mali_mem_cow_release(mali_mem_backend *mem_bkend, mali_bool is_mali_mapped)\r
{\r
mali_mem_allocation *alloc;\r
+ struct mali_session_data *session;\r
u32 free_pages_nr = 0;\r
MALI_DEBUG_ASSERT_POINTER(mem_bkend);\r
MALI_DEBUG_ASSERT(MALI_MEM_COW == mem_bkend->type);\r
alloc = mem_bkend->mali_allocation;\r
MALI_DEBUG_ASSERT_POINTER(alloc);\r
\r
+ session = alloc->session;\r
+ MALI_DEBUG_ASSERT_POINTER(session);\r
+\r
if (MALI_MEM_BACKEND_FLAG_SWAP_COWED != (MALI_MEM_BACKEND_FLAG_SWAP_COWED & mem_bkend->flags)) {\r
/* Unmap the memory from the mali virtual address space. */\r
if (MALI_TRUE == is_mali_mapped)\r
mali_mem_os_mali_unmap(alloc);\r
/* free cow backend list*/\r
+ _mali_osk_mutex_wait(session->cow_lock);\r
free_pages_nr = mali_mem_os_free(&mem_bkend->cow_mem.pages, mem_bkend->cow_mem.count, MALI_TRUE);\r
+ _mali_osk_mutex_signal(session->cow_lock);\r
+\r
free_pages_nr += mali_mem_block_free_list(&mem_bkend->cow_mem.pages);\r
\r
MALI_DEBUG_ASSERT(list_empty(&mem_bkend->cow_mem.pages));\r
}\r
mem_bkend->cow_mem.change_pages_nr++;\r
}\r
+\r
+ _mali_osk_mutex_wait(session->cow_lock);\r
if (_mali_mem_put_page_node(found_node)) {\r
__free_page(new_page);\r
kfree(new_node);\r
+ _mali_osk_mutex_signal(session->cow_lock);\r
return _MALI_OSK_ERR_NOMEM;\r
}\r
+ _mali_osk_mutex_signal(session->cow_lock);\r
\r
list_replace(&found_node->list, &new_node->list);\r
\r
\r
static u32 mali_dmem_get_gp_varying_size(struct mali_gp_job *gp_job)\r
{\r
- return gp_job->uargs.varying_memsize / _MALI_OSK_MALI_PAGE_SIZE;\r
+ return gp_job->required_varying_memsize / _MALI_OSK_MALI_PAGE_SIZE;\r
}\r
\r
_mali_osk_errcode_t mali_mem_defer_bind_manager_init(void)\r
\r
\r
/* do preparetion for allocation before defer bind */\r
-_mali_osk_errcode_t mali_mem_defer_bind_allocation_prepare(mali_mem_allocation *alloc, struct list_head *list)\r
+_mali_osk_errcode_t mali_mem_defer_bind_allocation_prepare(mali_mem_allocation *alloc, struct list_head *list, u32 *required_varying_memsize)\r
{\r
mali_mem_backend *mem_bkend = NULL;\r
struct mali_backend_bind_list *bk_list = _mali_osk_calloc(1, sizeof(struct mali_backend_bind_list));\r
if (!(mem_bkend = idr_find(&mali_backend_idr, alloc->backend_handle))) {\r
MALI_DEBUG_PRINT(1, ("Can't find memory backend in defer bind!\n"));\r
mutex_unlock(&mali_idr_mutex);\r
- kfree(bk_list);\r
+ _mali_osk_free(bk_list);\r
return _MALI_OSK_ERR_FAULT;\r
}\r
mutex_unlock(&mali_idr_mutex);\r
+\r
+ /* If the mem backend has already been bound, no need to bind again.*/\r
+ if (mem_bkend->os_mem.count > 0) {\r
+ _mali_osk_free(bk_list);\r
+ return _MALI_OSK_ERR_OK;\r
+ }\r
+\r
MALI_DEBUG_PRINT(4, ("bind_allocation_prepare:: allocation =%x vaddr=0x%x!\n", alloc, alloc->mali_vma_node.vm_node.start));\r
\r
INIT_LIST_HEAD(&mem_bkend->os_mem.pages);\r
bk_list->vaddr = alloc->mali_vma_node.vm_node.start;\r
bk_list->session = alloc->session;\r
bk_list->page_num = mem_bkend->size / _MALI_OSK_MALI_PAGE_SIZE;\r
+ *required_varying_memsize += mem_bkend->size;\r
MALI_DEBUG_ASSERT(mem_bkend->type == MALI_MEM_OS);\r
\r
/* add to job to do list */\r
@ pages page list to do this bind\r
@ count number of pages\r
*/\r
-_mali_osk_errcode_t mali_mem_defer_bind(u32 count, struct mali_gp_job *gp,\r
+_mali_osk_errcode_t mali_mem_defer_bind(struct mali_gp_job *gp,\r
struct mali_defer_mem_block *dmem_block)\r
{\r
struct mali_defer_mem *dmem = NULL;\r
struct mali_backend_bind_list *bkn, *bkn_tmp;\r
LIST_HEAD(pages);\r
\r
+ if (gp->required_varying_memsize != (atomic_read(&dmem_block->num_free_pages) * _MALI_OSK_MALI_PAGE_SIZE)) {\r
+ MALI_DEBUG_PRINT_ERROR(("#BIND: The memsize of varying buffer not match to the pagesize of the dmem_block!!## \n"));\r
+ return _MALI_OSK_ERR_FAULT;\r
+ }\r
+\r
MALI_DEBUG_PRINT(4, ("#BIND: GP job=%x## \n", gp));\r
dmem = (mali_defer_mem *)_mali_osk_calloc(1, sizeof(struct mali_defer_mem));\r
if (dmem) {\r
_mali_osk_free(bkn);\r
} else {\r
/* not enough memory will not happen */\r
- MALI_DEBUG_PRINT(1, ("#BIND: NOT enough memory when binded !!## \n"));\r
- MALI_DEBUG_ASSERT(0);\r
+ MALI_DEBUG_PRINT_ERROR(("#BIND: NOT enough memory when binded !!## \n"));\r
+ _mali_osk_free(gp->dmem);\r
+ return _MALI_OSK_ERR_NOMEM;\r
}\r
}\r
\r
if (!list_empty(&gp->vary_todo)) {\r
- MALI_DEBUG_ASSERT(0);\r
+ MALI_DEBUG_PRINT_ERROR(("#BIND: The deferbind backend list isn't empty !!## \n"));\r
+ _mali_osk_free(gp->dmem);\r
+ return _MALI_OSK_ERR_FAULT;\r
}\r
\r
dmem->flag = MALI_DEFER_BIND_MEMORY_BINDED;\r
\r
_mali_osk_errcode_t mali_mem_defer_bind_manager_init(void);\r
void mali_mem_defer_bind_manager_destory(void);\r
-_mali_osk_errcode_t mali_mem_defer_bind(u32 count, struct mali_gp_job *gp,\r
- struct mali_defer_mem_block *dmem_block);\r
-_mali_osk_errcode_t mali_mem_defer_bind_allocation_prepare(mali_mem_allocation *alloc, struct list_head *list);\r
+_mali_osk_errcode_t mali_mem_defer_bind(struct mali_gp_job *gp, struct mali_defer_mem_block *dmem_block);\r
+_mali_osk_errcode_t mali_mem_defer_bind_allocation_prepare(mali_mem_allocation *alloc, struct list_head *list, u32 *required_varying_memsize);\r
_mali_osk_errcode_t mali_mem_prepare_mem_for_job(struct mali_gp_job *next_gp_job, mali_defer_mem_block *dblock);\r
void mali_mem_defer_dmem_free(struct mali_gp_job *gp);\r
\r
mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->gpu_vaddr, 0);
if (unlikely(mali_vma_node)) {
- MALI_DEBUG_ASSERT(0);
+ MALI_DEBUG_PRINT_ERROR(("The mali virtual address has already been used ! \n"));
return _MALI_OSK_ERR_FAULT;
}
/**
break;
case _MALI_MEMORY_BIND_BACKEND_MALI_MEMORY:
/* not allowed */
- MALI_DEBUG_ASSERT(0);
+ MALI_DEBUG_PRINT_ERROR(("Mali internal memory type not supported !\n"));
+ goto Failed_bind_backend;
break;
case _MALI_MEMORY_BIND_BACKEND_EXTERNAL_MEMORY:
case _MALI_MEMORY_BIND_BACKEND_EXT_COW:
/* not allowed */
- MALI_DEBUG_ASSERT(0);
+ MALI_DEBUG_PRINT_ERROR(("External cow memory type not supported !\n"));
+ goto Failed_bind_backend;
break;
default:
- MALI_DEBUG_ASSERT(0);
+ MALI_DEBUG_PRINT_ERROR(("Invalid memory type not supported !\n"));
+ goto Failed_bind_backend;
break;
}
MALI_DEBUG_ASSERT(0 == mem_backend->size % MALI_MMU_PAGE_SIZE);
mali_vma_node = mali_vma_offset_search(&session->allocation_mgr, args->vaddr, 0);
if (unlikely(mali_vma_node)) {
- MALI_DEBUG_ASSERT(0);
+ MALI_DEBUG_PRINT_ERROR(("The mali virtual address has already been used ! \n"));
return ret;
}
{
mali_mem_allocation *alloc;
+ struct mali_session_data *session;
u32 free_pages_nr = 0;
MALI_DEBUG_ASSERT_POINTER(mem_bkend);
MALI_DEBUG_ASSERT(MALI_MEM_OS == mem_bkend->type);
alloc = mem_bkend->mali_allocation;
MALI_DEBUG_ASSERT_POINTER(alloc);
+ session = alloc->session;
+ MALI_DEBUG_ASSERT_POINTER(session);
+
/* Unmap the memory from the mali virtual address space. */
mali_mem_os_mali_unmap(alloc);
mutex_lock(&mem_bkend->mutex);
/* Free pages */
if (MALI_MEM_BACKEND_FLAG_COWED & mem_bkend->flags) {
+ /* Lock to avoid the free race condition for the cow shared memory page node. */
+ _mali_osk_mutex_wait(session->cow_lock);
free_pages_nr = mali_mem_os_free(&mem_bkend->os_mem.pages, mem_bkend->os_mem.count, MALI_TRUE);
+ _mali_osk_mutex_signal(session->cow_lock);
} else {
free_pages_nr = mali_mem_os_free(&mem_bkend->os_mem.pages, mem_bkend->os_mem.count, MALI_FALSE);
}
#include <linux/sched.h>
+#include "mali_kernel_linux.h"
#ifdef CONFIG_PM_RUNTIME
#include <linux/pm_runtime.h>
#endif /* CONFIG_PM_RUNTIME */
#include <linux/version.h>
#include "mali_osk.h"
#include "mali_kernel_common.h"
-#include "mali_kernel_linux.h"
/* Can NOT run in atomic context */
_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_sync(void)
u8 byte_value = ~0;
while ((byte_value & 0x80) != 0) {
- MALI_DEBUG_ASSERT((*pos) < packet_size);
+ if ((*pos) >= packet_size)
+ return -1;
byte_value = buf[*pos];
*pos += 1;
int_value |= (u32)(byte_value & 0x7f) << shift;
byte_value |= 0x80;
}
- MALI_DEBUG_ASSERT((pos + add_bytes) < buf_size);
+ if ((pos + add_bytes) >= buf_size)
+ return 0;
buf[pos + add_bytes] = byte_value;
add_bytes++;
}
}
/* Send supported counters */
+ if (PACKET_HEADER_SIZE > output_buffer_size)
+ return _MALI_OSK_ERR_FAULT;
+
*response_packet_data = PACKET_HEADER_COUNTERS_ACK;
args->response_packet_size = PACKET_HEADER_SIZE;
u32 event;
u32 key;
+ /* Scan for the counter name's null terminator (it must lie within the packet) */
while (request_pos < control_packet_size && control_packet_data[request_pos] != '\0') {
++request_pos;
}
+ if (request_pos >= control_packet_size)
+ return _MALI_OSK_ERR_FAULT;
+
++request_pos;
event = _mali_profiling_read_packet_int(control_packet_data, &request_pos, control_packet_size);
key = _mali_profiling_read_packet_int(control_packet_data, &request_pos, control_packet_size);
for (i = 0; i < num_global_mali_profiling_counters; ++i) {
u32 name_size = strlen((char *)(control_packet_data + begin));
+
if (strncmp(global_mali_profiling_counters[i].counter_name, (char *)(control_packet_data + begin), name_size) == 0) {
if (!sw_counter_if_enabled && (FIRST_SW_COUNTER <= global_mali_profiling_counters[i].counter_id
&& global_mali_profiling_counters[i].counter_id <= LAST_SW_COUNTER)) {
kargs.ctx = (uintptr_t)session_data;
+
+ /* Sanity check about the size */
+ if (kargs.control_packet_size > PAGE_SIZE || kargs.response_packet_size > PAGE_SIZE)
+ return -EINVAL;
+
if (0 != kargs.control_packet_size) {
+ if (0 == kargs.response_packet_size)
+ return -EINVAL;
+
kernel_control_data = _mali_osk_calloc(1, kargs.control_packet_size);
if (NULL == kernel_control_data) {
return -ENOMEM;
}
- MALI_DEBUG_ASSERT(0 != kargs.response_packet_size);
-
kernel_response_data = _mali_osk_calloc(1, kargs.response_packet_size);
if (NULL == kernel_response_data) {
_mali_osk_free(kernel_control_data);
#include <linux/platform_device.h>
#include <linux/version.h>
#include <linux/pm.h>
+#include "mali_kernel_linux.h"
#ifdef CONFIG_PM_RUNTIME
#include <linux/pm_runtime.h>
#endif
*
* A copy of the licence is included with the program, and can also be obtained from Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
-
+
* Class Path Exception
* Linking this library statically or dynamically with other modules is making a combined work based on this library.
* Thus, the terms and conditions of the GNU General Public License cover the whole combination.