--- /dev/null
+/*\r
+ * Copyright (C) 2014, 2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */\r
+* ARM Mali-300/400/450 GPU\r
+\r
+Required properties:\r
+- compatible:\r
+ At least one of these: "arm,mali-300", "arm,mali-400", "arm,mali-450"\r
+ Always: "arm,mali-utgard"\r
+ Mali-450 can also include "arm,mali-400" as it is compatible.\r
+ - "arm,mali-400", "arm,mali-utgard" for any Mali-400 GPU.\r
+ - "arm,mali-450", "arm,mali-400", "arm,mali-utgard" for any Mali-450 GPU.\r
+- reg:\r
+ Physical base address and length of the GPU's registers.\r
+- interrupts:\r
+ - List of all Mali interrupts.\r
+ - This list must match the number of and the order of entries in\r
+ interrupt-names.\r
+- interrupt-names:\r
+ - IRQPP<X> - Name for PP interrupts.\r
+ - IRQPPMMU<X> - Name for interrupts from the PP MMU.\r
+ - IRQPP - Name for the PP broadcast interrupt (Mali-450 only).\r
+ - IRQGP - Name for the GP interrupt.\r
+ - IRQGPMMU - Name for the interrupt from the GP MMU.\r
+ - IRQPMU - Name for the PMU interrupt (If pmu is implemented in HW, it must be contained).\r
+\r
+Optional properties:\r
+- pmu_domain_config:\r
+ - If the Mali internal PMU is present and the PMU IRQ is specified in\r
+ interrupt/interrupt-names ("IRQPMU"). This contains the mapping of\r
+ Mali HW units to the PMU power domain.\r
+ - Mali Dynamic power domain configuration in sequence from 0-11, like:\r
+ <GP PP0 PP1 PP2 PP3 PP4 PP5 PP6 PP7 L2$0 L2$1 L2$2>.\r
+- pmu-switch-delay:\r
+ - Only needed if the power gates are connected to the PMU in a high fanout\r
+ network. This value is the number of Mali clock cycles it takes to\r
+ enable the power gates and turn on the power mesh. This value will\r
+ have no effect if a daisy chain implementation is used.\r
+\r
+Platform related properties:\r
+- clocks: Phandle to clock for Mali utgard device.\r
+- clock-names: the corresponding names of clock in clocks property.\r
+- regulator: Phandle to regulator which is power supplier of mali device.\r
+\r
+Example for a Mali400_MP1_PMU device:\r
+\r
+/ {\r
+ ...\r
+\r
+ gpu@12300000 {\r
+ compatible = "arm,mali-400", "arm,mali-utgard";\r
+ reg = <0x12300000 0x30000>;\r
+ interrupts = <0 55 4>, <0 56 4>, <0 57 4>, <0 58 4>, <0 59 4>;\r
+ interrupt-names = "IRQGP", "IRQGPMMU", "IRQPP0", "IRQPPMMU0", "IRQPMU";\r
+\r
+ pmu_domain_config = <0x1 0x4 0x0 0x0 0x0 0x0 0x0 0x0 0x0 0x2 0x0 0x0>;\r
+ pmu_switch_delay = <0xff>;\r
+ clocks = <clock 122>, <clock 123>;\r
+ clock-names = "mali_parent", "mali";\r
+ vdd_g3d-supply = <regulator_Phandle>;\r
+ };\r
+}\r
mali-$(CONFIG_DMA_SHARED_BUFFER) += linux/mali_memory_dma_buf.o
mali-$(CONFIG_DMA_SHARED_BUFFER) += linux/mali_memory_secure.o
mali-$(CONFIG_SYNC) += linux/mali_sync.o
+mali-$(CONFIG_MALI_DMA_BUF_FENCE) += linux/mali_dma_fence.o
ccflags-$(CONFIG_SYNC) += -Idrivers/staging/android
mali-$(CONFIG_MALI400_UMP) += linux/mali_memory_ump.o
VERSION_STRINGS += USING_INTERNAL_PROFILING=$(CONFIG_MALI400_INTERNAL_PROFILING)
VERSION_STRINGS += USING_GPU_UTILIZATION=$(USING_GPU_UTILIZATION)
VERSION_STRINGS += USING_DVFS=$(CONFIG_MALI_DVFS)
+VERSION_STRINGS += USING_DMA_BUF_FENCE=$(CONFIG_MALI_DMA_BUF_FENCE)
VERSION_STRINGS += MALI_UPPER_HALF_SCHEDULING=$(MALI_UPPER_HALF_SCHEDULING)
# Create file with Mali driver configuration
USING_PROFILING ?= 1
USING_INTERNAL_PROFILING ?= 0
USING_DVFS ?= 1
+USING_DMA_BUF_FENCE ?= 0
MALI_HEATMAPS_ENABLED ?= 0
MALI_DMA_BUF_MAP_ON_ATTACH ?= 1
MALI_PMU_PARALLEL_POWER_UP ?= 0
export EXTRA_DEFINES += -DCONFIG_MALI_DVFS
endif
+ifeq ($(USING_DMA_BUF_FENCE),1)
+export CONFIG_MALI_DMA_BUF_FENCE=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DMA_BUF_FENCE
+endif
+
ifeq ($(MALI_PMU_PARALLEL_POWER_UP),1)
export CONFIG_MALI_PMU_PARALLEL_POWER_UP=y
export EXTRA_DEFINES += -DCONFIG_MALI_PMU_PARALLEL_POWER_UP
/*
- * Copyright (C) 2012-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2012-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
mali_executor_unlock();
return _MALI_OSK_ERR_OK;
} else {
- MALI_PRINT_ERROR(("Executor: Unable to resume, GP job no longer running.\n"));
+ MALI_DEBUG_PRINT(2, ("Executor: Unable to resume gp job becasue gp time out or any other unexpected reason!\n"));
_mali_osk_notification_delete(new_notification);
mali_bool trigger_pm_update = MALI_FALSE;
mali_bool deactivate_idle_group = MALI_TRUE;
mali_bool gpu_secure_mode_is_needed = MALI_FALSE;
-
+ mali_bool is_gpu_secure_mode = MALI_FALSE;
/* Physical groups + jobs to start in this function */
struct mali_group *groups_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
struct mali_pp_job *jobs_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
}
}
- /* 6. To power up group asap, we trigger pm update here. */
+ /* 6. To power up the group asap, trigger the PM update only when there is no need to switch the GPU secure mode. */
- if (MALI_TRUE == trigger_pm_update) {
- trigger_pm_update = MALI_FALSE;
- mali_pm_update_async();
+ is_gpu_secure_mode = _mali_osk_gpu_secure_mode_is_enabled();
+
+ if ((MALI_FALSE == gpu_secure_mode_is_needed && MALI_FALSE == is_gpu_secure_mode)
+ || (MALI_TRUE == gpu_secure_mode_is_needed && MALI_TRUE == is_gpu_secure_mode)) {
+ if (MALI_TRUE == trigger_pm_update) {
+ trigger_pm_update = MALI_FALSE;
+ mali_pm_update_async();
+ }
}
/* 7. Assign jobs to idle virtual group (or deactivate if no job) */
if (NULL != virtual_job_to_start) {
MALI_DEBUG_ASSERT(!mali_group_pp_is_active(virtual_group));
mali_group_start_pp_job(virtual_group,
- virtual_job_to_start, 0);
+ virtual_job_to_start, 0, is_gpu_secure_mode);
}
for (i = 0; i < num_jobs_to_start; i++) {
groups_to_start[i]));
mali_group_start_pp_job(groups_to_start[i],
jobs_to_start[i],
- sub_jobs_to_start[i]);
+ sub_jobs_to_start[i], is_gpu_secure_mode);
}
MALI_DEBUG_ASSERT_POINTER(gp_group);
if (NULL != gp_job_to_start) {
MALI_DEBUG_ASSERT(!mali_group_gp_is_active(gp_group));
- mali_group_start_gp_job(gp_group, gp_job_to_start);
+ mali_group_start_gp_job(gp_group, gp_job_to_start, is_gpu_secure_mode);
}
/* 11. Trigger any pending PM updates */
/*
- * Copyright (C) 2012, 2014-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2012, 2014-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
/*
- * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
#include "mali_hw_core.h"
#include "mali_group.h"
#include "mali_osk.h"
-#include "mali_osk_mali.h"
#include "regs/mali_gp_regs.h"
#include "mali_kernel_common.h"
#include "mali_kernel_core.h"
u32 counter_src0 = mali_gp_job_get_perf_counter_src0(job);
u32 counter_src1 = mali_gp_job_get_perf_counter_src1(job);
- /* Disable gpu secure mode. */
- if (MALI_TRUE == _mali_osk_gpu_secure_mode_is_enabled()) {
- _mali_osk_gpu_secure_mode_disable();
- }
-
MALI_DEBUG_ASSERT_POINTER(core);
if (mali_gp_job_has_vs_job(job)) {
}
}
- if (copy_of_uargs.varying_memsize > MALI_UK_BIG_VARYING_SIZE) {
+ if (job->uargs.varying_memsize > MALI_UK_BIG_VARYING_SIZE) {
job->big_job = 1;
}
}
/*
- * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
/*
- * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
#include "mali_broadcast.h"
#include "mali_scheduler.h"
#include "mali_osk_profiling.h"
+#include "mali_osk_mali.h"
#include "mali_pm_domain.h"
#include "mali_pm.h"
#include "mali_executor.h"
static void mali_group_reset_pp(struct mali_group *group);
static void mali_group_reset_mmu(struct mali_group *group);
-static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session);
+static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session, mali_bool is_reload);
static void mali_group_recovery_reset(struct mali_group *group);
struct mali_group *mali_group_create(struct mali_l2_cache_core *core,
MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
MALI_DEBUG_ASSERT(NULL == group->gp_running_job);
MALI_DEBUG_ASSERT(NULL == group->pp_running_job);
- MALI_DEBUG_ASSERT(NULL == group->session);
MALI_DEBUG_PRINT(3, ("Group: reset of %s\n",
mali_group_core_description(group)));
}
}
-void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job)
+void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job, mali_bool gpu_secure_mode_pre_enabled)
{
struct mali_session_data *session;
MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]);
mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_gp_job_get_cache_order(job));
- mali_group_activate_page_directory(group, session);
+ /* Reset GPU and disable gpu secure mode if needed. */
+ if (MALI_TRUE == _mali_osk_gpu_secure_mode_is_enabled()) {
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+ _mali_osk_gpu_reset_and_secure_mode_disable();
+ /* Need to disable the pmu interrupt mask register */
+ if (NULL != pmu) {
+ mali_pmu_reset(pmu);
+ }
+ }
+
+ /* Reload mmu page table if needed */
+ if (MALI_TRUE == gpu_secure_mode_pre_enabled) {
+ mali_group_reset(group);
+ mali_group_activate_page_directory(group, session, MALI_TRUE);
+ } else {
+ mali_group_activate_page_directory(group, session, MALI_FALSE);
+ }
mali_gp_job_start(group->gp_core, job);
/* Used to set all the registers except frame renderer list address and fragment shader stack address
* It means the caller must set these two registers properly before calling this function
*/
-void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job)
+void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool gpu_secure_mode_pre_enabled)
{
struct mali_session_data *session;
mali_l2_cache_invalidate_conditional(group->l2_cache_core[1], mali_pp_job_get_cache_order(job));
}
- mali_group_activate_page_directory(group, session);
+ /* Reset GPU and change gpu secure mode if needed. */
+ if (MALI_TRUE == mali_pp_job_is_protected_job(job) && MALI_FALSE == _mali_osk_gpu_secure_mode_is_enabled()) {
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+ _mali_osk_gpu_reset_and_secure_mode_enable();
+ /* Need to disable the pmu interrupt mask register */
+ if (NULL != pmu) {
+ mali_pmu_reset(pmu);
+ }
+ } else if (MALI_FALSE == mali_pp_job_is_protected_job(job) && MALI_TRUE == _mali_osk_gpu_secure_mode_is_enabled()) {
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+ _mali_osk_gpu_reset_and_secure_mode_disable();
+ /* Need to disable the pmu interrupt mask register */
+ if (NULL != pmu) {
+ mali_pmu_reset(pmu);
+ }
+ }
+
+ /* Reload the mmu page table if needed */
+ if ((MALI_TRUE == mali_pp_job_is_protected_job(job) && MALI_FALSE == gpu_secure_mode_pre_enabled)
+ || (MALI_FALSE == mali_pp_job_is_protected_job(job) && MALI_TRUE == gpu_secure_mode_pre_enabled)) {
+ mali_group_reset(group);
+ mali_group_activate_page_directory(group, session, MALI_TRUE);
+ } else {
+ mali_group_activate_page_directory(group, session, MALI_FALSE);
+ }
if (mali_group_is_virtual(group)) {
struct mali_group *child;
return mali_global_num_groups;
}
-static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session)
+static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session, mali_bool is_reload)
{
MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group %s\n",
mali_session_get_page_directory(session), session,
MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
- if (group->session != session) {
+ if (group->session != session || MALI_TRUE == is_reload) {
/* Different session than last time, so we need to do some work */
MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group %s\n",
session, group->session,
/*
- * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
/** @brief Start GP job
*/
-void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job);
+void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job, mali_bool gpu_secure_mode_pre_enabled);
-void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job);
+void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job, mali_bool gpu_secure_mode_pre_enabled);
/** @brief Start virtual group Job on a virtual group
*/
/*
- * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
#include "mali_control_timer.h"
#include "mali_dvfs_policy.h"
#include <linux/sched.h>
+#include <linux/atomic.h>
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+#include <linux/fence.h>
+#endif
#define MALI_SHARED_MEMORY_DEFAULT_SIZE 0xffffffff
goto err;
}
+ /* Create a wait queue for this session. */
+ session->wait_queue = _mali_osk_wait_queue_init();
+ if (NULL == session->wait_queue) {
+ goto err_wait_queue;
+ }
+
session->page_directory = mali_mmu_pagedir_alloc();
if (NULL == session->page_directory) {
goto err_mmu;
goto err_soft;
}
+ /* Initialize the dma fence context.*/
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+ session->fence_context = fence_context_alloc(1);
+ _mali_osk_atomic_init(&session->fence_seqno, 0);
+#else
+ MALI_PRINT_ERROR(("The kernel version not support dma fence!\n"));
+ goto err_time_line;
+#endif
+#endif
+
/* Create timeline system. */
session->timeline_system = mali_timeline_system_create(session);
if (NULL == session->timeline_system) {
_mali_osk_atomic_init(&session->number_of_window_jobs, 0);
#endif
+ _mali_osk_atomic_init(&session->number_of_pp_jobs, 0);
+
session->use_high_priority_job_queue = MALI_FALSE;
/* Initialize list of PP jobs on this session. */
err_session:
mali_mmu_pagedir_free(session->page_directory);
err_mmu:
+ _mali_osk_wait_queue_term(session->wait_queue);
+err_wait_queue:
_mali_osk_notification_queue_term(session->ioctl_queue);
err:
_mali_osk_free(session);
mali_soft_job_system_destroy(session->soft_job_system);
session->soft_job_system = NULL;
- MALI_DEBUG_CODE({
- /* Check that the pp_job_fb_lookup_list array is empty. */
- u32 i;
- for (i = 0; i < MALI_PP_JOB_FB_LOOKUP_LIST_SIZE; ++i)
- {
- MALI_DEBUG_ASSERT(_mali_osk_list_empty(&session->pp_job_fb_lookup_list[i]));
- }
- });
+ /* Wait for the session's PP job list to become empty. */
+ _mali_osk_wait_queue_wait_event(session->wait_queue, mali_session_pp_job_is_empty, (void *) session);
/* Free remaining memory allocated to this session */
mali_memory_session_end(session);
/* Free session data structures */
mali_mmu_pagedir_unmap(session->page_directory, MALI_DLBU_VIRT_ADDR, _MALI_OSK_MALI_PAGE_SIZE);
mali_mmu_pagedir_free(session->page_directory);
+ _mali_osk_wait_queue_term(session->wait_queue);
_mali_osk_notification_queue_term(session->ioctl_queue);
_mali_osk_free(session);
/*
- * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
*/
_mali_osk_errcode_t _mali_osk_gpu_secure_mode_deinit(void);
-/** @brief Enable the gpu secure mode.
+/** @brief Reset GPU and enable the gpu secure mode.
* @return _MALI_OSK_ERR_OK on success, otherwise failure.
*/
-_mali_osk_errcode_t _mali_osk_gpu_secure_mode_enable(void);
+_mali_osk_errcode_t _mali_osk_gpu_reset_and_secure_mode_enable(void);
-/** @brief Disable the gpu secure mode.
+/** @brief Reset GPU and disable the gpu secure mode.
* @return _MALI_OSK_ERR_OK on success, otherwise failure.
*/
-_mali_osk_errcode_t _mali_osk_gpu_secure_mode_disable(void);
+_mali_osk_errcode_t _mali_osk_gpu_reset_and_secure_mode_disable(void);
/** @brief Check if the gpu secure mode has been enabled.
* @return MALI_TRUE if enabled, otherwise MALI_FALSE.
/*
- * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
#include "regs/mali_200_regs.h"
#include "mali_kernel_common.h"
#include "mali_kernel_core.h"
-#include "mali_osk_mali.h"
#if defined(CONFIG_MALI400_PROFILING)
#include "mali_osk_profiling.h"
MALI_DEBUG_ASSERT_POINTER(core);
- /* Change gpu secure mode if needed. */
- if (MALI_TRUE == mali_pp_job_is_protected_job(job) && MALI_FALSE == _mali_osk_gpu_secure_mode_is_enabled()) {
- _mali_osk_gpu_secure_mode_enable();
- } else if (MALI_FALSE == mali_pp_job_is_protected_job(job) && MALI_TRUE == _mali_osk_gpu_secure_mode_is_enabled()) {
- _mali_osk_gpu_secure_mode_disable();
- }
-
/* Write frame registers */
/*
/*
- * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
job = _mali_osk_calloc(1, sizeof(struct mali_pp_job));
if (NULL != job) {
-
+
_mali_osk_list_init(&job->list);
_mali_osk_list_init(&job->session_fb_lookup_list);
+ _mali_osk_atomic_inc(&session->number_of_pp_jobs);
if (0 != _mali_osk_copy_from_user(&job->uargs, uargs, sizeof(_mali_uk_pp_start_job_s))) {
goto fail;
void mali_pp_job_delete(struct mali_pp_job *job)
{
+ struct mali_session_data *session;
+
MALI_DEBUG_ASSERT_POINTER(job);
MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));
+ session = mali_pp_job_get_session(job);
+ MALI_DEBUG_ASSERT_POINTER(session);
+
if (NULL != job->memory_cookies) {
#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
/* Unmap buffers attached to job */
_mali_osk_atomic_term(&job->sub_jobs_completed);
_mali_osk_atomic_term(&job->sub_job_errors);
-
+ _mali_osk_atomic_dec(&session->number_of_pp_jobs);
_mali_osk_free(job);
+
+ _mali_osk_wait_queue_wake_up(session->wait_queue);
}
void mali_pp_job_list_add(struct mali_pp_job *job, _mali_osk_list_t *list)
/*
- * Copyright (C) 2011-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2011-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
#include "linux/mali_memory_dma_buf.h"
#endif
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+#include "linux/mali_dma_fence.h"
+#include <linux/fence.h>
+#endif
typedef enum pp_job_status {
MALI_NO_SWAP_IN,
*/
_mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */
_mali_osk_list_t session_fb_lookup_list; /**< Used to link jobs together from the same frame builder in the session */
+
u32 sub_jobs_started; /**< Total number of sub-jobs started (always started in ascending order) */
/*
*/
u32 perf_counter_value0[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 0 (to be returned to user space), one for each sub job */
u32 perf_counter_value1[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 1 (to be returned to user space), one for each sub job */
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+ struct mali_dma_fence_context dma_fence_context; /**< The mali dma fence context to record dma fence waiters that this job wait for */
+ struct fence *rendered_dma_fence; /**< the new dma fence link to this job */
+#endif
};
void mali_pp_job_initialize(void);
#if defined(CONFIG_DMA_SHARED_BUFFER)
#include "mali_memory_dma_buf.h"
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+#include "mali_dma_fence.h"
+#include <linux/dma-buf.h>
+#endif
#endif
#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
static mali_timeline_point mali_scheduler_submit_gp_job(
struct mali_session_data *session, struct mali_gp_job *job);
-static mali_timeline_point mali_scheduler_submit_pp_job(
- struct mali_session_data *session, struct mali_pp_job *job);
+static _mali_osk_errcode_t mali_scheduler_submit_pp_job(
+ struct mali_session_data *session, struct mali_pp_job *job, mali_timeline_point *point);
static mali_bool mali_scheduler_queue_gp_job(struct mali_gp_job *job);
static mali_bool mali_scheduler_queue_pp_job(struct mali_pp_job *job);
job->user_notification = user_notification;
job->num_pp_cores_in_virtual = num_cores_in_virtual;
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+ if (NULL != job->rendered_dma_fence)
+ mali_dma_fence_signal_and_put(&job->rendered_dma_fence);
+#endif
+
if (dequeued) {
#if defined(CONFIG_MALI_DVFS)
if (mali_pp_job_is_window_surface(job)) {
mali_session_inc_num_window_jobs(session);
}
#endif
-
_mali_osk_pm_dev_ref_put();
if (mali_utilization_enabled()) {
_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx,
_mali_uk_pp_start_job_s *uargs)
{
+ _mali_osk_errcode_t ret;
struct mali_session_data *session;
struct mali_pp_job *job;
mali_timeline_point point;
point_ptr = (u32 __user *)(uintptr_t)mali_pp_job_get_timeline_point_ptr(job);
- point = mali_scheduler_submit_pp_job(session, job);
+ /* Submit PP job. */
+ ret = mali_scheduler_submit_pp_job(session, job, &point);
job = NULL;
- if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
- /*
- * Let user space know that something failed
- * after the job was started.
- */
- return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ if (_MALI_OSK_ERR_OK == ret) {
+ if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
+ /*
+ * Let user space know that something failed
+ * after the jobs were started.
+ */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
}
- return _MALI_OSK_ERR_OK;
+ return ret;
}
_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx,
_mali_uk_pp_and_gp_start_job_s *uargs)
{
+ _mali_osk_errcode_t ret;
struct mali_session_data *session;
_mali_uk_pp_and_gp_start_job_s kargs;
struct mali_pp_job *pp_job;
gp_job = NULL;
/* Submit PP job. */
- point = mali_scheduler_submit_pp_job(session, pp_job);
+ ret = mali_scheduler_submit_pp_job(session, pp_job, &point);
pp_job = NULL;
- if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
- /*
- * Let user space know that something failed
- * after the jobs were started.
- */
- return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ if (_MALI_OSK_ERR_OK == ret) {
+ if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
+ /*
+ * Let user space know that something failed
+ * after the jobs were started.
+ */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
}
- return _MALI_OSK_ERR_OK;
+ return ret;
}
void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args)
return point;
}
-static mali_timeline_point mali_scheduler_submit_pp_job(
- struct mali_session_data *session, struct mali_pp_job *job)
+static _mali_osk_errcode_t mali_scheduler_submit_pp_job(
+ struct mali_session_data *session, struct mali_pp_job *job, mali_timeline_point *point)
+
{
- mali_timeline_point point;
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+ struct ww_acquire_ctx ww_actx;
+ u32 i;
+ u32 num_memory_cookies = 0;
+ struct reservation_object **reservation_object_list = NULL;
+ unsigned int num_reservation_object = 0;
+#endif
MALI_DEBUG_ASSERT_POINTER(session);
MALI_DEBUG_ASSERT_POINTER(job);
mali_pp_job_fb_lookup_add(job);
mali_scheduler_unlock();
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+
+ /* Allocate the reservation_object_list to list the dma reservation object of dependent dma buffer */
+ num_memory_cookies = mali_pp_job_num_memory_cookies(job);
+ if (0 < num_memory_cookies) {
+ reservation_object_list = kzalloc(sizeof(struct reservation_object *) * num_memory_cookies, GFP_KERNEL);
+ if (NULL == reservation_object_list) {
+ MALI_PRINT_ERROR(("Failed to alloc the reservation object list.\n"));
+ ret = _MALI_OSK_ERR_NOMEM;
+ goto failed_to_alloc_reservation_object_list;
+ }
+ }
+
+ /* Add the dma reservation object into reservation_object_list*/
+ for (i = 0; i < num_memory_cookies; i++) {
+ mali_mem_backend *mem_backend = NULL;
+ struct reservation_object *tmp_reservation_object = NULL;
+ u32 mali_addr = mali_pp_job_get_memory_cookie(job, i);
+
+ mem_backend = mali_mem_backend_struct_search(session, mali_addr);
+
+ MALI_DEBUG_ASSERT_POINTER(mem_backend);
+
+ if (NULL == mem_backend) {
+ MALI_PRINT_ERROR(("Failed to find the memory backend for memory cookie[%d].\n", i));
+ goto failed_to_find_mem_backend;
+ }
+
+ if (MALI_MEM_DMA_BUF != mem_backend->type)
+ continue;
+
+ tmp_reservation_object = mem_backend->dma_buf.attachment->buf->resv;
+
+ if (NULL != tmp_reservation_object) {
+ mali_dma_fence_add_reservation_object_list(tmp_reservation_object,
+ reservation_object_list, &num_reservation_object);
+ }
+ }
+
+ /*
+ * Add the mali dma fence callback to wait for all dependent dma buf,
+ * and extend the timeline system to support dma fence,
+ * then create the new internal dma fence to replace all last dma fence for dependent dma buf.
+ */
+ if (0 < num_reservation_object) {
+ int error;
+ int num_dma_fence_waiter = 0;
+ /* Create one new dma fence.*/
+ job->rendered_dma_fence = mali_dma_fence_new(job->session->fence_context,
+ _mali_osk_atomic_inc_return(&job->session->fence_seqno));
+
+ if (NULL == job->rendered_dma_fence) {
+ MALI_PRINT_ERROR(("Failed to creat one new dma fence.\n"));
+ ret = _MALI_OSK_ERR_FAULT;
+ goto failed_to_create_dma_fence;
+ }
+
+ /* In order to avoid deadlock, wait/wound mutex lock to lock all dma buffers*/
+
+ error = mali_dma_fence_lock_reservation_object_list(reservation_object_list,
+ num_reservation_object, &ww_actx);
+
+ if (0 != error) {
+ MALI_PRINT_ERROR(("Failed to lock all reservation objects.\n"));
+ ret = _MALI_OSK_ERR_FAULT;
+ goto failed_to_lock_reservation_object_list;
+ }
+
+ mali_dma_fence_context_init(&job->dma_fence_context,
+ mali_timeline_dma_fence_callback, (void *)job);
+
+ /* Add dma fence waiters and dma fence callback. */
+ for (i = 0; i < num_reservation_object; i++) {
+ ret = mali_dma_fence_context_add_waiters(&job->dma_fence_context, reservation_object_list[i]);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_PRINT_ERROR(("Failed to add waiter into mali dma fence context.\n"));
+ goto failed_to_add_dma_fence_waiter;
+ }
+ }
+
+ for (i = 0; i < num_reservation_object; i++) {
+ reservation_object_add_excl_fence(reservation_object_list[i], job->rendered_dma_fence);
+ }
+
+ num_dma_fence_waiter = job->dma_fence_context.num_dma_fence_waiter;
+
+ /* Add job to Timeline system. */
+ (*point) = mali_timeline_system_add_tracker(session->timeline_system,
+ mali_pp_job_get_tracker(job), MALI_TIMELINE_PP);
+
+ if (0 != num_dma_fence_waiter) {
+ mali_dma_fence_context_dec_count(&job->dma_fence_context);
+ }
+
+ /* Unlock all wait/wound mutex lock. */
+ mali_dma_fence_unlock_reservation_object_list(reservation_object_list,
+ num_reservation_object, &ww_actx);
+ } else {
+ /* Add job to Timeline system. */
+ (*point) = mali_timeline_system_add_tracker(session->timeline_system,
+ mali_pp_job_get_tracker(job), MALI_TIMELINE_PP);
+ }
+
+ kfree(reservation_object_list);
+ return ret;
+#else
/* Add job to Timeline system. */
- point = mali_timeline_system_add_tracker(session->timeline_system,
+ (*point) = mali_timeline_system_add_tracker(session->timeline_system,
mali_pp_job_get_tracker(job), MALI_TIMELINE_PP);
+#endif
- return point;
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+failed_to_add_dma_fence_waiter:
+ mali_dma_fence_context_term(&job->dma_fence_context);
+ mali_dma_fence_unlock_reservation_object_list(reservation_object_list,
+ num_reservation_object, &ww_actx);
+failed_to_lock_reservation_object_list:
+ mali_dma_fence_signal_and_put(&job->rendered_dma_fence);
+failed_to_create_dma_fence:
+failed_to_find_mem_backend:
+ if (NULL != reservation_object_list)
+ kfree(reservation_object_list);
+failed_to_alloc_reservation_object_list:
+ mali_pp_job_fb_lookup_remove(job);
+#endif
+ return ret;
}
static mali_bool mali_scheduler_queue_gp_job(struct mali_gp_job *job)
struct mali_pp_job, list) {
_mali_osk_list_delinit(&job->list);
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+ mali_dma_fence_context_term(&job->dma_fence_context);
+#endif
+
mali_pp_job_delete(job); /* delete the job object itself */
}
}
/*
- * Copyright (C) 2012-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2012-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
return mali_session_count;
}
+/* Wait-queue predicate: returns MALI_TRUE when the session identified by
+ * @data (a struct mali_session_data *) has no outstanding PP jobs, i.e.
+ * its number_of_pp_jobs counter has reached zero; MALI_FALSE otherwise.
+ * Used by session teardown to block until all PP jobs have been deleted. */
+mali_bool mali_session_pp_job_is_empty(void *data)
+{
+ struct mali_session_data *session = (struct mali_session_data *)data;
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if ( 0 == _mali_osk_atomic_read(&session->number_of_pp_jobs)) {
+ return MALI_TRUE;
+ }
+ return MALI_FALSE;
+}
+
wait_queue_head_t *mali_session_get_wait_queue(void)
{
return &pending_queue;
/*
- * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
struct mali_session_data {
_mali_osk_notification_queue_t *ioctl_queue;
+ _mali_osk_wait_queue_t *wait_queue; /**The wait queue to wait for the number of pp job become 0.*/
+
_mali_osk_mutex_t *memory_lock; /**< Lock protecting the vm manipulation */
_mali_osk_mutex_t *cow_lock; /** < Lock protecting the cow memory free manipulation */
#if 0
#if defined(CONFIG_MALI_DVFS)
_mali_osk_atomic_t number_of_window_jobs; /**< Record the window jobs completed on this session in a period */
#endif
+ _mali_osk_atomic_t number_of_pp_jobs; /** < Record the pp jobs on this session */
_mali_osk_list_t pp_job_fb_lookup_list[MALI_PP_JOB_FB_LOOKUP_LIST_SIZE]; /**< List of PP job lists per frame builder id. Used to link jobs from same frame builder. */
-
struct mali_soft_job_system *soft_job_system; /**< Soft job system for this session. */
struct mali_timeline_system *timeline_system; /**< Timeline system for this session. */
size_t max_mali_mem_allocated_size; /**< The past max mali memory allocated size, which include mali os memory and mali dedicated memory. */
/* Added for new memroy system */
struct mali_allocation_manager allocation_mgr;
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+ u32 fence_context; /** < The execution dma fence context this fence is run on. */
+ _mali_osk_atomic_t fence_seqno; /** < Alinear increasing sequence number for this dma fence context. */
+#endif
};
_mali_osk_errcode_t mali_session_initialize(void);
void mali_session_add(struct mali_session_data *session);
void mali_session_remove(struct mali_session_data *session);
u32 mali_session_get_count(void);
+mali_bool mali_session_pp_job_is_empty(void *data);
wait_queue_head_t *mali_session_get_wait_queue(void);
#define MALI_SESSION_FOREACH(session, tmp, link) \
return system;
}
-#if defined(CONFIG_SYNC)
-
+#if defined(CONFIG_MALI_DMA_BUF_FENCE) ||defined(CONFIG_SYNC)
/**
* Check if there are any trackers left on timeline.
*
return mali_timeline_is_empty(timeline);
}
-
+#if defined(CONFIG_SYNC)
/**
* Cancel sync fence waiters waited upon by trackers on all timelines.
*
#endif /* defined(CONFIG_SYNC) */
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+/*
+ * Abort-path helper: walk every tracker on the PP timeline, detach its
+ * pending dma fence callbacks, manually fire the per-job callback for
+ * waiters that were successfully removed, then sleep until every
+ * timeline has drained.  Only valid while the session is aborting
+ * (asserted below).
+ */
+static void mali_timeline_cancel_dma_fence_waiters(struct mali_timeline_system *system)
+{
+ u32 i, j;
+ u32 tid = _mali_osk_get_tid();
+ struct mali_pp_job *pp_job = NULL;
+ struct mali_pp_job *next_pp_job = NULL;
+ struct mali_timeline *timeline = NULL;
+ struct mali_timeline_tracker *tracker, *tracker_next;
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(pp_job_list);
+
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+ MALI_DEBUG_ASSERT(system->session->is_aborting);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ /* Cancel dma fence waiters. */
+ timeline = system->timelines[MALI_TIMELINE_PP];
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+
+ tracker_next = timeline->tracker_tail;
+ while (NULL != tracker_next) {
+ mali_bool fence_is_signaled = MALI_TRUE;
+ tracker = tracker_next;
+ tracker_next = tracker->timeline_next;
+
+ /* Trackers without a dma fence waiter need no cancellation. */
+ if (NULL == tracker->waiter_dma_fence) continue;
+ pp_job = (struct mali_pp_job *)tracker->job;
+ MALI_DEBUG_ASSERT_POINTER(pp_job);
+ MALI_DEBUG_PRINT(3, ("Mali Timeline: Cancelling dma fence waiter for tracker 0x%08X.\n", tracker));
+
+ for (j = 0; j < pp_job->dma_fence_context.num_dma_fence_waiter; j++) {
+ if (pp_job->dma_fence_context.mali_dma_fence_waiters[j]) {
+ /* Cancel a previously callback from the fence.
+ * This function returns true if the callback is successfully removed,
+ * or false if the fence has already been signaled.
+ */
+ bool ret = fence_remove_callback(pp_job->dma_fence_context.mali_dma_fence_waiters[j]->fence,
+ &pp_job->dma_fence_context.mali_dma_fence_waiters[j]->base);
+ if (ret) {
+ /* Callback removed before it could run, so it must be invoked
+ * manually below to release the waiter. */
+ fence_is_signaled = MALI_FALSE;
+ }
+ }
+ }
+
+ /* Callbacks were not called, move pp job to local list. */
+ if (MALI_FALSE == fence_is_signaled)
+ _mali_osk_list_add(&pp_job->list, &pp_job_list);
+ }
+
+ /* Drop the lock before invoking callbacks: they re-take system->spinlock. */
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ /* Manually call dma fence callback in order to release waiter and trigger activation of tracker. */
+ _MALI_OSK_LIST_FOREACHENTRY(pp_job, next_pp_job, &pp_job_list, struct mali_pp_job, list) {
+ mali_timeline_dma_fence_callback((void *)pp_job);
+ }
+
+ /* Sleep until all dma fence callbacks are done and all timelines are empty. */
+ for (i = 0; i < MALI_TIMELINE_MAX; ++i) {
+ struct mali_timeline *timeline = system->timelines[i];
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ _mali_osk_wait_queue_wait_event(system->wait_queue, mali_timeline_has_no_trackers, (void *) timeline);
+ }
+}
+#endif
+#endif
void mali_timeline_system_abort(struct mali_timeline_system *system)
{
MALI_DEBUG_CODE(u32 tid = _mali_osk_get_tid(););
mali_timeline_cancel_sync_fence_waiters(system);
#endif /* defined(CONFIG_SYNC) */
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+ mali_timeline_cancel_dma_fence_waiters(system);
+#endif
+
/* Should not be any waiters or trackers left at this point. */
MALI_DEBUG_CODE({
u32 i;
sync_fence = NULL;
}
+#endif /* defined(CONFIG_SYNC)*/
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+ if ((NULL != tracker->timeline) && (MALI_TIMELINE_PP == tracker->timeline->id)) {
+
+ struct mali_pp_job *job = (struct mali_pp_job *)tracker->job;
+
+ if (0 < job->dma_fence_context.num_dma_fence_waiter) {
+ struct mali_timeline_waiter *waiter;
+ /* Check if we have a zeroed waiter object available. */
+ if (unlikely(NULL == waiter_tail)) {
+ MALI_PRINT_ERROR(("Mali Timeline: failed to allocate memory for waiter\n"));
+ goto exit;
+ }
+
+ /* Grab new zeroed waiter object. */
+ waiter = waiter_tail;
+ waiter_tail = waiter_tail->tracker_next;
+
+ /* Increase the trigger ref count of the tracker. */
+ tracker->trigger_ref_count++;
+
+ waiter->point = MALI_TIMELINE_NO_POINT;
+ waiter->tracker = tracker;
+
+ /* Insert waiter on tracker's singly-linked waiter list. */
+ if (NULL == tracker->waiter_head) {
+ /* list is empty */
+ MALI_DEBUG_ASSERT(NULL == tracker->waiter_tail);
+ tracker->waiter_tail = waiter;
+ } else {
+ tracker->waiter_head->tracker_next = waiter;
+ }
+ tracker->waiter_head = waiter;
+
+ /* Also store waiter in separate field for easy access by sync callback. */
+ tracker->waiter_dma_fence = waiter;
+ }
+ }
+#endif /* defined(CONFIG_MALI_DMA_BUF_FENCE)*/
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE) ||defined(CONFIG_SYNC)
exit:
-#endif /* defined(CONFIG_SYNC) */
+#endif /* defined(CONFIG_MALI_DMA_BUF_FENCE) || defined(CONFIG_SYNC) */
if (NULL != waiter_tail) {
mali_timeline_system_release_waiter_list(system, waiter_tail, waiter_head);
int num_waiters = 0;
struct mali_timeline_waiter *waiter_tail, *waiter_head;
u32 tid = _mali_osk_get_tid();
+
mali_timeline_point point = MALI_TIMELINE_NO_POINT;
MALI_DEBUG_ASSERT_POINTER(system);
num_waiters = mali_timeline_fence_num_waiters(&tracker->fence);
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+ if (MALI_TIMELINE_PP == timeline_id) {
+ struct mali_pp_job *job = (struct mali_pp_job *)tracker->job;
+ if (0 < job->dma_fence_context.num_dma_fence_waiter)
+ num_waiters++;
+ }
+#endif
+
/* Allocate waiters. */
mali_timeline_system_allocate_waiters(system, &waiter_tail, &waiter_head, num_waiters);
MALI_DEBUG_ASSERT(MALI_TIMELINE_SYSTEM_LOCKED(system));
}
#endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */
+
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+/*
+ * Release the timeline waiter attached to a PP job once all of its dma
+ * fences have signaled (or on the abort path, after manual cancellation).
+ * Wakes abort sleepers when the session is aborting; otherwise schedules
+ * the executor with the mask returned by the waiter release.
+ *
+ * @param pp_job_ptr Opaque pointer to the struct mali_pp_job whose
+ *                   tracker owns the dma fence waiter.
+ */
+void mali_timeline_dma_fence_callback(void *pp_job_ptr)
+{
+ struct mali_timeline_system *system;
+ struct mali_timeline_waiter *waiter;
+ struct mali_timeline_tracker *tracker;
+ struct mali_pp_job *pp_job = (struct mali_pp_job *)pp_job_ptr;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+ u32 tid = _mali_osk_get_tid();
+ mali_bool is_aborting = MALI_FALSE;
+
+ MALI_DEBUG_ASSERT_POINTER(pp_job);
+
+ tracker = &pp_job->tracker;
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+
+ system = tracker->system;
+ MALI_DEBUG_ASSERT_POINTER(system);
+ MALI_DEBUG_ASSERT_POINTER(system->session);
+
+ mali_spinlock_reentrant_wait(system->spinlock, tid);
+
+ waiter = tracker->waiter_dma_fence;
+ MALI_DEBUG_ASSERT_POINTER(waiter);
+
+ schedule_mask |= mali_timeline_system_release_waiter(system, waiter);
+
+ /* Sample the aborting flag while still holding the lock. */
+ is_aborting = system->session->is_aborting;
+
+ /* If aborting, wake up sleepers that are waiting for dma fence callbacks to complete. */
+ if (is_aborting) {
+ _mali_osk_wait_queue_wake_up(system->wait_queue);
+ }
+
+ mali_spinlock_reentrant_signal(system->spinlock, tid);
+
+ if (!is_aborting) {
+ mali_executor_schedule_from_mask(schedule_mask, MALI_TRUE);
+ }
+}
+#endif
_mali_osk_list_t sync_fence_cancel_list; /**< List node used to cancel sync fence waiters. */
#endif /* defined(CONFIG_SYNC) */
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+ struct mali_timeline_waiter *waiter_dma_fence; /**< A direct pointer to timeline waiter representing dma fence. */
+#endif
+
struct mali_timeline_system *system; /**< Timeline system. */
struct mali_timeline *timeline; /**< Timeline, or NULL if not on a timeline. */
enum mali_timeline_tracker_type type; /**< Type of tracker. */
#endif /* defined(MALI_TIMELINE_DEBUG_FUNCTIONS) */
+#if defined(CONFIG_MALI_DMA_BUF_FENCE)
+/**
+ * The timeline dma fence callback when dma fence signal.
+ *
+ * @param pp_job_ptr The pointer to pp job that link to the signaled dma fence.
+ */
+void mali_timeline_dma_fence_callback(void *pp_job_ptr);
+#endif
+
#endif /* __MALI_TIMELINE_H__ */
int (*secure_mode_init)(void);
/* Function that deinit the mali gpu secure mode */
void (*secure_mode_deinit)(void);
- /* Function that enable the mali gpu secure mode */
- int (*secure_mode_enable)(void);
- /* Function that disable the mali gpu secure mode */
- int (*secure_mode_disable)(void);
+ /* Function that reset GPU and enable gpu secure mode */
+ int (*gpu_reset_and_secure_mode_enable)(void);
+ /* Function that Reset GPU and disable gpu secure mode */
+ int (*gpu_reset_and_secure_mode_disable)(void);
/* ipa related interface customer need register */
#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)
struct devfreq_cooling_power *gpu_cooling_ops;
* If you do not wish to do so, delete this exception statement from your version.
*/
-
#ifndef _MALI_UTGARD_PROFILING_EVENTS_H_
#define _MALI_UTGARD_PROFILING_EVENTS_H_
u32 flags;
u64 backend_handle; /**< [out] backend handle */
s32 secure_shared_fd; /** < [in] the mem handle for secure mem */
- struct {
- /* buffer types*/
- /* CPU read/write info*/
- } buffer_info;
} _mali_uk_alloc_mem_s;
u32 rights; /**< [in] rights necessary for accessing memory */
u32 flags; /**< [in] flags, see \ref _MALI_MAP_EXTERNAL_MAP_GUARD_PAGE */
} bind_dma_buf;
- struct {
- /**/
- } bind_mali_memory;
struct {
u32 phys_addr; /**< [in] physical address */
u32 rights; /**< [in] rights necessary for accessing memory */
--- /dev/null
+/*
+ * Copyright (C) 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+#include <linux/version.h>
+#include "mali_osk.h"
+#include "mali_kernel_common.h"
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+#include "mali_dma_fence.h"
+#include <linux/atomic.h>
+#include <linux/workqueue.h>
+#endif
+
+/* Single lock shared by every fence created with mali_dma_fence_new(). */
+static DEFINE_SPINLOCK(mali_dma_fence_lock);
+
+/* Mali fences are always considered signalable; nothing to arm here. */
+static bool mali_dma_fence_enable_signaling(struct fence *fence)
+{
+ MALI_IGNORE(fence);
+ return true;
+}
+
+/* Driver name reported via the fence debug/trace interfaces. */
+static const char *mali_dma_fence_get_driver_name(struct fence *fence)
+{
+ MALI_IGNORE(fence);
+ return "mali";
+}
+
+/* Timeline name reported via the fence debug/trace interfaces. */
+static const char *mali_dma_fence_get_timeline_name(struct fence *fence)
+{
+ MALI_IGNORE(fence);
+ return "mali_dma_fence";
+}
+
+/* Ops table for Mali-created fences.  .signaled/.release are NULL so the
+ * fence core falls back to its defaults for those operations. */
+static const struct fence_ops mali_dma_fence_ops = {
+ .get_driver_name = mali_dma_fence_get_driver_name,
+ .get_timeline_name = mali_dma_fence_get_timeline_name,
+ .enable_signaling = mali_dma_fence_enable_signaling,
+ .signaled = NULL,
+ .wait = fence_default_wait,
+ .release = NULL
+};
+
+/*
+ * Detach and release every waiter still registered on this context.
+ *
+ * For each waiter: remove its callback from the fence (harmless if the
+ * fence already signaled), drop the fence reference taken in
+ * mali_dma_fence_add_callback(), and free the waiter.  Finally release
+ * the waiter pointer array and reset the bookkeeping fields so the
+ * context can be cleaned up again safely.
+ */
+static void mali_dma_fence_context_cleanup(struct mali_dma_fence_context *dma_fence_context)
+{
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
+
+ for (i = 0; i < dma_fence_context->num_dma_fence_waiter; i++) {
+ if (dma_fence_context->mali_dma_fence_waiters[i]) {
+ fence_remove_callback(dma_fence_context->mali_dma_fence_waiters[i]->fence,
+ &dma_fence_context->mali_dma_fence_waiters[i]->base);
+ fence_put(dma_fence_context->mali_dma_fence_waiters[i]->fence);
+ kfree(dma_fence_context->mali_dma_fence_waiters[i]);
+ dma_fence_context->mali_dma_fence_waiters[i] = NULL;
+ }
+ }
+
+ /* kfree(NULL) is a no-op, so no NULL guard is needed. */
+ kfree(dma_fence_context->mali_dma_fence_waiters);
+ dma_fence_context->mali_dma_fence_waiters = NULL;
+ dma_fence_context->num_dma_fence_waiter = 0;
+}
+
+/*
+ * Workqueue bounce: scheduled from mali_dma_fence_callback() once the
+ * context count reaches zero, so cb_func runs in process context rather
+ * than in the fence-signal path.
+ */
+static void mali_dma_fence_context_work_func(struct work_struct *work_handle)
+{
+ struct mali_dma_fence_context *dma_fence_context;
+
+ MALI_DEBUG_ASSERT_POINTER(work_handle);
+
+ dma_fence_context = container_of(work_handle, struct mali_dma_fence_context, work_handle);
+
+ /* Invoke the user callback (set in mali_dma_fence_context_init) with
+ * the PP job pointer it was registered with. */
+ dma_fence_context->cb_func(dma_fence_context->pp_job_ptr);
+}
+
+/*
+ * Per-fence signal callback.  Decrements the context's outstanding-fence
+ * count; when the last fence signals, defers the context callback to a
+ * workqueue (schedule_work) instead of running it inline, since fence
+ * callbacks may fire in atomic context.
+ */
+static void mali_dma_fence_callback(struct fence *fence, struct fence_cb *cb)
+{
+ struct mali_dma_fence_waiter *dma_fence_waiter = NULL;
+ struct mali_dma_fence_context *dma_fence_context = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(fence);
+ MALI_DEBUG_ASSERT_POINTER(cb);
+
+ MALI_IGNORE(fence);
+
+ /* Recover our waiter from the embedded fence_cb node. */
+ dma_fence_waiter = container_of(cb, struct mali_dma_fence_waiter, base);
+ dma_fence_context = dma_fence_waiter->parent;
+
+ MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
+
+ if (atomic_dec_and_test(&dma_fence_context->count))
+ schedule_work(&dma_fence_context->work_handle);
+}
+
+/*
+ * Register a signal callback on one fence and record it in the context.
+ *
+ * Grows the waiter pointer array by one, allocates a waiter, takes a
+ * fence reference, and registers mali_dma_fence_callback().  A fence
+ * that has already signaled (-ENOENT from fence_add_callback) is treated
+ * as success with no waiter recorded.
+ *
+ * @return _MALI_OSK_ERR_OK on success or already-signaled fence,
+ *         _MALI_OSK_ERR_NOMEM on allocation failure,
+ *         _MALI_OSK_ERR_FAULT on any other fence_add_callback error.
+ */
+static _mali_osk_errcode_t mali_dma_fence_add_callback(struct mali_dma_fence_context *dma_fence_context, struct fence *fence)
+{
+ int ret = 0;
+ struct mali_dma_fence_waiter *dma_fence_waiter;
+ struct mali_dma_fence_waiter **dma_fence_waiters;
+
+ MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
+ MALI_DEBUG_ASSERT_POINTER(fence);
+
+ /* Grow the waiter array by one slot; krealloc preserves old entries. */
+ dma_fence_waiters = krealloc(dma_fence_context->mali_dma_fence_waiters,
+ (dma_fence_context->num_dma_fence_waiter + 1)
+ * sizeof(struct mali_dma_fence_waiter *),
+ GFP_KERNEL);
+
+ if (NULL == dma_fence_waiters) {
+ MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to realloc the dma fence waiters.\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ dma_fence_context->mali_dma_fence_waiters = dma_fence_waiters;
+
+ dma_fence_waiter = kzalloc(sizeof(struct mali_dma_fence_waiter), GFP_KERNEL);
+
+ if (NULL == dma_fence_waiter) {
+ MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to create mali dma fence waiter.\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ /* Hold the fence for the lifetime of the waiter; released in cleanup. */
+ fence_get(fence);
+
+ dma_fence_waiter->fence = fence;
+ dma_fence_waiter->parent = dma_fence_context;
+ /* Count up before registering so the callback cannot underflow. */
+ atomic_inc(&dma_fence_context->count);
+
+ ret = fence_add_callback(fence, &dma_fence_waiter->base,
+ mali_dma_fence_callback);
+ if (0 > ret) {
+ /* Roll back the reference, the waiter, and the count. */
+ fence_put(fence);
+ kfree(dma_fence_waiter);
+ atomic_dec(&dma_fence_context->count);
+ if (-ENOENT == ret) {
+ /*-ENOENT if fence has already been signaled, return _MALI_OSK_ERR_OK*/
+ return _MALI_OSK_ERR_OK;
+ }
+ /* Failed to add the fence callback into fence, return _MALI_OSK_ERR_FAULT*/
+ MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to add callback into fence.\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ dma_fence_context->mali_dma_fence_waiters[dma_fence_context->num_dma_fence_waiter] = dma_fence_waiter;
+ dma_fence_context->num_dma_fence_waiter++;
+
+ return _MALI_OSK_ERR_OK;
+}
+
+
+/*
+ * Allocate and initialize a new Mali dma fence on the shared
+ * mali_dma_fence_lock.
+ *
+ * NOTE(review): mali_dma_fence_ops.release is NULL, so freeing relies on
+ * the fence core's default release path for the kzalloc'd fence — confirm
+ * against the target kernel's fence_release() implementation.
+ *
+ * @param context The execution context this fence runs on.
+ * @param seqno   Linearly increasing sequence number within that context.
+ * @return The new fence, or NULL on allocation failure.
+ */
+struct fence *mali_dma_fence_new(u32 context, u32 seqno)
+{
+ struct fence *fence = NULL;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+
+ if (NULL == fence) {
+ MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to create dma fence.\n"));
+ return fence;
+ }
+
+ fence_init(fence,
+ &mali_dma_fence_ops,
+ &mali_dma_fence_lock,
+ context, seqno);
+
+ return fence;
+}
+
+/*
+ * Signal a fence, drop our reference, and NULL the caller's pointer so
+ * it cannot be signaled or put twice.
+ *
+ * @param fence Address of the fence pointer; *fence must be non-NULL.
+ */
+void mali_dma_fence_signal_and_put(struct fence **fence)
+{
+ MALI_DEBUG_ASSERT_POINTER(fence);
+ MALI_DEBUG_ASSERT_POINTER(*fence);
+
+ fence_signal(*fence);
+ fence_put(*fence);
+ *fence = NULL;
+}
+
+/*
+ * Initialize a dma fence context for a PP job.
+ *
+ * The count starts at 1 (a creator reference) so the work item cannot
+ * fire while waiters are still being added; the creator is expected to
+ * drop that reference via mali_dma_fence_context_dec_count() once setup
+ * is complete.
+ *
+ * @param dma_fence_context The context to initialize.
+ * @param cb_func           Callback invoked (from a workqueue) when the
+ *                          count reaches zero.
+ * @param pp_job_ptr        Opaque PP job pointer passed to cb_func.
+ */
+void mali_dma_fence_context_init(struct mali_dma_fence_context *dma_fence_context,
+ mali_dma_fence_context_callback_func_t cb_func,
+ void *pp_job_ptr)
+{
+ MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
+
+ INIT_WORK(&dma_fence_context->work_handle, mali_dma_fence_context_work_func);
+ atomic_set(&dma_fence_context->count, 1);
+ dma_fence_context->num_dma_fence_waiter = 0;
+ dma_fence_context->mali_dma_fence_waiters = NULL;
+ dma_fence_context->cb_func = cb_func;
+ dma_fence_context->pp_job_ptr = pp_job_ptr;
+}
+
+/*
+ * Add waiters for every fence attached to a dma-buf reservation object.
+ *
+ * Collects the exclusive and shared fences via
+ * reservation_object_get_fences_rcu(), registers a callback on each, and
+ * on any failure cleans up all waiters added so far.  The fence
+ * references returned by the RCU snapshot are always dropped before
+ * returning.
+ *
+ * Fix vs. original: the raw negative-errno return of
+ * reservation_object_get_fences_rcu() was stored in the
+ * _mali_osk_errcode_t variable `ret`; it now lives in a separate `int`
+ * so the two error domains are never mixed.
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise the first error seen.
+ */
+_mali_osk_errcode_t mali_dma_fence_context_add_waiters(struct mali_dma_fence_context *dma_fence_context,
+ struct reservation_object *dma_reservation_object)
+{
+ _mali_osk_errcode_t ret = _MALI_OSK_ERR_OK;
+ int err;
+ struct fence *exclusive_fence = NULL;
+ u32 shared_count = 0, i;
+ struct fence **shared_fences = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
+ MALI_DEBUG_ASSERT_POINTER(dma_reservation_object);
+
+ /* Get all the shared/exclusive fences in the reservation object of dma buf*/
+ err = reservation_object_get_fences_rcu(dma_reservation_object, &exclusive_fence,
+ &shared_count, &shared_fences);
+ if (err < 0) {
+ MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to get shared or exclusive_fence dma fences from the reservation object of dma buf.\n"));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (exclusive_fence) {
+ ret = mali_dma_fence_add_callback(dma_fence_context, exclusive_fence);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to add callback into exclusive fence.\n"));
+ mali_dma_fence_context_cleanup(dma_fence_context);
+ goto ended;
+ }
+ }
+
+ for (i = 0; i < shared_count; i++) {
+ ret = mali_dma_fence_add_callback(dma_fence_context, shared_fences[i]);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to add callback into shared fence [%d].\n", i));
+ mali_dma_fence_context_cleanup(dma_fence_context);
+ break;
+ }
+ }
+
+ended:
+
+ /* Drop the snapshot references regardless of success or failure. */
+ if (exclusive_fence)
+ fence_put(exclusive_fence);
+
+ if (shared_fences) {
+ for (i = 0; i < shared_count; i++) {
+ fence_put(shared_fences[i]);
+ }
+ kfree(shared_fences);
+ }
+
+ return ret;
+}
+
+
+/*
+ * Tear down a dma fence context: zero the count so no further callback
+ * can schedule the work item, cancel any already-scheduled work (the
+ * work_handle.func check skips contexts that were never initialized),
+ * then release all waiters.
+ */
+void mali_dma_fence_context_term(struct mali_dma_fence_context *dma_fence_context)
+{
+ MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
+ atomic_set(&dma_fence_context->count, 0);
+ if (dma_fence_context->work_handle.func) {
+ cancel_work_sync(&dma_fence_context->work_handle);
+ }
+ mali_dma_fence_context_cleanup(dma_fence_context);
+}
+
+/*
+ * Drop one reference on the context count; when it reaches zero the
+ * context callback is scheduled on a workqueue.  Used by the creator to
+ * release the initial reference taken in mali_dma_fence_context_init().
+ */
+void mali_dma_fence_context_dec_count(struct mali_dma_fence_context *dma_fence_context)
+{
+ MALI_DEBUG_ASSERT_POINTER(dma_fence_context);
+
+ if (atomic_dec_and_test(&dma_fence_context->count))
+ schedule_work(&dma_fence_context->work_handle);
+}
+
+
+/*
+ * Append a reservation object to the caller's list, skipping duplicates.
+ * The caller must size dma_reservation_object_list large enough for the
+ * append; no bounds check is performed here.
+ *
+ * @param dma_reservation_object      Object to add.
+ * @param dma_reservation_object_list Accumulating array of unique objects.
+ * @param num_dma_reservation_object  In/out count of entries in the list.
+ */
+void mali_dma_fence_add_reservation_object_list(struct reservation_object *dma_reservation_object,
+ struct reservation_object **dma_reservation_object_list,
+ u32 *num_dma_reservation_object)
+{
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(dma_reservation_object);
+ MALI_DEBUG_ASSERT_POINTER(dma_reservation_object_list);
+ MALI_DEBUG_ASSERT_POINTER(num_dma_reservation_object);
+
+ /* Duplicate objects would deadlock the ww_mutex lock loop later. */
+ for (i = 0; i < *num_dma_reservation_object; i++) {
+ if (dma_reservation_object_list[i] == dma_reservation_object)
+ return;
+ }
+
+ dma_reservation_object_list[*num_dma_reservation_object] = dma_reservation_object;
+ (*num_dma_reservation_object)++;
+}
+
+/*
+ * Lock every reservation object in the list using the wait/wound mutex
+ * protocol.  On -EDEADLK the already-held locks are dropped, the
+ * contended lock is taken with ww_mutex_lock_slow(), and the loop
+ * restarts (the slow-locked object is skipped on the retry pass).
+ *
+ * Fix vs. original: the failure MALI_DEBUG_PRINT passed a stray `i`
+ * argument to a format string with no conversion specifier.
+ *
+ * @return 0 on success; a negative errno (with no locks held and the
+ *         acquire context finalized) on any non-deadlock failure.
+ */
+int mali_dma_fence_lock_reservation_object_list(struct reservation_object **dma_reservation_object_list,
+ u32 num_dma_reservation_object, struct ww_acquire_ctx *ww_actx)
+{
+ u32 i;
+
+ struct reservation_object *reservation_object_to_slow_lock = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(dma_reservation_object_list);
+ MALI_DEBUG_ASSERT_POINTER(ww_actx);
+
+ ww_acquire_init(ww_actx, &reservation_ww_class);
+
+again:
+ for (i = 0; i < num_dma_reservation_object; i++) {
+ int ret;
+
+ /* Already slow-locked on the previous pass; keep holding it. */
+ if (dma_reservation_object_list[i] == reservation_object_to_slow_lock) {
+ reservation_object_to_slow_lock = NULL;
+ continue;
+ }
+
+ ret = ww_mutex_lock(&dma_reservation_object_list[i]->lock, ww_actx);
+
+ if (ret < 0) {
+ u32 slow_lock_index = i;
+
+ /* unlock all pre locks we have already locked.*/
+ while (i > 0) {
+ i--;
+ ww_mutex_unlock(&dma_reservation_object_list[i]->lock);
+ }
+
+ if (NULL != reservation_object_to_slow_lock)
+ ww_mutex_unlock(&reservation_object_to_slow_lock->lock);
+
+ if (ret == -EDEADLK) {
+ reservation_object_to_slow_lock = dma_reservation_object_list[slow_lock_index];
+ ww_mutex_lock_slow(&reservation_object_to_slow_lock->lock, ww_actx);
+ goto again;
+ }
+ ww_acquire_fini(ww_actx);
+ MALI_DEBUG_PRINT(1, ("Mali dma fence: failed to lock all dma reservation objects.\n"));
+ return ret;
+ }
+ }
+
+ ww_acquire_done(ww_actx);
+ return 0;
+}
+
+/* Release every reservation object lock taken by
+ * mali_dma_fence_lock_reservation_object_list() and finalize the
+ * wait/wound acquire context. */
+void mali_dma_fence_unlock_reservation_object_list(struct reservation_object **dma_reservation_object_list,
+ u32 num_dma_reservation_object, struct ww_acquire_ctx *ww_actx)
+{
+ u32 idx;
+
+ for (idx = 0; idx < num_dma_reservation_object; idx++) {
+ ww_mutex_unlock(&dma_reservation_object_list[idx]->lock);
+ }
+
+ ww_acquire_fini(ww_actx);
+}
--- /dev/null
+/*
+ * Copyright (C) 2012-2016 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file mali_dma_fence.h
+ *
+ * Mali interface for Linux dma buf fence objects.
+ */
+
+#ifndef _MALI_DMA_FENCE_H_
+#define _MALI_DMA_FENCE_H_
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0)
+#include <linux/fence.h>
+#include <linux/reservation.h>
+#endif
+
+struct mali_dma_fence_context;
+
+/* The mali dma fence context callback function */
+typedef void (*mali_dma_fence_context_callback_func_t)(void *pp_job_ptr);
+
+/* One registered signal callback on a single dma fence. */
+struct mali_dma_fence_waiter {
+ struct fence_cb base; /* embedded callback node handed to fence_add_callback() */
+ struct mali_dma_fence_context *parent; /* context that owns this waiter */
+ struct fence *fence; /* fence we hold a reference on until cleanup */
+};
+
+/* Tracks all dma fence waiters created for one PP job.
+ * (Fix vs. original: removed a stray ';' after the pp_job_ptr comment,
+ * which formed a non-standard empty declaration in struct scope.) */
+struct mali_dma_fence_context {
+ struct work_struct work_handle; /* defers cb_func out of fence-signal context */
+ struct mali_dma_fence_waiter **mali_dma_fence_waiters; /* array of registered waiters */
+ u32 num_dma_fence_waiter; /* entries in the waiter array */
+ atomic_t count; /* unsignaled fences + the creator's initial reference */
+ void *pp_job_ptr; /* the mali pp job pointer */
+ mali_dma_fence_context_callback_func_t cb_func; /* invoked when count hits zero */
+};
+
+/* Create a dma fence
+ * @param context The execution context this fence is run on
+ * @param seqno A linearly increasing sequence number for this context
+ * @return the new dma fence if success, or NULL on failure.
+ */
+struct fence *mali_dma_fence_new(u32 context, u32 seqno);
+
+/* Signal and put dma fence
+ * @param fence The dma fence to signal and put
+ */
+void mali_dma_fence_signal_and_put(struct fence **fence);
+
+/**
+ * Initialize a mali dma fence context for pp job.
+ * @param dma_fence_context The mali dma fence context to initialize.
+ * @param cb_func The dma fence context callback function to call when all dma fence release.
+ * @param pp_job_ptr The pp_job to call function with.
+ */
+void mali_dma_fence_context_init(struct mali_dma_fence_context *dma_fence_context,
+ mali_dma_fence_context_callback_func_t cb_func,
+ void *pp_job_ptr);
+
+/**
+ * Add new mali dma fence waiter into mali dma fence context
+ * @param dma_fence_context The mali dma fence context
+ * @param dma_reservation_object the reservation object to create new mali dma fence waiters
+ * @return _MALI_OSK_ERR_OK if success, or not.
+ */
+_mali_osk_errcode_t mali_dma_fence_context_add_waiters(struct mali_dma_fence_context *dma_fence_context,
+ struct reservation_object *dma_reservation_object);
+
+/**
+ * Release the dma fence context
+ * @param dma_fence_context The mali dma fence context.
+ */
+void mali_dma_fence_context_term(struct mali_dma_fence_context *dma_fence_context);
+
+/**
+ * Decrease the dma fence context atomic count
+ * @param dma_fence_context The mali dma fence context.
+ */
+void mali_dma_fence_context_dec_count(struct mali_dma_fence_context *dma_fence_context);
+
+/**
+ * Get all reservation object
+ * @param dma_reservation_object The reservation object to add into the reservation object list
+ * @param dma_reservation_object_list The reservation object list to store all reservation object
+ * @param num_dma_reservation_object The number of all reservation object
+ */
+void mali_dma_fence_add_reservation_object_list(struct reservation_object *dma_reservation_object,
+ struct reservation_object **dma_reservation_object_list,
+ u32 *num_dma_reservation_object);
+
+/**
+ * Wait/wound mutex lock to lock all reservation object.
+ */
+int mali_dma_fence_lock_reservation_object_list(struct reservation_object **dma_reservation_object_list,
+ u32 num_dma_reservation_object, struct ww_acquire_ctx *ww_actx);
+
+/**
+ * Wait/wound mutex lock to unlock all reservation object.
+ */
+void mali_dma_fence_unlock_reservation_object_list(struct reservation_object **dma_reservation_object_list,
+ u32 num_dma_reservation_object, struct ww_acquire_ctx *ww_actx);
+#endif
/*
- * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
/*
- * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
session_data->cow_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_UNORDERED, 0);
if (NULL == session_data->cow_lock) {
_mali_osk_mutex_term(session_data->memory_lock);
- _mali_osk_free(session_data);
MALI_ERROR(_MALI_OSK_ERR_FAULT);
}
/*\r
- * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
/*\r
- * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
/*\r
- * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
/*
- * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
/*
- * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
/* Allocate new pages, if needed. */
for (i = 0; i < remaining; i++) {
dma_addr_t dma_addr;
- gfp_t flags = __GFP_ZERO | __GFP_NORETRY | __GFP_NOWARN | __GFP_COLD;
+ gfp_t flags = __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN | __GFP_COLD;
int err;
#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_LPAE)
/*
- * Copyright (C) 2010, 2013, 2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2010, 2013, 2015-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
/*
- * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
/*
- * Copyright (C) 2013-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2013-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
/*
- * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
/* Function that init the mali gpu secure mode */
void (*mali_secure_mode_deinit)(void) = NULL;
-/* Function that enable the mali gpu secure mode */
-int (*mali_secure_mode_enable)(void) = NULL;
-/* Function that disable the mali gpu secure mode */
-int (*mali_secure_mode_disable)(void) = NULL;
+/* Function that reset GPU and enable the mali gpu secure mode */
+int (*mali_gpu_reset_and_secure_mode_enable)(void) = NULL;
+/* Function that reset GPU and disable the mali gpu secure mode */
+int (*mali_gpu_reset_and_secure_mode_disable)(void) = NULL;
#ifdef CONFIG_MALI_DT
if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
if ((NULL != data.secure_mode_init) && (NULL != data.secure_mode_deinit)
- && (NULL != data.secure_mode_enable) && (NULL != data.secure_mode_disable)) {
+ && (NULL != data.gpu_reset_and_secure_mode_enable) && (NULL != data.gpu_reset_and_secure_mode_disable)) {
int err = data.secure_mode_init();
if (err) {
MALI_DEBUG_PRINT(1, ("Failed to init gpu secure mode.\n"));
}
mali_secure_mode_deinit = data.secure_mode_deinit;
- mali_secure_mode_enable = data.secure_mode_enable;
- mali_secure_mode_disable = data.secure_mode_disable;
+ mali_gpu_reset_and_secure_mode_enable = data.gpu_reset_and_secure_mode_enable;
+ mali_gpu_reset_and_secure_mode_disable = data.gpu_reset_and_secure_mode_disable;
mali_secure_mode_supported = MALI_TRUE;
mali_secure_mode_enabled = MALI_FALSE;
}
-_mali_osk_errcode_t _mali_osk_gpu_secure_mode_enable(void)
+_mali_osk_errcode_t _mali_osk_gpu_reset_and_secure_mode_enable(void)
{
/* the mali executor lock must be held before enter this function. */
MALI_DEBUG_ASSERT(MALI_FALSE == mali_secure_mode_enabled);
- if (NULL != mali_secure_mode_enable) {
- if (mali_secure_mode_enable()) {
- MALI_DEBUG_PRINT(1, ("Failed to enable gpu secure mode.\n"));
+ if (NULL != mali_gpu_reset_and_secure_mode_enable) {
+ if (mali_gpu_reset_and_secure_mode_enable()) {
+ MALI_DEBUG_PRINT(1, ("Failed to reset GPU or enable gpu secure mode.\n"));
return _MALI_OSK_ERR_FAULT;
}
mali_secure_mode_enabled = MALI_TRUE;
return _MALI_OSK_ERR_UNSUPPORTED;
}
-_mali_osk_errcode_t _mali_osk_gpu_secure_mode_disable(void)
+_mali_osk_errcode_t _mali_osk_gpu_reset_and_secure_mode_disable(void)
{
/* the mali executor lock must be held before enter this function. */
MALI_DEBUG_ASSERT(MALI_TRUE == mali_secure_mode_enabled);
- if (NULL != mali_secure_mode_disable) {
- if (mali_secure_mode_disable()) {
- MALI_DEBUG_PRINT(1, ("Failed to disable gpu secure mode.\n"));
+ if (NULL != mali_gpu_reset_and_secure_mode_disable) {
+ if (mali_gpu_reset_and_secure_mode_disable()) {
+ MALI_DEBUG_PRINT(1, ("Failed to reset GPU or disable gpu secure mode.\n"));
return _MALI_OSK_ERR_FAULT;
}
mali_secure_mode_enabled = MALI_FALSE;
/**
- * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
{
s32 fd = -1;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0)
fd = get_unused_fd();
+#else
+ fd = get_unused_fd_flags(0);
+#endif
+
if (fd < 0) {
sync_fence_put(sync_fence);
return -1;
/*
- * Copyright (C) 2010-2015 ARM Limited. All rights reserved.
+ * Copyright (C) 2010-2016 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
#define SECURE_MODE_CONTROL_HANDLER 0x6F02006C
void *secure_mode_mapped_addr = NULL;
/**
- * Enable/Disable Mali secure mode.
+ * Reset GPU and enable/disable Mali secure mode.
* @Return value:
* 0: success
* non-0: failure.
*/
-static int mali_secure_mode_enable_juno(void)
+static int mali_gpu_reset_and_secure_mode_enable_juno(void)
{
u32 phys_offset = SECURE_MODE_CONTROL_HANDLER & 0x00001FFF;
MALI_DEBUG_ASSERT(NULL != secure_mode_mapped_addr);
iowrite32(1, ((u8 *)secure_mode_mapped_addr) + phys_offset);
if (1 == (u32)ioread32(((u8 *)secure_mode_mapped_addr) + phys_offset)) {
- MALI_DEBUG_PRINT(3, ("Mali enables secured mode successfully! \n"));
+ MALI_DEBUG_PRINT(3, ("Mali reset GPU and enable secured mode successfully! \n"));
return 0;
}
- MALI_PRINT_ERROR(("Failed to enable Mali secured mode !!! \n"));
+ MALI_PRINT_ERROR(("Failed to reset GPU and enable Mali secured mode !!! \n"));
return -1;
}
-static int mali_secure_mode_disable_juno(void)
+static int mali_gpu_reset_and_secure_mode_disable_juno(void)
{
u32 phys_offset = SECURE_MODE_CONTROL_HANDLER & 0x00001FFF;
MALI_DEBUG_ASSERT(NULL != secure_mode_mapped_addr);
iowrite32(0, ((u8 *)secure_mode_mapped_addr) + phys_offset);
if (0 == (u32)ioread32(((u8 *)secure_mode_mapped_addr) + phys_offset)) {
- MALI_DEBUG_PRINT(3, ("Mali disable secured mode successfully! \n"));
+ MALI_DEBUG_PRINT(3, ("Mali reset GPU and disable secured mode successfully! \n"));
return 0;
}
- MALI_PRINT_ERROR(("Failed to disable mali secured mode !!! \n"));
+ MALI_PRINT_ERROR(("Failed to reset GPU and disable mali secured mode !!! \n"));
return -1;
}
secure_mode_mapped_addr = ioremap_nocache(phys_addr_page, map_size);
if (NULL != secure_mode_mapped_addr) {
- return mali_secure_mode_disable_juno();
+ return mali_gpu_reset_and_secure_mode_disable_juno();
}
MALI_DEBUG_PRINT(2, ("Failed to ioremap for Mali secured mode! \n"));
return -1;
static void mali_secure_mode_deinit_juno(void)
{
if (NULL != secure_mode_mapped_addr) {
- mali_secure_mode_disable_juno();
+ mali_gpu_reset_and_secure_mode_disable_juno();
iounmap(secure_mode_mapped_addr);
secure_mode_mapped_addr = NULL;
}
#if defined(CONFIG_ARCH_VEXPRESS) && defined(CONFIG_ARM64)
.secure_mode_init = mali_secure_mode_init_juno,
.secure_mode_deinit = mali_secure_mode_deinit_juno,
- .secure_mode_enable = mali_secure_mode_enable_juno,
- .secure_mode_disable = mali_secure_mode_disable_juno,
+ .gpu_reset_and_secure_mode_enable = mali_gpu_reset_and_secure_mode_enable_juno,
+ .gpu_reset_and_secure_mode_disable = mali_gpu_reset_and_secure_mode_disable_juno,
#else
.secure_mode_init = NULL,
.secure_mode_deinit = NULL,
- .secure_mode_enable = NULL,
- .secure_mode_disable = NULL,
+ .gpu_reset_and_secure_mode_enable = NULL,
+ .gpu_reset_and_secure_mode_disable = NULL,
#endif
#if defined(CONFIG_MALI_DEVFREQ) && defined(CONFIG_DEVFREQ_THERMAL)
.gpu_cooling_ops = &arm_cooling_ops,
#if defined(CONFIG_ARCH_VEXPRESS)
#if defined(CONFIG_ARM64)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
+ mali_gpu_device.dev.archdata.dma_ops = &dummy_dma_ops;
+#else
mali_gpu_device.dev.archdata.dma_ops = dma_ops;
+#endif
if ((mali_read_phys(0x6F000000) & 0x00600450) == 0x00600450) {
MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP6 device\n"));
num_pp_cores = 6;
if (mem->is_cached) {
descriptor->is_cached = 1;
- args->is_cached = 1;
DBG_MSG(3, ("Mapping UMP secure_id: %d as cached.\n", args->secure_id));
} else {
descriptor->is_cached = 0;
- args->is_cached = 0;
DBG_MSG(3, ("Mapping UMP secure_id: %d as Uncached.\n", args->secure_id));
}
break;
case UMP_IOC_DMABUF_IMPORT:
- #ifdef CONFIG_DMA_SHARED_BUFFER
+#ifdef CONFIG_DMA_SHARED_BUFFER
err = ump_dmabuf_import_wrapper((u32 __user *)argument, session_data);
- #else
+#else
err = -EFAULT;
DBG_MSG(1, ("User space use dmabuf API, but kernel don't support DMA BUF\n"));
- #endif
+#endif
break;
default:
args.size = vma->vm_end - vma->vm_start;
args._ukk_private = vma;
args.secure_id = vma->vm_pgoff;
- args.is_cached = 0;
- if (!(vma->vm_flags & VM_SHARED)) {
- args.is_cached = 1;
- vma->vm_flags = vma->vm_flags | VM_SHARED | VM_MAYSHARE ;
- DBG_MSG(3, ("UMP Map function: Forcing the CPU to use cache\n"));
- }
/* By setting this flag, during a process fork; the child process will not have the parent UMP mappings */
vma->vm_flags |= VM_DONTCOPY;
if (mem->import_attach) {
struct dma_buf_attachment *attach = mem->import_attach;
struct dma_buf *dma_buf;
-
+
if (mem->sgt)
dma_buf_unmap_attachment(attach, mem->sgt,
- DMA_BIDIRECTIONAL);
+ DMA_BIDIRECTIONAL);
dma_buf = attach->dmabuf;
dma_buf_detach(attach->dmabuf, attach);
#ifdef CONFIG_DMA_SHARED_BUFFER
static ump_dd_handle get_ump_handle_from_dmabuf(struct ump_session_data *session_data,
- struct dma_buf *dmabuf)
+ struct dma_buf *dmabuf)
{
ump_session_memory_list_element *session_mem, *tmp;
struct dma_buf_attachment *attach;
_mali_osk_mutex_wait(session_data->lock);
_MALI_OSK_LIST_FOREACHENTRY(session_mem, tmp,
- &session_data->list_head_session_memory_list,
- ump_session_memory_list_element, list) {
+ &session_data->list_head_session_memory_list,
+ ump_session_memory_list_element, list) {
if (session_mem->mem->import_attach) {
attach = session_mem->mem->import_attach;
if (attach->dmabuf == dmabuf) {
_mali_osk_mutex_signal(session_data->lock);
ump_handle = (ump_dd_handle)session_mem->mem;
- ump_random_mapping_get(device.secure_id_map, ump_dd_secure_id_get(ump_handle));
+ ump_random_mapping_get(device.secure_id_map, ump_dd_secure_id_get(ump_handle));
return ump_handle;
}
}
}
int ump_dmabuf_import_wrapper(u32 __user *argument,
- struct ump_session_data *session_data)
+ struct ump_session_data *session_data)
{
ump_session_memory_list_element *session = NULL;
_ump_uk_dmabuf_s ump_dmabuf;
struct dma_buf *dma_buf;
struct sg_table *sgt = NULL;
struct scatterlist *sgl;
- unsigned int i = 0;
+ unsigned int i = 0;
int ret = 0;
/* Sanity check input parameters */
}
if (copy_from_user(&ump_dmabuf, argument,
- sizeof(_ump_uk_dmabuf_s))) {
+ sizeof(_ump_uk_dmabuf_s))) {
MSG_ERR(("copy_from_user() failed.\n"));
return -EFAULT;
}
dma_buf_put(dma_buf);
goto found;
}
-
+
attach = dma_buf_attach(dma_buf, ump_global_mdev);
if (IS_ERR(attach)) {
ret = PTR_ERR(attach);
blocks = (ump_dd_physical_block *)_mali_osk_malloc(sizeof(ump_dd_physical_block) * sgt->nents);
if (!blocks) {
DBG_MSG(1, ("Failed to allocate blocks.\n"));
- ret = -EFAULT;
- goto err_dma_buf_unmap;
+ ret = -EFAULT;
+ goto err_dma_buf_unmap;
}
for_each_sg(sgt->sgl, sgl, sgt->nents, i) {
blocks[i].addr = sg_phys(sgl);
_mali_osk_mutex_wait(session_data->lock);
_mali_osk_list_add(&(session->list),
- &(session_data->list_head_session_memory_list));
+ &(session_data->list_head_session_memory_list));
_mali_osk_mutex_signal(session_data->lock);
_mali_osk_free(blocks);
ump_dmabuf.size = ump_dd_size_get(ump_handle);
if (copy_to_user(argument, &ump_dmabuf,
- sizeof(_ump_uk_dmabuf_s))) {
+ sizeof(_ump_uk_dmabuf_s))) {
MSG_ERR(("copy_to_user() failed.\n"));
ret = -EFAULT;
goto err_release_ump_handle;