#
-# This confidential and proprietary software may be used only as
-# authorised by a licensing agreement from ARM Limited
-# (C) COPYRIGHT 2007-2011 ARM Limited
-# ALL RIGHTS RESERVED
-# The entire notice above must be reproduced on all authorised
-# copies and copies may only be made to the extent permitted
-# by a licensing agreement from ARM Limited.
+# Copyright (C) 2010-2011 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# This file is called by the Linux build system.
OS_MEMORY_KERNEL_BUFFER_SIZE_IN_MB ?= 16
USING_GPU_UTILIZATION ?= 1
PROFILING_SKIP_PP_JOBS ?= 0
-TARGET_PLATFORM ?= rk30
+PROFILING_SKIP_PP_AND_GP_JOBS ?= 0
MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP ?= 0
MALI_PP_SCHEDULER_KEEP_SUB_JOB_STARTS_ALIGNED ?= 0
MALI_PP_SCHEDULER_FORCE_NO_JOB_OVERLAP_BETWEEN_APPS ?= 0
MALI_UPPER_HALF_SCHEDULING ?= 1
MALI_ENABLE_CPU_CYCLES ?= 0
-
-MALI_PLATFORM ?=rk30
+MALI_PLATFORM ?= rk30
# For customer releases the Linux Device Drivers will be provided as ARM proprietary and GPL releases:
# The ARM proprietary product will only include the license/proprietary directory
ccflags-y += -I$(src)/linux/license/gpl
endif
-
+ifeq ($(USING_GPU_UTILIZATION), 1)
+ ifeq ($(USING_DVFS), 1)
+ $(error USING_GPU_UTILIZATION conflict with USING_DVFS you can read the Integration Guide to choose which one do you need)
+ endif
+endif
ifneq ($(MALI_PLATFORM),)
-EXTRA_DEFINES += -DMALI_FAKE_PLATFORM_DEVICE=1
-#MALI_PLATFORM_FILES = $(wildcard platform/$(MALI_PLATFORM)/*.c)
-mali-y += \
- platform/$(MALI_PLATFORM)/mali_platform.o \
- platform/$(MALI_PLATFORM)/rk3066.o
+ EXTRA_DEFINES += -DMALI_FAKE_PLATFORM_DEVICE=1
+ #MALI_PLATFORM_FILES = $(wildcard platform/$(MALI_PLATFORM)/*.c)
+ mali-y += \
+ platform/$(MALI_PLATFORM)/mali_platform.o \
+ platform/$(MALI_PLATFORM)/rk3066.o \
+ platform/$(MALI_PLATFORM)/mali_dvfs.o \
+ platform/$(MALI_PLATFORM)/arm_core_scaling.o
endif
-
mali-y += \
linux/mali_osk_atomics.o \
linux/mali_osk_irq.o \
common/mali_gp_job.o \
common/mali_soft_job.o \
common/mali_scheduler.o \
- common/mali_gp_scheduler.o \
- common/mali_pp_scheduler.o \
+ common/mali_executor.o \
common/mali_group.o \
common/mali_dlbu.o \
common/mali_broadcast.o \
common/mali_pmu.o \
common/mali_user_settings_db.o \
common/mali_kernel_utilization.o \
+ common/mali_control_timer.o \
common/mali_l2_cache.o \
- common/mali_dma.o \
common/mali_timeline.o \
common/mali_timeline_fence_wait.o \
common/mali_timeline_sync_fence.o \
mali-$(CONFIG_MALI400_UMP) += linux/mali_memory_ump.o
-mali-$(CONFIG_MALI400_POWER_PERFORMANCE_POLICY) += common/mali_power_performance_policy.o
+mali-$(CONFIG_MALI_DVFS) += common/mali_dvfs_policy.o
# Tell the Linux build system from which .o file to create the kernel module
obj-$(CONFIG_MALI400) := mali.o
ccflags-y += -DMALI_UPPER_HALF_SCHEDULING
endif
-ccflags-$(CONFIG_MALI400_UMP) += -I$(src)/../include/ump
+ccflags-$(CONFIG_MALI400_UMP) += -I$(src)/../../ump/include/ump
ccflags-$(CONFIG_MALI400_DEBUG) += -DDEBUG
# Use our defines when compiling
VERSION_STRINGS += USING_PROFILING=$(CONFIG_MALI400_PROFILING)
VERSION_STRINGS += USING_INTERNAL_PROFILING=$(CONFIG_MALI400_INTERNAL_PROFILING)
VERSION_STRINGS += USING_GPU_UTILIZATION=$(USING_GPU_UTILIZATION)
-VERSION_STRINGS += USING_POWER_PERFORMANCE_POLICY=$(CONFIG_POWER_PERFORMANCE_POLICY)
+VERSION_STRINGS += USING_DVFS=$(CONFIG_MALI_DVFS)
VERSION_STRINGS += MALI_UPPER_HALF_SCHEDULING=$(MALI_UPPER_HALF_SCHEDULING)
# Create file with Mali driver configuration
---help---
This enables support for the UMP memory sharing API in the Mali driver.
-config MALI400_POWER_PERFORMANCE_POLICY
- bool "Enable Mali power performance policy"
- depends on ARM
- default n
+config MALI_DVFS
+ bool "Enable Mali dynamically frequency change"
+ depends on MALI400
+ default y
---help---
- This enables support for dynamic performance scaling of Mali with the goal of lowering power consumption.
+	  This enables support for dynamic frequency changing of Mali with the goal of lowering power consumption.
config MALI_DMA_BUF_MAP_ON_ATTACH
bool "Map dma-buf attachments on attach"
domains at the same time may cause peak currents higher than what some systems can handle.
These systems must not enable this option.
+config MALI_DT
+ bool "Using device tree to initialize module"
+ depends on MALI400 && OF
+ default n
+ ---help---
+	  This enables the Mali driver to use the device tree path to get platform resources
+	  and disables the old config method. The Mali driver can run on platforms where the
+	  device tree is enabled in the kernel and the corresponding hardware description is
+	  implemented properly in the device DTS file.
+
config MALI_QUIET
bool "Make Mali driver very quiet"
depends on MALI400 && !MALI400_DEBUG
--- /dev/null
+#
+# Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+USE_UMPV2=0
+USING_PROFILING ?= 1
+USING_INTERNAL_PROFILING ?= 0
+USING_DVFS ?= 1
+MALI_HEATMAPS_ENABLED ?= 0
+MALI_DMA_BUF_MAP_ON_ATTACH ?= 1
+MALI_PMU_PARALLEL_POWER_UP ?= 0
+USING_DT ?= 0
+
+# The Makefile sets up "arch" based on the CONFIG, creates the version info
+# string and the __malidrv_build_info.c file, and then call the Linux build
+# system to actually build the driver. After that point the Kbuild file takes
+# over.
+
+# set up defaults if not defined by the user
+ARCH ?= arm
+
+OSKOS=linux
+FILES_PREFIX=
+
+check_cc2 = \
+ $(shell if $(1) -S -o /dev/null -xc /dev/null > /dev/null 2>&1; \
+ then \
+ echo "$(2)"; \
+ else \
+ echo "$(3)"; \
+ fi ;)
+
+# This conditional makefile exports the global definition ARM_INTERNAL_BUILD. Customer releases will not include arm_internal.mak
+-include ../../../arm_internal.mak
+
+# Give a warning if old config parameters are used
+ifneq ($(CONFIG),)
+$(warning "You have specified the CONFIG variable which is no longer in used. Use TARGET_PLATFORM instead.")
+endif
+
+ifneq ($(CPU),)
+$(warning "You have specified the CPU variable which is no longer in used. Use TARGET_PLATFORM instead.")
+endif
+
+# Include the mapping between TARGET_PLATFORM and KDIR + MALI_PLATFORM
+-include MALI_CONFIGURATION
+export KDIR ?= $(KDIR-$(TARGET_PLATFORM))
+export MALI_PLATFORM ?= $(MALI_PLATFORM-$(TARGET_PLATFORM))
+
+ifneq ($(TARGET_PLATFORM),)
+ifeq ($(MALI_PLATFORM),)
+$(error "Invalid TARGET_PLATFORM: $(TARGET_PLATFORM)")
+endif
+endif
+
+# validate lookup result
+ifeq ($(KDIR),)
+$(error No KDIR found for platform $(TARGET_PLATFORM))
+endif
+
+ifeq ($(USING_GPU_UTILIZATION), 1)
+ ifeq ($(USING_DVFS), 1)
+ $(error USING_GPU_UTILIZATION conflict with USING_DVFS you can read the Integration Guide to choose which one do you need)
+ endif
+endif
+
+ifeq ($(USING_UMP),1)
+export CONFIG_MALI400_UMP=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_UMP=1
+ifeq ($(USE_UMPV2),1)
+UMP_SYMVERS_FILE ?= ../umpv2/Module.symvers
+else
+UMP_SYMVERS_FILE ?= ../ump/Module.symvers
+endif
+KBUILD_EXTRA_SYMBOLS = $(realpath $(UMP_SYMVERS_FILE))
+$(warning $(KBUILD_EXTRA_SYMBOLS))
+endif
+
+# Define host system directory
+KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build
+
+include $(KDIR)/.config
+
+ifeq ($(ARCH), arm)
+# when compiling for ARM we're cross compiling
+export CROSS_COMPILE ?= $(call check_cc2, arm-linux-gnueabi-gcc, arm-linux-gnueabi-, arm-none-linux-gnueabi-)
+endif
+
+# report detected/selected settings
+ifdef ARM_INTERNAL_BUILD
+$(warning TARGET_PLATFORM $(TARGET_PLATFORM))
+$(warning KDIR $(KDIR))
+$(warning MALI_PLATFORM $(MALI_PLATFORM))
+endif
+
+# Set up build config
+export CONFIG_MALI400=m
+export CONFIG_MALI450=y
+
+export EXTRA_DEFINES += -DCONFIG_MALI400=1
+export EXTRA_DEFINES += -DCONFIG_MALI450=1
+
+ifneq ($(MALI_PLATFORM),)
+export EXTRA_DEFINES += -DMALI_FAKE_PLATFORM_DEVICE=1
+export MALI_PLATFORM_FILES = $(wildcard platform/$(MALI_PLATFORM)/*.c)
+endif
+
+ifeq ($(USING_PROFILING),1)
+ifeq ($(CONFIG_TRACEPOINTS),)
+$(warning CONFIG_TRACEPOINTS required for profiling)
+else
+export CONFIG_MALI400_PROFILING=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_PROFILING=1
+ifeq ($(USING_INTERNAL_PROFILING),1)
+export CONFIG_MALI400_INTERNAL_PROFILING=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_INTERNAL_PROFILING=1
+endif
+ifeq ($(MALI_HEATMAPS_ENABLED),1)
+export MALI_HEATMAPS_ENABLED=y
+export EXTRA_DEFINES += -DCONFIG_MALI400_HEATMAPS_ENABLED
+endif
+endif
+endif
+
+ifeq ($(MALI_DMA_BUF_MAP_ON_ATTACH),1)
+export CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DMA_BUF_MAP_ON_ATTACH
+endif
+
+ifeq ($(MALI_SHARED_INTERRUPTS),1)
+export CONFIG_MALI_SHARED_INTERRUPTS=y
+export EXTRA_DEFINES += -DCONFIG_MALI_SHARED_INTERRUPTS
+endif
+
+ifeq ($(USING_DVFS),1)
+export CONFIG_MALI_DVFS=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DVFS
+endif
+
+ifeq ($(MALI_PMU_PARALLEL_POWER_UP),1)
+export CONFIG_MALI_PMU_PARALLEL_POWER_UP=y
+export EXTRA_DEFINES += -DCONFIG_MALI_PMU_PARALLEL_POWER_UP
+endif
+
+ifdef CONFIG_OF
+ifeq ($(USING_DT),1)
+export CONFIG_MALI_DT=y
+export EXTRA_DEFINES += -DCONFIG_MALI_DT
+endif
+endif
+
+ifneq ($(BUILD),release)
+# Debug
+export CONFIG_MALI400_DEBUG=y
+else
+# Release
+ifeq ($(MALI_QUIET),1)
+export CONFIG_MALI_QUIET=y
+export EXTRA_DEFINES += -DCONFIG_MALI_QUIET
+endif
+endif
+
+ifeq ($(MALI_SKIP_JOBS),1)
+EXTRA_DEFINES += -DPROFILING_SKIP_PP_JOBS=1 -DPROFILING_SKIP_GP_JOBS=1
+endif
+
+all: $(UMP_SYMVERS_FILE)
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) modules
+ @rm $(FILES_PREFIX)__malidrv_build_info.c $(FILES_PREFIX)__malidrv_build_info.o
+
+clean:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+
+kernelrelease:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) kernelrelease
+
+export CONFIG KBUILD_EXTRA_SYMBOLS
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_broadcast.h"
#include "mali_kernel_common.h"
#include "mali_osk.h"
-static const int bcast_unit_reg_size = 0x1000;
-static const int bcast_unit_addr_broadcast_mask = 0x0;
-static const int bcast_unit_addr_irq_override_mask = 0x4;
+#define MALI_BROADCAST_REGISTER_SIZE 0x1000
+#define MALI_BROADCAST_REG_BROADCAST_MASK 0x0
+#define MALI_BROADCAST_REG_INTERRUPT_MASK 0x4
struct mali_bcast_unit {
struct mali_hw_core hw_core;
struct mali_bcast_unit *bcast_unit = NULL;
MALI_DEBUG_ASSERT_POINTER(resource);
- MALI_DEBUG_PRINT(2, ("Mali Broadcast unit: Creating Mali Broadcast unit: %s\n", resource->description));
+ MALI_DEBUG_PRINT(2, ("Broadcast: Creating Mali Broadcast unit: %s\n",
+ resource->description));
bcast_unit = _mali_osk_malloc(sizeof(struct mali_bcast_unit));
if (NULL == bcast_unit) {
- MALI_PRINT_ERROR(("Mali Broadcast unit: Failed to allocate memory for Broadcast unit\n"));
+ MALI_PRINT_ERROR(("Broadcast: Failed to allocate memory for Broadcast unit\n"));
return NULL;
}
- if (_MALI_OSK_ERR_OK == mali_hw_core_create(&bcast_unit->hw_core, resource, bcast_unit_reg_size)) {
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&bcast_unit->hw_core,
+ resource, MALI_BROADCAST_REGISTER_SIZE)) {
bcast_unit->current_mask = 0;
mali_bcast_reset(bcast_unit);
return bcast_unit;
} else {
- MALI_PRINT_ERROR(("Mali Broadcast unit: Failed map broadcast unit\n"));
+ MALI_PRINT_ERROR(("Broadcast: Failed map broadcast unit\n"));
}
_mali_osk_free(bcast_unit);
void mali_bcast_unit_delete(struct mali_bcast_unit *bcast_unit)
{
MALI_DEBUG_ASSERT_POINTER(bcast_unit);
-
mali_hw_core_delete(&bcast_unit->hw_core);
_mali_osk_free(bcast_unit);
}
* Note: redundant calling this function with same @group
* doesn't make any difference as calling it once
*/
-void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group)
+void mali_bcast_add_group(struct mali_bcast_unit *bcast_unit,
+ struct mali_group *group)
{
u32 bcast_id;
u32 broadcast_mask;
* Note: redundant calling this function with same @group
* doesn't make any difference as calling it once
*/
-void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit, struct mali_group *group)
+void mali_bcast_remove_group(struct mali_bcast_unit *bcast_unit,
+ struct mali_group *group)
{
u32 bcast_id;
u32 broadcast_mask;
{
MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+ MALI_DEBUG_PRINT(4,
+ ("Broadcast: setting mask 0x%08X + 0x%08X (reset)\n",
+ bcast_unit->current_mask,
+ bcast_unit->current_mask & 0xFF));
+
/* set broadcast mask */
mali_hw_core_register_write(&bcast_unit->hw_core,
- bcast_unit_addr_broadcast_mask,
+ MALI_BROADCAST_REG_BROADCAST_MASK,
bcast_unit->current_mask);
/* set IRQ override mask */
mali_hw_core_register_write(&bcast_unit->hw_core,
- bcast_unit_addr_irq_override_mask,
+ MALI_BROADCAST_REG_INTERRUPT_MASK,
bcast_unit->current_mask & 0xFF);
}
{
MALI_DEBUG_ASSERT_POINTER(bcast_unit);
+ MALI_DEBUG_PRINT(4, ("Broadcast: setting mask 0x0 + 0x0 (disable)\n"));
+
/* set broadcast mask */
mali_hw_core_register_write(&bcast_unit->hw_core,
- bcast_unit_addr_broadcast_mask,
+ MALI_BROADCAST_REG_BROADCAST_MASK,
0x0);
/* set IRQ override mask */
mali_hw_core_register_write(&bcast_unit->hw_core,
- bcast_unit_addr_irq_override_mask,
+ MALI_BROADCAST_REG_INTERRUPT_MASK,
0x0);
}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
+#ifndef __MALI_BROADCAST_H__
+#define __MALI_BROADCAST_H__
+
/*
* Interface for the broadcast unit on Mali-450.
*
{
mali_bcast_reset(bcast_unit);
}
+
+#endif /* __MALI_BROADCAST_H__ */
--- /dev/null
+/*
+ * Copyright (C) 2010-2012, 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_utilization.h"
+#include "mali_osk.h"
+#include "mali_osk_mali.h"
+#include "mali_kernel_common.h"
+#include "mali_session.h"
+#include "mali_dvfs_policy.h"
+#include "mali_control_timer.h"
+
+/* Start of the current utilization sampling period (units of the OSK time source). */
+static u64 period_start_time = 0;
+
+/* One-shot OSK timer that drives periodic utilization sampling / DVFS. */
+static _mali_osk_timer_t *mali_control_timer = NULL;
+/* MALI_TRUE while the timer should keep re-arming itself from the callback. */
+static mali_bool timer_running = MALI_FALSE;
+
+/* Sampling interval in ms; overridden by device data control_interval when non-zero. */
+static u32 mali_control_timeout = 50;
+
+void mali_control_timer_add(u32 timeout)
+{
+ _mali_osk_timer_add(mali_control_timer, _mali_osk_time_mstoticks(timeout));
+}
+
+/*
+ * Timer callback: sample GPU utilization for the elapsed period, hand the
+ * result to the DVFS policy (or the platform hook), then re-arm while the
+ * timer is marked running.
+ */
+static void mali_control_timer_callback(void *arg)
+{
+	if (mali_utilization_enabled()) {
+		struct mali_gpu_utilization_data *util_data = NULL;
+		u64 time_period = 0;
+
+		/* Calculate gpu utilization */
+		util_data = mali_utilization_calculate(&period_start_time, &time_period);
+
+		if (util_data) {
+#if defined(CONFIG_MALI_DVFS)
+			/* Let the DVFS policy act on the measured utilization. */
+			mali_dvfs_policy_realize(util_data, time_period);
+#else
+			/* No DVFS: forward the utilization data to the platform instead. */
+			mali_utilization_platform_realize(util_data);
+#endif
+		}
+
+		/* Re-arm only while mali_control_timer_resume() has us running. */
+		if (MALI_TRUE == timer_running) {
+			mali_control_timer_add(mali_control_timeout);
+		}
+	}
+}
+
+/* Init a timer (for now it is used for GPU utilization and dvfs) */
+_mali_osk_errcode_t mali_control_timer_init(void)
+{
+	_mali_osk_device_data data;
+
+	if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+		/* Use device specific settings (if defined) */
+		if (0 != data.control_interval) {
+			mali_control_timeout = data.control_interval;
+			MALI_DEBUG_PRINT(2, ("Mali GPU Timer: %u\n", mali_control_timeout));
+		}
+	}
+
+	mali_control_timer = _mali_osk_timer_init();
+	if (NULL == mali_control_timer) {
+		return _MALI_OSK_ERR_FAULT;
+	}
+	/* The timer is only created here, not armed; see mali_control_timer_add(). */
+	_mali_osk_timer_setcallback(mali_control_timer, mali_control_timer_callback, NULL);
+
+	return _MALI_OSK_ERR_OK;
+}
+
+/* Cancel and destroy the control timer; safe to call when it was never created. */
+void mali_control_timer_term(void)
+{
+	if (NULL != mali_control_timer) {
+		/* Cancel any pending expiry before tearing the timer down. */
+		_mali_osk_timer_del(mali_control_timer);
+		timer_running = MALI_FALSE;
+		_mali_osk_timer_term(mali_control_timer);
+		mali_control_timer = NULL;
+	}
+}
+
+/*
+ * Mark the timer as running and start a new sampling period at @time_now.
+ * Returns MALI_TRUE when the timer was not already running (i.e. the state
+ * changed), MALI_FALSE otherwise.
+ */
+mali_bool mali_control_timer_resume(u64 time_now)
+{
+	if (timer_running != MALI_TRUE) {
+		timer_running = MALI_TRUE;
+
+		period_start_time = time_now;
+
+		/* Discard utilization accumulated while we were stopped. */
+		mali_utilization_reset();
+
+		return MALI_TRUE;
+	}
+
+	return MALI_FALSE;
+}
+
+/*
+ * Mark the timer as not running.  When @suspend is MALI_TRUE the pending
+ * timer is also cancelled and utilization state is reset.  The utilization
+ * data lock guards the test-and-clear of timer_running.
+ */
+void mali_control_timer_suspend(mali_bool suspend)
+{
+	mali_utilization_data_lock();
+
+	if (timer_running == MALI_TRUE) {
+		timer_running = MALI_FALSE;
+
+		/* Drop the lock before touching the timer itself. */
+		mali_utilization_data_unlock();
+
+		if (suspend == MALI_TRUE) {
+			_mali_osk_timer_del(mali_control_timer);
+			mali_utilization_reset();
+		}
+	} else {
+		mali_utilization_data_unlock();
+	}
+}
--- /dev/null
+/*
+ * Copyright (C) 2010-2012, 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_CONTROL_TIMER_H__
+#define __MALI_CONTROL_TIMER_H__
+
+#include "mali_osk.h"
+
+/* Create the control timer and register its callback (does not arm it). */
+_mali_osk_errcode_t mali_control_timer_init(void);
+
+/* Cancel and destroy the control timer; safe if init never ran. */
+void mali_control_timer_term(void);
+
+/* Start a new sampling period at @time_now; MALI_TRUE if it was stopped before. */
+mali_bool mali_control_timer_resume(u64 time_now);
+
+/* Stop periodic sampling; also cancels the pending timer when @suspend is MALI_TRUE. */
+void mali_control_timer_suspend(mali_bool suspend);
+
+/* Arm the timer to fire once after @timeout milliseconds. */
+void mali_control_timer_add(u32 timeout);
+
+#endif /* __MALI_CONTROL_TIMER_H__ */
+
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_dlbu.h"
_mali_osk_errcode_t mali_dlbu_initialize(void)
{
-
MALI_DEBUG_PRINT(2, ("Mali DLBU: Initializing\n"));
- if (_MALI_OSK_ERR_OK == mali_mmu_get_table_page(&mali_dlbu_phys_addr, &mali_dlbu_cpu_addr)) {
- MALI_SUCCESS;
+ if (_MALI_OSK_ERR_OK ==
+ mali_mmu_get_table_page(&mali_dlbu_phys_addr,
+ &mali_dlbu_cpu_addr)) {
+ return _MALI_OSK_ERR_OK;
}
return _MALI_OSK_ERR_FAULT;
{
MALI_DEBUG_PRINT(3, ("Mali DLBU: terminating\n"));
- mali_mmu_release_table_page(mali_dlbu_phys_addr, mali_dlbu_cpu_addr);
+ if (0 != mali_dlbu_phys_addr && 0 != mali_dlbu_cpu_addr) {
+ mali_mmu_release_table_page(mali_dlbu_phys_addr,
+ mali_dlbu_cpu_addr);
+ mali_dlbu_phys_addr = 0;
+ mali_dlbu_cpu_addr = 0;
+ }
}
struct mali_dlbu_core *mali_dlbu_create(const _mali_osk_resource_t *resource)
void mali_dlbu_delete(struct mali_dlbu_core *dlbu)
{
MALI_DEBUG_ASSERT_POINTER(dlbu);
-
- mali_dlbu_reset(dlbu);
mali_hw_core_delete(&dlbu->hw_core);
_mali_osk_free(dlbu);
}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_DLBU_H__
--- /dev/null
+/*
+ * Copyright (C) 2010-2012, 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+#include "mali_scheduler.h"
+#include "mali_dvfs_policy.h"
+#include "mali_osk_mali.h"
+#include "mali_osk_profiling.h"
+
+/* When 1, time the cost of one clock-tuning pass with do_gettimeofday(). */
+#define CLOCK_TUNING_TIME_DEBUG 0
+
+#define MAX_PERFORMANCE_VALUE 256
+/* Convert a percentage (0-100) into a fraction of MAX_PERFORMANCE_VALUE, rounded. */
+#define MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(percent) ((int) ((percent)*(MAX_PERFORMANCE_VALUE)/100.0 + 0.5))
+
+/** The maximum FPS, the same as the display vsync rate (default 60); can be set as a module parameter. */
+int mali_max_system_fps = 60;
+/** A lower limit on the desired FPS (default 58); can be set as a module parameter. */
+int mali_desired_fps = 58;
+
+/* Intermediate FPS thresholds, derived from mali_max_system_fps in mali_dvfs_policy_init(). */
+static int mali_fps_step1 = 0;
+static int mali_fps_step2 = 0;
+
+/* Clock step chosen by the policy (index into gpu_clk->item) and the current step. */
+static int clock_step = -1;
+static int cur_clk_step = -1;
+/* Platform-provided clock table; NULL until mali_dvfs_policy_init() fetches it. */
+static struct mali_gpu_clock *gpu_clk = NULL;
+
+/* Frequency get/set callbacks supplied by the platform (set in mali_dvfs_policy_init()). */
+static int (*mali_gpu_set_freq)(int) = NULL;
+static int (*mali_gpu_get_freq)(void) = NULL;
+
+/* MALI_TRUE once init found a usable clock table and both callbacks. */
+static mali_bool mali_dvfs_enabled = MALI_FALSE;
+
+#define NUMBER_OF_NANOSECONDS_PER_SECOND 1000000000ULL
+/*
+ * Derive a window-render FPS figure for the period: the number of window
+ * surfaces rendered (mali_session_max_window_num()) divided by the period
+ * length.  The dividend is scaled to nanoseconds so integer division can be
+ * used, and both operands are shifted down equally so a 32-bit divide
+ * suffices.
+ */
+static u32 calculate_window_render_fps(u64 time_period)
+{
+	u32 max_window_number;
+	u64 tmp;
+	u64 max = time_period;
+	u32 leading_zeroes;
+	u32 shift_val;
+	u32 time_period_shift;
+	u32 max_window_number_shift;
+	u32 ret_val;
+
+	max_window_number = mali_session_max_window_num();
+
+	/* To avoid float division, extend the dividend to ns unit */
+	tmp = (u64)max_window_number * NUMBER_OF_NANOSECONDS_PER_SECOND;
+	if (tmp > time_period) {
+		max = tmp;
+	}
+
+	/*
+	 * We may have 64-bit values, a dividend or a divisor or both
+	 * To avoid dependencies to a 64-bit divider, we shift down the two values
+	 * equally first.
+	 */
+	leading_zeroes = _mali_osk_clz((u32)(max >> 32));
+	shift_val = 32 - leading_zeroes;
+
+	time_period_shift = (u32)(time_period >> shift_val);
+	max_window_number_shift = (u32)(tmp >> shift_val);
+
+	/* NOTE(review): if time_period is much smaller than tmp, the shared shift
+	 * can reduce time_period_shift to 0 — confirm a divide-by-zero cannot
+	 * occur for the periods the control timer produces. */
+	ret_val = max_window_number_shift / time_period_shift;
+
+	return ret_val;
+}
+
+/*
+ * Pick the clock-table step closest to @target_clock_mhz and store it in the
+ * file-scope clock_step.  When @pick_clock_up is false, prefer the next step
+ * below the target.  Returns true when clock_step differs from cur_clk_step.
+ */
+static bool mali_pickup_closest_avail_clock(int target_clock_mhz, mali_bool pick_clock_up)
+{
+	int i = 0;
+	bool clock_changed = false;
+
+	/* Round up the closest available frequency step for target_clock_hz */
+	for (i = 0; i < gpu_clk->num_of_steps; i++) {
+		/* Find the first item > target_clock_hz */
+		if (((int)(gpu_clk->item[i].clock) - target_clock_mhz) > 0) {
+			break;
+		}
+	}
+
+	/* If the target clock is greater than the maximum clock, just pick the maximum one */
+	if (i == gpu_clk->num_of_steps) {
+		i = gpu_clk->num_of_steps - 1;
+	} else {
+		if ((!pick_clock_up) && (i > 0)) {
+			i = i - 1;
+		}
+	}
+
+	clock_step = i;
+	if (cur_clk_step != clock_step) {
+		clock_changed = true;
+	}
+
+	return clock_changed;
+}
+
+/*
+ * Core DVFS decision function.  Converts the period's utilization sample and
+ * window-render FPS into a target clock step, then applies it through the
+ * platform set_freq callback and reports the change to the profiler.
+ * @data        utilization data for the elapsed period
+ * @time_period length of the elapsed period
+ */
+void mali_dvfs_policy_realize(struct mali_gpu_utilization_data *data, u64 time_period)
+{
+	int under_perform_boundary_value = 0;
+	int over_perform_boundary_value = 0;
+	int current_fps = 0;
+	int current_gpu_util = 0;
+	bool clock_changed = false;
+#if CLOCK_TUNING_TIME_DEBUG
+	struct timeval start;
+	struct timeval stop;
+	unsigned int elapse_time;
+	do_gettimeofday(&start);
+#endif
+	/* NOTE(review): with CLOCK_TUNING_TIME_DEBUG=1 this declaration follows a
+	 * statement (C89 mixed declarations) — confirm the build allows it. */
+	u32 window_render_fps;
+
+	if (NULL == gpu_clk) {
+		MALI_DEBUG_PRINT(2, ("Enable DVFS but patform doesn't Support freq change. \n"));
+		return;
+	}
+
+	window_render_fps = calculate_window_render_fps(time_period);
+
+	current_fps = window_render_fps;
+	current_gpu_util = data->utilization_gpu;
+
+	/* Get the specific under_perform_boundary_value and over_perform_boundary_value */
+	if ((mali_desired_fps <= current_fps) && (current_fps < mali_max_system_fps)) {
+		under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(90);
+		over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(70);
+	} else if ((mali_fps_step1 <= current_fps) && (current_fps < mali_desired_fps)) {
+		under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(55);
+		over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(35);
+	} else if ((mali_fps_step2 <= current_fps) && (current_fps < mali_fps_step1)) {
+		under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(70);
+		over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(50);
+	} else {
+		under_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(55);
+		over_perform_boundary_value = MALI_PERCENTAGE_TO_UTILIZATION_FRACTION(35);
+	}
+
+	MALI_DEBUG_PRINT(5, ("Using ARM power policy: gpu util = %d \n", current_gpu_util));
+	MALI_DEBUG_PRINT(5, ("Using ARM power policy: under_perform = %d, over_perform = %d \n", under_perform_boundary_value, over_perform_boundary_value));
+	MALI_DEBUG_PRINT(5, ("Using ARM power policy: render fps = %d, pressure render fps = %d \n", current_fps, window_render_fps));
+
+	/* Get current clock value */
+	cur_clk_step = mali_gpu_get_freq();
+
+	/* Consider offscreen */
+	if (0 == current_fps) {
+		/* GP or PP under perform, need to give full power */
+		if (current_gpu_util > over_perform_boundary_value) {
+			if (cur_clk_step != gpu_clk->num_of_steps - 1) {
+				clock_changed = true;
+				clock_step = gpu_clk->num_of_steps - 1;
+			}
+		}
+
+		/* If GPU is idle, use lowest power */
+		if (0 == current_gpu_util) {
+			if (cur_clk_step != 0) {
+				clock_changed = true;
+				clock_step = 0;
+			}
+		}
+
+		goto real_setting;
+	}
+
+	/* 2. Calculate target clock if the GPU clock can be tuned */
+	if (-1 != cur_clk_step) {
+		int target_clk_mhz = -1;
+		mali_bool pick_clock_up = MALI_TRUE;
+
+		if (current_gpu_util > under_perform_boundary_value) {
+			/* when under perform, need to consider the fps part */
+			target_clk_mhz = gpu_clk->item[cur_clk_step].clock * current_gpu_util * mali_desired_fps / under_perform_boundary_value / current_fps;
+			pick_clock_up = MALI_TRUE;
+		} else if (current_gpu_util < over_perform_boundary_value) {
+			/* when over perform, didn't need to consider fps, system didn't want to reach desired fps */
+			target_clk_mhz = gpu_clk->item[cur_clk_step].clock * current_gpu_util / under_perform_boundary_value;
+			pick_clock_up = MALI_FALSE;
+		}
+
+		if (-1 != target_clk_mhz) {
+			clock_changed = mali_pickup_closest_avail_clock(target_clk_mhz, pick_clock_up);
+		}
+	}
+
+real_setting:
+	if (clock_changed) {
+		mali_gpu_set_freq(clock_step);
+
+		/* Report the new frequency/voltage point to the profiling stream. */
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+					      MALI_PROFILING_EVENT_CHANNEL_GPU |
+					      MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+					      gpu_clk->item[clock_step].clock,
+					      gpu_clk->item[clock_step].vol / 1000,
+					      0, 0, 0);
+	}
+
+#if CLOCK_TUNING_TIME_DEBUG
+	do_gettimeofday(&stop);
+
+	elapse_time = timeval_to_ns(&stop) - timeval_to_ns(&start);
+	MALI_DEBUG_PRINT(2, ("Using ARM power policy: eclapse time = %d\n", elapse_time));
+#endif
+}
+
+/*
+ * Fetch the clock table and get/set-frequency callbacks from the platform
+ * device data and derive the FPS thresholds.  mali_dvfs_enabled is set only
+ * when a non-empty clock table and both callbacks are available.
+ * Returns _MALI_OSK_ERR_FAULT when the platform data cannot be read.
+ */
+_mali_osk_errcode_t mali_dvfs_policy_init(void)
+{
+	_mali_osk_device_data data;
+	_mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
+	if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+		if ((NULL != data.get_clock_info) && (NULL != data.set_freq) && (NULL != data.get_freq)) {
+			MALI_DEBUG_PRINT(2, ("Mali DVFS init: using arm dvfs policy \n"));
+
+
+			/* FPS thresholds used by the boundary selection in realize(). */
+			mali_fps_step1 = mali_max_system_fps / 3;
+			mali_fps_step2 = mali_max_system_fps / 5;
+
+			data.get_clock_info(&gpu_clk);
+
+			if (gpu_clk != NULL) {
+#ifdef DEBUG
+				int i;
+				for (i = 0; i < gpu_clk->num_of_steps; i++) {
+					MALI_DEBUG_PRINT(5, ("mali gpu clock info: step%d clock(%d)Hz,vol(%d) \n",
+							     i, gpu_clk->item[i].clock, gpu_clk->item[i].vol));
+				}
+#endif
+			} else {
+				MALI_DEBUG_PRINT(2, ("Mali DVFS init: platform didn't define enough info for ddk to do DVFS \n"));
+			}
+
+			mali_gpu_get_freq = data.get_freq;
+			mali_gpu_set_freq = data.set_freq;
+
+			if ((NULL != gpu_clk) && (gpu_clk->num_of_steps > 0)
+			    && (NULL != mali_gpu_get_freq) && (NULL != mali_gpu_set_freq)) {
+				mali_dvfs_enabled = MALI_TRUE;
+			}
+		} else {
+			MALI_DEBUG_PRINT(2, ("Mali DVFS init: platform function callback incomplete, need check mali_gpu_device_data in platform .\n"));
+		}
+	} else {
+		err = _MALI_OSK_ERR_FAULT;
+		MALI_DEBUG_PRINT(2, ("Mali DVFS init: get platform data error .\n"));
+	}
+
+	return err;
+}
+
+/*
+ * Always give full power when starting a new period, if Mali DVFS is
+ * enabled, for performance consideration.
+ */
+void mali_dvfs_policy_new_period(void)
+{
+	/* Always give full power when start a new period */
+	/* NOTE(review): this local shadows the file-scope cur_clk_step, and
+	 * mali_gpu_get_freq/gpu_clk are dereferenced without NULL checks —
+	 * presumably only called when mali_dvfs_policy_enabled(); confirm at
+	 * the call sites. */
+	unsigned int cur_clk_step = 0;
+
+	cur_clk_step = mali_gpu_get_freq();
+
+	if (cur_clk_step != (gpu_clk->num_of_steps - 1)) {
+		mali_gpu_set_freq(gpu_clk->num_of_steps - 1);
+
+		/* Log the frequency/voltage jump for the profiler. */
+		_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+					      MALI_PROFILING_EVENT_CHANNEL_GPU |
+					      MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE, gpu_clk->item[gpu_clk->num_of_steps - 1].clock,
+					      gpu_clk->item[gpu_clk->num_of_steps - 1].vol / 1000, 0, 0, 0);
+	}
+}
+
+/* MALI_TRUE once mali_dvfs_policy_init() found a usable clock table and callbacks. */
+mali_bool mali_dvfs_policy_enabled(void)
+{
+	return mali_dvfs_enabled;
+}
+
+#if defined(CONFIG_MALI400_PROFILING)
+/*
+ * Fill @clk_item with the clock and voltage of the platform's current
+ * frequency step, queried directly through the platform device callbacks.
+ */
+void mali_get_current_gpu_clk_item(struct mali_gpu_clk_item *clk_item)
+{
+	if (mali_platform_device != NULL) {
+
+		struct mali_gpu_device_data *device_data = NULL;
+		device_data = (struct mali_gpu_device_data *)mali_platform_device->dev.platform_data;
+
+		if ((NULL != device_data->get_clock_info) && (NULL != device_data->get_freq)) {
+
+			int cur_clk_step = device_data->get_freq();
+			struct mali_gpu_clock *mali_gpu_clk = NULL;
+
+			device_data->get_clock_info(&mali_gpu_clk);
+			clk_item->clock = mali_gpu_clk->item[cur_clk_step].clock;
+			clk_item->vol = mali_gpu_clk->item[cur_clk_step].vol;
+		} else {
+			MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: platform function callback incomplete, need check mali_gpu_device_data in platform .\n"));
+		}
+	}
+}
+#endif
+
--- /dev/null
+/*
+ * Copyright (C) 2010-2012, 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_DVFS_POLICY_H__
+#define __MALI_DVFS_POLICY_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void mali_dvfs_policy_realize(struct mali_gpu_utilization_data *data, u64 time_period);
+
+_mali_osk_errcode_t mali_dvfs_policy_init(void);
+
+void mali_dvfs_policy_new_period(void);
+
+mali_bool mali_dvfs_policy_enabled(void);
+
+#if defined(CONFIG_MALI400_PROFILING)
+void mali_get_current_gpu_clk_item(struct mali_gpu_clk_item *clk_item);
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif/* __MALI_DVFS_POLICY_H__ */
--- /dev/null
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_executor.h"
+#include "mali_scheduler.h"
+#include "mali_kernel_common.h"
+#include "mali_kernel_core.h"
+#include "mali_osk.h"
+#include "mali_osk_list.h"
+#include "mali_pp.h"
+#include "mali_pp_job.h"
+#include "mali_group.h"
+#include "mali_pm.h"
+#include "mali_timeline.h"
+#include "mali_osk_profiling.h"
+#include "mali_session.h"
+
+/*Add for voltage scan function*/
+extern u32 mali_group_error;
+
+/*
+ * If dma_buf with map on demand is used, we defer job deletion and job queue
+ * if in atomic context, since both might sleep.
+ */
+#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#define MALI_EXECUTOR_USE_DEFERRED_PP_JOB_DELETE 1
+#define MALI_EXECUTOR_USE_DEFERRED_PP_JOB_QUEUE 1
+#endif /* defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH) */
+
+/*
+ * ---------- static type definitions (structs, enums, etc) ----------
+ */
+
+enum mali_executor_state_t {
+ EXEC_STATE_NOT_PRESENT, /* Virtual group on Mali-300/400 (do not use) */
+ EXEC_STATE_DISABLED, /* Disabled by core scaling (do not use) */
+ EXEC_STATE_EMPTY, /* No child groups for virtual group (do not use) */
+ EXEC_STATE_INACTIVE, /* Can be used, but must be activate first */
+ EXEC_STATE_IDLE, /* Active and ready to be used */
+ EXEC_STATE_WORKING, /* Executing a job */
+};
+
+/*
+ * ---------- global variables (exported due to inline functions) ----------
+ */
+
+/* Lock for this module (protecting all HW access except L2 caches) */
+_mali_osk_spinlock_irq_t *mali_executor_lock_obj = NULL;
+
+mali_bool mali_executor_hints[MALI_EXECUTOR_HINT_MAX];
+
+/*
+ * ---------- static variables ----------
+ */
+
+/* Used to defer job scheduling */
+static _mali_osk_wq_work_t *executor_wq_high_pri = NULL;
+
+/* Store version from GP and PP (user space wants to know this) */
+static u32 pp_version = 0;
+static u32 gp_version = 0;
+
+/* List of physical PP groups which are disabled by some external source */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_disabled);
+static u32 group_list_disabled_count = 0;
+
+/* List of groups which can be used, but activate first */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_inactive);
+static u32 group_list_inactive_count = 0;
+
+/* List of groups which are active and ready to be used */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_idle);
+static u32 group_list_idle_count = 0;
+
+/* List of groups which are executing a job */
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(group_list_working);
+static u32 group_list_working_count = 0;
+
+/* Virtual group (if any) */
+static struct mali_group *virtual_group = NULL;
+
+/* Virtual group state is tracked with a state variable instead of 4 lists */
+static enum mali_executor_state_t virtual_group_state = EXEC_STATE_NOT_PRESENT;
+
+/* GP group */
+static struct mali_group *gp_group = NULL;
+
+/* GP group state is tracked with a state variable instead of 4 lists */
+static enum mali_executor_state_t gp_group_state = EXEC_STATE_NOT_PRESENT;
+
+static u32 gp_returned_cookie = 0;
+
+/* Total number of physical PP cores present */
+static u32 num_physical_pp_cores_total = 0;
+
+/* Number of physical cores which are enabled */
+static u32 num_physical_pp_cores_enabled = 0;
+
+/* Enable or disable core scaling */
+static mali_bool core_scaling_enabled = MALI_TRUE;
+
+/* Variables to allow safe pausing of the scheduler */
+static _mali_osk_wait_queue_t *executor_working_wait_queue = NULL;
+static u32 pause_count = 0;
+
+/* Domains whose PP cores could not yet be enabled because some PP cores have not finished disabling. */
+static int core_scaling_delay_up_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+
+/* Variables used to implement notify pp core changes to userspace when core scaling
+ * is finished in mali_executor_complete_group() function. */
+static _mali_osk_wq_work_t *executor_wq_notify_core_change = NULL;
+static _mali_osk_wait_queue_t *executor_notify_core_change_wait_queue = NULL;
+
+/*
+ * ---------- Forward declaration of static functions ----------
+ */
+static void mali_executor_lock(void);
+static void mali_executor_unlock(void);
+static mali_bool mali_executor_is_suspended(void *data);
+static mali_bool mali_executor_is_working(void);
+static void mali_executor_disable_empty_virtual(void);
+static mali_bool mali_executor_physical_rejoin_virtual(struct mali_group *group);
+static mali_bool mali_executor_has_virtual_group(void);
+static mali_bool mali_executor_virtual_group_is_usable(void);
+static void mali_executor_schedule(void);
+static void mali_executor_wq_schedule(void *arg);
+static void mali_executor_send_gp_oom_to_user(struct mali_gp_job *job);
+static void mali_executor_complete_group(struct mali_group *group,
+ mali_bool success,
+ mali_bool release_jobs,
+ struct mali_gp_job **gp_job_done,
+ struct mali_pp_job **pp_job_done);
+static void mali_executor_change_state_pp_physical(struct mali_group *group,
+ _mali_osk_list_t *old_list,
+ u32 *old_count,
+ _mali_osk_list_t *new_list,
+ u32 *new_count);
+static mali_bool mali_executor_group_is_in_state(struct mali_group *group,
+ enum mali_executor_state_t state);
+
+static void mali_executor_group_enable_internal(struct mali_group *group);
+static void mali_executor_group_disable_internal(struct mali_group *group);
+static void mali_executor_core_scale(unsigned int target_core_nr);
+static void mali_executor_core_scale_in_group_complete(struct mali_group *group);
+static void mali_executor_notify_core_change(u32 num_cores);
+static void mali_executor_wq_notify_core_change(void *arg);
+static void mali_executor_change_group_status_disabled(struct mali_group *group);
+static mali_bool mali_executor_deactivate_list_idle(mali_bool deactivate_idle_group);
+static void mali_executor_set_state_pp_physical(struct mali_group *group,
+ _mali_osk_list_t *new_list,
+ u32 *new_count);
+
+/*
+ * ---------- Actual implementation ----------
+ */
+
+_mali_osk_errcode_t mali_executor_initialize(void)
+{
+ mali_executor_lock_obj = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_EXECUTOR);
+ if (NULL == mali_executor_lock_obj) {
+ mali_executor_terminate();
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ executor_wq_high_pri = _mali_osk_wq_create_work_high_pri(mali_executor_wq_schedule, NULL);
+ if (NULL == executor_wq_high_pri) {
+ mali_executor_terminate();
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ executor_working_wait_queue = _mali_osk_wait_queue_init();
+ if (NULL == executor_working_wait_queue) {
+ mali_executor_terminate();
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ executor_wq_notify_core_change = _mali_osk_wq_create_work(mali_executor_wq_notify_core_change, NULL);
+ if (NULL == executor_wq_notify_core_change) {
+ mali_executor_terminate();
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ executor_notify_core_change_wait_queue = _mali_osk_wait_queue_init();
+ if (NULL == executor_notify_core_change_wait_queue) {
+ mali_executor_terminate();
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_executor_terminate(void)
+{
+ if (NULL != executor_notify_core_change_wait_queue) {
+ _mali_osk_wait_queue_term(executor_notify_core_change_wait_queue);
+ executor_notify_core_change_wait_queue = NULL;
+ }
+
+ if (NULL != executor_wq_notify_core_change) {
+ _mali_osk_wq_delete_work(executor_wq_notify_core_change);
+ executor_wq_notify_core_change = NULL;
+ }
+
+ if (NULL != executor_working_wait_queue) {
+ _mali_osk_wait_queue_term(executor_working_wait_queue);
+ executor_working_wait_queue = NULL;
+ }
+
+ if (NULL != executor_wq_high_pri) {
+ _mali_osk_wq_delete_work(executor_wq_high_pri);
+ executor_wq_high_pri = NULL;
+ }
+
+ if (NULL != mali_executor_lock_obj) {
+ _mali_osk_spinlock_irq_term(mali_executor_lock_obj);
+ mali_executor_lock_obj = NULL;
+ }
+}
+
+void mali_executor_populate(void)
+{
+ u32 num_groups;
+ u32 i;
+
+ num_groups = mali_group_get_glob_num_groups();
+
+ /* Do we have a virtual group? */
+ for (i = 0; i < num_groups; i++) {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ if (mali_group_is_virtual(group)) {
+ virtual_group = group;
+ virtual_group_state = EXEC_STATE_INACTIVE;
+ break;
+ }
+ }
+
+ /* Find all the available physical GP and PP cores */
+ for (i = 0; i < num_groups; i++) {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ if (NULL != group) {
+ struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+ struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+
+ if (!mali_group_is_virtual(group)) {
+ if (NULL != pp_core) {
+ if (0 == pp_version) {
+ /* Retrieve PP version from the first available PP core */
+ pp_version = mali_pp_core_get_version(pp_core);
+ }
+
+ if (NULL != virtual_group) {
+ mali_executor_lock();
+ mali_group_add_group(virtual_group, group);
+ mali_executor_unlock();
+ } else {
+ _mali_osk_list_add(&group->executor_list, &group_list_inactive);
+ group_list_inactive_count++;
+ }
+
+ num_physical_pp_cores_total++;
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(gp_core);
+
+ if (0 == gp_version) {
+ /* Retrieve GP version */
+ gp_version = mali_gp_core_get_version(gp_core);
+ }
+
+ gp_group = group;
+ gp_group_state = EXEC_STATE_INACTIVE;
+ }
+
+ }
+ }
+ }
+
+ num_physical_pp_cores_enabled = num_physical_pp_cores_total;
+}
+
+void mali_executor_depopulate(void)
+{
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != gp_group_state);
+
+ if (NULL != gp_group) {
+ mali_group_delete(gp_group);
+ gp_group = NULL;
+ }
+
+ MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != virtual_group_state);
+
+ if (NULL != virtual_group) {
+ mali_group_delete(virtual_group);
+ virtual_group = NULL;
+ }
+
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&group_list_working));
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) {
+ mali_group_delete(group);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) {
+ mali_group_delete(group);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) {
+ mali_group_delete(group);
+ }
+}
+
+void mali_executor_suspend(void)
+{
+ mali_executor_lock();
+
+ /* Increment the pause_count so that no more jobs will be scheduled */
+ pause_count++;
+
+ mali_executor_unlock();
+
+ _mali_osk_wait_queue_wait_event(executor_working_wait_queue,
+ mali_executor_is_suspended, NULL);
+
+ /*
+ * mali_executor_complete_XX() leaves jobs in idle state.
+ * deactivate option is used when we are going to power down
+ * the entire GPU (OS suspend) and want a consistent SW vs HW
+ * state.
+ */
+ mali_executor_lock();
+
+ mali_executor_deactivate_list_idle(MALI_TRUE);
+
+	/*
+	 * The following steps deactivate all activated
+	 * (MALI_GROUP_STATE_ACTIVE) and activating (MALI_GROUP
+	 * _STATE_ACTIVATION_PENDING) groups, to make sure the variable
+	 * pd_mask_wanted is equal to 0. */
+ if (MALI_GROUP_STATE_INACTIVE != mali_group_get_state(gp_group)) {
+ gp_group_state = EXEC_STATE_INACTIVE;
+ mali_group_deactivate(gp_group);
+ }
+
+ if (mali_executor_has_virtual_group()) {
+ if (MALI_GROUP_STATE_INACTIVE
+ != mali_group_get_state(virtual_group)) {
+ virtual_group_state = EXEC_STATE_INACTIVE;
+ mali_group_deactivate(virtual_group);
+ }
+ }
+
+ if (0 < group_list_inactive_count) {
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp,
+ &group_list_inactive,
+ struct mali_group, executor_list) {
+ if (MALI_GROUP_STATE_ACTIVATION_PENDING
+ == mali_group_get_state(group)) {
+ mali_group_deactivate(group);
+ }
+
+			/*
+			 * On the Mali-450 platform we may have a physical group in the
+			 * inactive list whose state is MALI_GROUP_STATE_ACTIVATION_PENDING;
+			 * deactivating it alone is not enough — it must also be added back
+			 * to the virtual group. At this point the virtual group must be in
+			 * the INACTIVE state, so it is safe to add the physical group back.
+			 */
+ if (NULL != virtual_group) {
+ _mali_osk_list_delinit(&group->executor_list);
+ group_list_inactive_count--;
+
+ mali_group_add_group(virtual_group, group);
+ }
+ }
+ }
+
+ mali_executor_unlock();
+}
+
+void mali_executor_resume(void)
+{
+ mali_executor_lock();
+
+ /* Decrement pause_count to allow scheduling again (if it reaches 0) */
+ pause_count--;
+ if (0 == pause_count) {
+ mali_executor_schedule();
+ }
+
+ mali_executor_unlock();
+}
+
+u32 mali_executor_get_num_cores_total(void)
+{
+ return num_physical_pp_cores_total;
+}
+
+u32 mali_executor_get_num_cores_enabled(void)
+{
+ return num_physical_pp_cores_enabled;
+}
+
+struct mali_pp_core *mali_executor_get_virtual_pp(void)
+{
+ MALI_DEBUG_ASSERT_POINTER(virtual_group);
+ MALI_DEBUG_ASSERT_POINTER(virtual_group->pp_core);
+ return virtual_group->pp_core;
+}
+
+struct mali_group *mali_executor_get_virtual_group(void)
+{
+ return virtual_group;
+}
+
+void mali_executor_zap_all_active(struct mali_session_data *session)
+{
+ struct mali_group *group;
+ struct mali_group *temp;
+ mali_bool ret;
+
+ mali_executor_lock();
+
+ /*
+ * This function is a bit complicated because
+ * mali_group_zap_session() can fail. This only happens because the
+ * group is in an unhandled page fault status.
+ * We need to make sure this page fault is handled before we return,
+ * so that we know every single outstanding MMU transactions have
+ * completed. This will allow caller to safely remove physical pages
+ * when we have returned.
+ */
+
+ MALI_DEBUG_ASSERT(NULL != gp_group);
+ ret = mali_group_zap_session(gp_group, session);
+ if (MALI_FALSE == ret) {
+ struct mali_gp_job *gp_job = NULL;
+
+ mali_executor_complete_group(gp_group, MALI_FALSE,
+ MALI_TRUE, &gp_job, NULL);
+
+ MALI_DEBUG_ASSERT_POINTER(gp_job);
+
+ /* GP job completed, make sure it is freed */
+ mali_scheduler_complete_gp_job(gp_job, MALI_FALSE,
+ MALI_TRUE, MALI_TRUE);
+ }
+
+ if (mali_executor_has_virtual_group()) {
+ ret = mali_group_zap_session(virtual_group, session);
+ if (MALI_FALSE == ret) {
+ struct mali_pp_job *pp_job = NULL;
+
+ mali_executor_complete_group(virtual_group, MALI_FALSE,
+ MALI_TRUE, NULL, &pp_job);
+
+ if (NULL != pp_job) {
+ /* PP job completed, make sure it is freed */
+ mali_scheduler_complete_pp_job(pp_job, 0,
+ MALI_FALSE, MALI_TRUE);
+ }
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working,
+ struct mali_group, executor_list) {
+ ret = mali_group_zap_session(group, session);
+ if (MALI_FALSE == ret) {
+ ret = mali_group_zap_session(group, session);
+ if (MALI_FALSE == ret) {
+ struct mali_pp_job *pp_job = NULL;
+
+ mali_executor_complete_group(group, MALI_FALSE,
+ MALI_TRUE, NULL, &pp_job);
+
+ if (NULL != pp_job) {
+ /* PP job completed, free it */
+ mali_scheduler_complete_pp_job(pp_job,
+ 0, MALI_FALSE,
+ MALI_TRUE);
+ }
+ }
+ }
+ }
+
+ mali_executor_unlock();
+}
+
+void mali_executor_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule)
+{
+ if (MALI_SCHEDULER_MASK_EMPTY != mask) {
+ if (MALI_TRUE == deferred_schedule) {
+ _mali_osk_wq_schedule_work_high_pri(executor_wq_high_pri);
+ } else {
+ /* Schedule from this thread*/
+ mali_executor_lock();
+ mali_executor_schedule();
+ mali_executor_unlock();
+ }
+ }
+}
+
+_mali_osk_errcode_t mali_executor_interrupt_gp(struct mali_group *group,
+ mali_bool in_upper_half)
+{
+ enum mali_interrupt_result int_result;
+ mali_bool time_out = MALI_FALSE;
+
+ MALI_DEBUG_PRINT(4, ("Executor: GP interrupt from %s in %s half\n",
+ mali_group_core_description(group),
+ in_upper_half ? "upper" : "bottom"));
+
+ mali_executor_lock();
+ if (!mali_group_is_working(group)) {
+ /* Not working, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(mali_group_is_working(group));
+
+ if (mali_group_has_timed_out(group)) {
+ int_result = MALI_INTERRUPT_RESULT_ERROR;
+ time_out = MALI_TRUE;
+ MALI_PRINT(("Executor GP: Job %d Timeout on %s\n",
+ mali_gp_job_get_id(group->gp_running_job),
+ mali_group_core_description(group)));
+ } else {
+ int_result = mali_group_get_interrupt_result_gp(group);
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ /* No interrupts signalled, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+#else
+ MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_NONE != int_result);
+#endif
+
+ mali_group_mask_all_interrupts_gp(group);
+
+ if (MALI_INTERRUPT_RESULT_SUCCESS_VS == int_result) {
+ if (mali_group_gp_is_active(group)) {
+ /* Only VS completed so far, while PLBU is still active */
+
+ /* Enable all but the current interrupt */
+ mali_group_enable_interrupts_gp(group, int_result);
+
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_OK;
+ }
+ } else if (MALI_INTERRUPT_RESULT_SUCCESS_PLBU == int_result) {
+ if (mali_group_gp_is_active(group)) {
+ /* Only PLBU completed so far, while VS is still active */
+
+ /* Enable all but the current interrupt */
+ mali_group_enable_interrupts_gp(group, int_result);
+
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_OK;
+ }
+ } else if (MALI_INTERRUPT_RESULT_OOM == int_result) {
+ struct mali_gp_job *job = mali_group_get_running_gp_job(group);
+
+ /* PLBU out of mem */
+ MALI_DEBUG_PRINT(3, ("Executor: PLBU needs more heap memory\n"));
+
+#if defined(CONFIG_MALI400_PROFILING)
+ /* Give group a chance to generate a SUSPEND event */
+ mali_group_oom(group);
+#endif
+
+ /*
+ * no need to hold interrupt raised while
+ * waiting for more memory.
+ */
+ mali_executor_send_gp_oom_to_user(job);
+
+ mali_executor_unlock();
+
+ return _MALI_OSK_ERR_OK;
+ }
+
+ /*Add for voltage scan function*/
+ if (MALI_INTERRUPT_RESULT_ERROR == int_result)
+ mali_group_error++;
+
+ /* We should now have a real interrupt to handle */
+
+ MALI_DEBUG_PRINT(4, ("Executor: Group %s completed with %s\n",
+ mali_group_core_description(group),
+ (MALI_INTERRUPT_RESULT_ERROR == int_result) ?
+ "ERROR" : "success"));
+
+ if (in_upper_half && MALI_INTERRUPT_RESULT_ERROR == int_result) {
+ /* Don't bother to do processing of errors in upper half */
+ mali_executor_unlock();
+
+ if (MALI_FALSE == time_out) {
+ mali_group_schedule_bottom_half_gp(group);
+ }
+ } else {
+ struct mali_gp_job *job;
+ mali_bool success;
+
+ success = (int_result != MALI_INTERRUPT_RESULT_ERROR) ?
+ MALI_TRUE : MALI_FALSE;
+
+ mali_executor_complete_group(group, success,
+ MALI_TRUE, &job, NULL);
+
+ mali_executor_unlock();
+
+ /* GP jobs always fully complete */
+ MALI_DEBUG_ASSERT(NULL != job);
+
+ /* This will notify user space and close the job object */
+ mali_scheduler_complete_gp_job(job, success,
+ MALI_TRUE, MALI_TRUE);
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_executor_interrupt_pp(struct mali_group *group,
+ mali_bool in_upper_half)
+{
+ enum mali_interrupt_result int_result;
+ mali_bool time_out = MALI_FALSE;
+
+ MALI_DEBUG_PRINT(4, ("Executor: PP interrupt from %s in %s half\n",
+ mali_group_core_description(group),
+ in_upper_half ? "upper" : "bottom"));
+
+ mali_executor_lock();
+
+ if (!mali_group_is_working(group)) {
+ /* Not working, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ if (in_upper_half) {
+ if (mali_group_is_in_virtual(group)) {
+ /* Child groups should never handle PP interrupts */
+ MALI_DEBUG_ASSERT(!mali_group_has_timed_out(group));
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(mali_group_is_working(group));
+ MALI_DEBUG_ASSERT(!mali_group_is_in_virtual(group));
+
+ if (mali_group_has_timed_out(group)) {
+ int_result = MALI_INTERRUPT_RESULT_ERROR;
+ time_out = MALI_TRUE;
+ MALI_PRINT(("Executor PP: Job %d Timeout on %s\n",
+ mali_pp_job_get_id(group->pp_running_job),
+ mali_group_core_description(group)));
+ } else {
+ int_result = mali_group_get_interrupt_result_pp(group);
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ /* No interrupts signalled, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ } else if (MALI_INTERRUPT_RESULT_SUCCESS == int_result) {
+ if (mali_group_is_virtual(group) && mali_group_pp_is_active(group)) {
+ /* Some child groups are still working, so nothing to do right now */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+ }
+#else
+ MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_NONE != int_result);
+ if (!mali_group_has_timed_out(group)) {
+ MALI_DEBUG_ASSERT(!mali_group_pp_is_active(group));
+ }
+#endif
+
+ /*Add voltage scan function*/
+
+ if (MALI_INTERRUPT_RESULT_ERROR == int_result)
+ mali_group_error++;
+
+ /* We should now have a real interrupt to handle */
+
+ MALI_DEBUG_PRINT(4, ("Executor: Group %s completed with %s\n",
+ mali_group_core_description(group),
+ (MALI_INTERRUPT_RESULT_ERROR == int_result) ?
+ "ERROR" : "success"));
+
+ if (in_upper_half && MALI_INTERRUPT_RESULT_ERROR == int_result) {
+ /* Don't bother to do processing of errors in upper half */
+ mali_group_mask_all_interrupts_pp(group);
+ mali_executor_unlock();
+
+ if (MALI_FALSE == time_out) {
+ mali_group_schedule_bottom_half_pp(group);
+ }
+ } else {
+ struct mali_pp_job *job = NULL;
+ mali_bool success;
+
+ success = (int_result == MALI_INTERRUPT_RESULT_SUCCESS) ?
+ MALI_TRUE : MALI_FALSE;
+
+ mali_executor_complete_group(group, success,
+ MALI_TRUE, NULL, &job);
+
+ mali_executor_unlock();
+
+ if (NULL != job) {
+ /* Notify user space and close the job object */
+ mali_scheduler_complete_pp_job(job,
+ num_physical_pp_cores_total,
+ MALI_TRUE, MALI_TRUE);
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t mali_executor_interrupt_mmu(struct mali_group *group,
+ mali_bool in_upper_half)
+{
+ enum mali_interrupt_result int_result;
+
+ MALI_DEBUG_PRINT(4, ("Executor: MMU interrupt from %s in %s half\n",
+ mali_group_core_description(group),
+ in_upper_half ? "upper" : "bottom"));
+
+ mali_executor_lock();
+ if (!mali_group_is_working(group)) {
+ /* Not working, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(mali_group_is_working(group));
+
+ int_result = mali_group_get_interrupt_result_mmu(group);
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
+ if (MALI_INTERRUPT_RESULT_NONE == int_result) {
+ /* No interrupts signalled, so nothing to do */
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+#else
+ MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_ERROR == int_result);
+#endif
+
+ /* We should now have a real interrupt to handle */
+
+ if (in_upper_half) {
+ /* Don't bother to do processing of errors in upper half */
+
+ struct mali_group *parent = group->parent_group;
+
+ mali_mmu_mask_all_interrupts(group->mmu);
+
+ mali_executor_unlock();
+
+ if (NULL == parent) {
+ mali_group_schedule_bottom_half_mmu(group);
+ } else {
+ mali_group_schedule_bottom_half_mmu(parent);
+ }
+
+ } else {
+ struct mali_gp_job *gp_job = NULL;
+ struct mali_pp_job *pp_job = NULL;
+
+#ifdef DEBUG
+
+ u32 fault_address = mali_mmu_get_page_fault_addr(group->mmu);
+ u32 status = mali_mmu_get_status(group->mmu);
+		MALI_DEBUG_PRINT(2, ("Executor: Mali page fault detected at %p from bus id %d of type %s on %s\n",
+ (void *)(uintptr_t)fault_address,
+ (status >> 6) & 0x1F,
+ (status & 32) ? "write" : "read",
+ group->mmu->hw_core.description));
+ MALI_DEBUG_PRINT(3, ("Executor: MMU rawstat = 0x%08X, MMU status = 0x%08X\n",
+ mali_mmu_get_rawstat(group->mmu), status));
+#endif
+
+ mali_executor_complete_group(group, MALI_FALSE,
+ MALI_TRUE, &gp_job, &pp_job);
+
+ mali_executor_unlock();
+
+ if (NULL != gp_job) {
+ MALI_DEBUG_ASSERT(NULL == pp_job);
+
+ /* Notify user space and close the job object */
+ mali_scheduler_complete_gp_job(gp_job, MALI_FALSE,
+ MALI_TRUE, MALI_TRUE);
+ } else if (NULL != pp_job) {
+ MALI_DEBUG_ASSERT(NULL == gp_job);
+
+ /* Notify user space and close the job object */
+ mali_scheduler_complete_pp_job(pp_job,
+ num_physical_pp_cores_total,
+ MALI_TRUE, MALI_TRUE);
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void mali_executor_group_power_up(struct mali_group *groups[], u32 num_groups)
+{
+ u32 i;
+ mali_bool child_groups_activated = MALI_FALSE;
+ mali_bool do_schedule = MALI_FALSE;
+#if defined(DEBUG)
+ u32 num_activated = 0;
+#endif
+
+ MALI_DEBUG_ASSERT_POINTER(groups);
+ MALI_DEBUG_ASSERT(0 < num_groups);
+
+ mali_executor_lock();
+
+ MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups\n", num_groups));
+
+ for (i = 0; i < num_groups; i++) {
+ MALI_DEBUG_PRINT(3, ("Executor: powering up group %s\n",
+ mali_group_core_description(groups[i])));
+
+ mali_group_power_up(groups[i]);
+
+ if ((MALI_GROUP_STATE_ACTIVATION_PENDING != mali_group_get_state(groups[i]) ||
+ (MALI_TRUE != mali_executor_group_is_in_state(groups[i], EXEC_STATE_INACTIVE)))) {
+ /* nothing more to do for this group */
+ continue;
+ }
+
+ MALI_DEBUG_PRINT(3, ("Executor: activating group %s\n",
+ mali_group_core_description(groups[i])));
+
+#if defined(DEBUG)
+ num_activated++;
+#endif
+
+ if (mali_group_is_in_virtual(groups[i])) {
+ /*
+ * At least one child group of virtual group is powered on.
+ */
+ child_groups_activated = MALI_TRUE;
+ } else if (MALI_FALSE == mali_group_is_virtual(groups[i])) {
+ /* Set gp and pp not in virtual to active. */
+ mali_group_set_active(groups[i]);
+ }
+
+ /* Move group from inactive to idle list */
+ if (groups[i] == gp_group) {
+ MALI_DEBUG_ASSERT(EXEC_STATE_INACTIVE ==
+ gp_group_state);
+ gp_group_state = EXEC_STATE_IDLE;
+ } else if (MALI_FALSE == mali_group_is_in_virtual(groups[i])
+ && MALI_FALSE == mali_group_is_virtual(groups[i])) {
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_executor_group_is_in_state(groups[i],
+ EXEC_STATE_INACTIVE));
+
+ mali_executor_change_state_pp_physical(groups[i],
+ &group_list_inactive,
+ &group_list_inactive_count,
+ &group_list_idle,
+ &group_list_idle_count);
+ }
+
+ do_schedule = MALI_TRUE;
+ }
+
+ if (mali_executor_has_virtual_group() &&
+ MALI_TRUE == child_groups_activated &&
+ MALI_GROUP_STATE_ACTIVATION_PENDING ==
+ mali_group_get_state(virtual_group)) {
+		/*
+		 * Try to activate the virtual group; this may not succeed every
+		 * time, because not all child groups may have been powered on at
+		 * once while the virtual group is in the activation pending state.
+		 */
+ if (mali_group_set_active(virtual_group)) {
+ /* Move group from inactive to idle */
+ MALI_DEBUG_ASSERT(EXEC_STATE_INACTIVE ==
+ virtual_group_state);
+ virtual_group_state = EXEC_STATE_IDLE;
+
+ MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated, 1 virtual activated.\n", num_groups, num_activated));
+ } else {
+ MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated\n", num_groups, num_activated));
+ }
+ } else {
+ MALI_DEBUG_PRINT(3, ("Executor: powering up %u groups completed, %u physical activated\n", num_groups, num_activated));
+ }
+
+ if (MALI_TRUE == do_schedule) {
+ /* Trigger a schedule */
+ mali_executor_schedule();
+ }
+
+ mali_executor_unlock();
+}
+
+void mali_executor_group_power_down(struct mali_group *groups[],
+ u32 num_groups)
+{
+ u32 i;
+
+ MALI_DEBUG_ASSERT_POINTER(groups);
+ MALI_DEBUG_ASSERT(0 < num_groups);
+
+ mali_executor_lock();
+
+ MALI_DEBUG_PRINT(3, ("Executor: powering down %u groups\n", num_groups));
+
+ for (i = 0; i < num_groups; i++) {
+ /* Groups must be either disabled or inactive */
+ MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(groups[i],
+ EXEC_STATE_DISABLED) ||
+ mali_executor_group_is_in_state(groups[i],
+ EXEC_STATE_INACTIVE));
+
+ MALI_DEBUG_PRINT(3, ("Executor: powering down group %s\n",
+ mali_group_core_description(groups[i])));
+
+ mali_group_power_down(groups[i]);
+ }
+
+ MALI_DEBUG_PRINT(3, ("Executor: powering down %u groups completed\n", num_groups));
+
+ mali_executor_unlock();
+}
+
+/*
+ * Abort all jobs belonging to @session across every group type.
+ * Working GP/PP jobs are force-completed as failed; groups merely
+ * holding a reference to the session have it cleared.
+ * Called during session teardown (session->is_aborting must be set).
+ */
+void mali_executor_abort_session(struct mali_session_data *session)
+{
+ struct mali_group *group;
+ struct mali_group *tmp_group;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT(session->is_aborting);
+
+ MALI_DEBUG_PRINT(3,
+ ("Executor: Aborting all jobs from session 0x%08X.\n",
+ session));
+
+ mali_executor_lock();
+
+ /* GP group: force-complete a working job, otherwise just detach. */
+ if (mali_group_get_session(gp_group) == session) {
+ if (EXEC_STATE_WORKING == gp_group_state) {
+ struct mali_gp_job *gp_job = NULL;
+
+ mali_executor_complete_group(gp_group, MALI_FALSE,
+ MALI_TRUE, &gp_job, NULL);
+
+ MALI_DEBUG_ASSERT_POINTER(gp_job);
+
+ /* GP job completed, make sure it is freed */
+ mali_scheduler_complete_gp_job(gp_job, MALI_FALSE,
+ MALI_FALSE, MALI_TRUE);
+ } else {
+ /* Same session, but not working, so just clear it */
+ mali_group_clear_session(gp_group);
+ }
+ }
+
+ /* Virtual PP group: only needs handling while actively working. */
+ if (mali_executor_has_virtual_group()) {
+ if (EXEC_STATE_WORKING == virtual_group_state
+ && mali_group_get_session(virtual_group) == session) {
+ struct mali_pp_job *pp_job = NULL;
+
+ mali_executor_complete_group(virtual_group, MALI_FALSE,
+ MALI_FALSE, NULL, &pp_job);
+
+ if (NULL != pp_job) {
+ /* PP job completed, make sure it is freed */
+ mali_scheduler_complete_pp_job(pp_job, 0,
+ MALI_FALSE, MALI_TRUE);
+ }
+ }
+ }
+
+ /* Physical PP groups currently executing for this session. */
+ _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_working,
+ struct mali_group, executor_list) {
+ if (mali_group_get_session(group) == session) {
+ struct mali_pp_job *pp_job = NULL;
+
+ mali_executor_complete_group(group, MALI_FALSE,
+ MALI_FALSE, NULL, &pp_job);
+
+ if (NULL != pp_job) {
+ /* PP job completed, make sure it is freed */
+ mali_scheduler_complete_pp_job(pp_job, 0,
+ MALI_FALSE, MALI_TRUE);
+ }
+ }
+ }
+
+ /* Non-working groups cannot hold a running job for the session;
+ * unconditionally drop any stale session pointer they may keep. */
+ _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_idle, struct mali_group, executor_list) {
+ mali_group_clear_session(group);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_inactive, struct mali_group, executor_list) {
+ mali_group_clear_session(group);
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, tmp_group, &group_list_disabled, struct mali_group, executor_list) {
+ mali_group_clear_session(group);
+ }
+
+ mali_executor_unlock();
+}
+
+
+/* Re-enable dynamic PP core scaling (the default state). */
+void mali_executor_core_scaling_enable(void)
+{
+ /* PS: Core scaling is by default enabled */
+ core_scaling_enabled = MALI_TRUE;
+}
+
+/* Disable dynamic PP core scaling; see mali_executor_set_perf_level()
+ * for where this flag is honoured (unless overridden). */
+void mali_executor_core_scaling_disable(void)
+{
+ core_scaling_enabled = MALI_FALSE;
+}
+
+/* Query whether dynamic PP core scaling is currently enabled. */
+mali_bool mali_executor_core_scaling_is_enabled(void)
+{
+ return core_scaling_enabled;
+}
+
+/*
+ * Enable a previously disabled group (GP or PP), reschedule, and
+ * notify user space sessions of the core-count change via workqueue.
+ */
+void mali_executor_group_enable(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ mali_executor_lock();
+
+ /* Only act on groups that actually have a core and are disabled. */
+ if ((NULL != mali_group_get_gp_core(group) || NULL != mali_group_get_pp_core(group))
+ && (mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))) {
+ mali_executor_group_enable_internal(group);
+ }
+
+ mali_executor_schedule();
+ mali_executor_unlock();
+
+ /* Notification is deferred to a workqueue: it allocates memory and
+ * takes the session lock, which must not happen under our spinlock. */
+ _mali_osk_wq_schedule_work(executor_wq_notify_core_change);
+}
+
+/*
+ * If a physical group is inactive or idle, we should disable it immediately,
+ * if group is in virtual, and virtual group is idle, disable given physical group in it.
+ */
+/*
+ * Disable a group (GP or PP). A working group is only flagged and
+ * disabled later when its job completes (see the internal helper);
+ * afterwards a reschedule runs and user space is notified.
+ */
+void mali_executor_group_disable(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ mali_executor_lock();
+
+ /* Only act on groups that have a core and are not already disabled. */
+ if ((NULL != mali_group_get_gp_core(group) || NULL != mali_group_get_pp_core(group))
+ && (!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))) {
+ mali_executor_group_disable_internal(group);
+ }
+
+ mali_executor_schedule();
+ mali_executor_unlock();
+
+ /* Deferred: notification allocates and takes the session lock. */
+ _mali_osk_wq_schedule_work(executor_wq_notify_core_change);
+}
+
+/* Return MALI_TRUE if @group is in the DISABLED executor state.
+ * Takes/releases the executor lock internally. */
+mali_bool mali_executor_group_is_disabled(struct mali_group *group)
+{
+ /* NB: This function is not optimized for time critical usage */
+
+ mali_bool ret;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+
+ mali_executor_lock();
+ ret = mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED);
+ mali_executor_unlock();
+
+ return ret;
+}
+
+/*
+ * Request that exactly @target_core_nr physical PP cores be enabled.
+ * @override bypasses the core_scaling_enabled check.
+ * Returns 0 on success (or no-op), -EPERM if scaling is disabled and
+ * not overridden, -EINVAL for 0 or more cores than physically present.
+ */
+int mali_executor_set_perf_level(unsigned int target_core_nr, mali_bool override)
+{
+ if (target_core_nr == num_physical_pp_cores_enabled) return 0;
+ if (MALI_FALSE == core_scaling_enabled && MALI_FALSE == override) return -EPERM;
+ if (target_core_nr > num_physical_pp_cores_total) return -EINVAL;
+ if (0 == target_core_nr) return -EINVAL;
+
+ mali_executor_core_scale(target_core_nr);
+
+ _mali_osk_wq_schedule_work(executor_wq_notify_core_change);
+
+ return 0;
+}
+
+#if MALI_STATE_TRACKING
+/*
+ * Debugfs helper: dump the executor state (GP group, each physical PP
+ * list, and the virtual group if present) into @buf of @size bytes.
+ * Returns the number of characters written.
+ */
+u32 mali_executor_dump_state(char *buf, u32 size)
+{
+ int n = 0;
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ mali_executor_lock();
+
+ switch (gp_group_state) {
+ case EXEC_STATE_INACTIVE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP group is in state INACTIVE\n");
+ break;
+ case EXEC_STATE_IDLE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP group is in state IDLE\n");
+ break;
+ case EXEC_STATE_WORKING:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP group is in state WORKING\n");
+ break;
+ default:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP group is in unknown/illegal state %u\n",
+ gp_group_state);
+ break;
+ }
+
+ n += mali_group_dump_state(gp_group, buf + n, size - n);
+
+ /* Physical PP state is defined by list membership; dump each list. */
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Physical PP groups in WORKING state (count = %u):\n",
+ group_list_working_count);
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_working, struct mali_group, executor_list) {
+ n += mali_group_dump_state(group, buf + n, size - n);
+ }
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Physical PP groups in IDLE state (count = %u):\n",
+ group_list_idle_count);
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle, struct mali_group, executor_list) {
+ n += mali_group_dump_state(group, buf + n, size - n);
+ }
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Physical PP groups in INACTIVE state (count = %u):\n",
+ group_list_inactive_count);
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive, struct mali_group, executor_list) {
+ n += mali_group_dump_state(group, buf + n, size - n);
+ }
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Physical PP groups in DISABLED state (count = %u):\n",
+ group_list_disabled_count);
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_disabled, struct mali_group, executor_list) {
+ n += mali_group_dump_state(group, buf + n, size - n);
+ }
+
+ if (mali_executor_has_virtual_group()) {
+ switch (virtual_group_state) {
+ case EXEC_STATE_EMPTY:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP group is in state EMPTY\n");
+ break;
+ case EXEC_STATE_INACTIVE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP group is in state INACTIVE\n");
+ break;
+ case EXEC_STATE_IDLE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP group is in state IDLE\n");
+ break;
+ case EXEC_STATE_WORKING:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP group is in state WORKING\n");
+ break;
+ default:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP group is in unknown/illegal state %u\n",
+ virtual_group_state);
+ break;
+ }
+
+ n += mali_group_dump_state(virtual_group, buf + n, size - n);
+ }
+
+ mali_executor_unlock();
+
+ n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+ return n;
+}
+#endif
+
+/* UK call: report total and currently enabled physical PP core counts. */
+_mali_osk_errcode_t _mali_ukk_get_pp_number_of_cores(_mali_uk_get_pp_number_of_cores_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+ args->number_of_total_cores = num_physical_pp_cores_total;
+ args->number_of_enabled_cores = num_physical_pp_cores_enabled;
+ return _MALI_OSK_ERR_OK;
+}
+
+/* UK call: report the PP core hardware version. */
+_mali_osk_errcode_t _mali_ukk_get_pp_core_version(_mali_uk_get_pp_core_version_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+ args->version = pp_version;
+ return _MALI_OSK_ERR_OK;
+}
+
+/* UK call: report the GP core count (always exactly one GP core). */
+_mali_osk_errcode_t _mali_ukk_get_gp_number_of_cores(_mali_uk_get_gp_number_of_cores_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+ args->number_of_cores = 1;
+ return _MALI_OSK_ERR_OK;
+}
+
+/* UK call: report the GP core hardware version. */
+_mali_osk_errcode_t _mali_ukk_get_gp_core_version(_mali_uk_get_gp_core_version_s *args)
+{
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+ args->version = gp_version;
+ return _MALI_OSK_ERR_OK;
+}
+
+/*
+ * UK call: user space response to a GP out-of-memory suspend.
+ * If a new heap is provided (and the suspended job is still the one
+ * running, matched by cookie and session), the job is resumed with the
+ * new heap bounds and _MALI_OSK_ERR_OK is returned. In every other
+ * case (no heap, allocation failure, or job no longer running) the job
+ * is aborted and _MALI_OSK_ERR_FAULT is returned.
+ */
+_mali_osk_errcode_t _mali_ukk_gp_suspend_response(_mali_uk_gp_suspend_response_s *args)
+{
+ struct mali_session_data *session;
+ struct mali_gp_job *job;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+ if (_MALIGP_JOB_RESUME_WITH_NEW_HEAP == args->code) {
+ _mali_osk_notification_t *new_notification = NULL;
+
+ /* Pre-allocate before taking the executor lock: a future OOM
+ * on the resumed job will need this notification object. */
+ new_notification = _mali_osk_notification_create(
+ _MALI_NOTIFICATION_GP_STALLED,
+ sizeof(_mali_uk_gp_job_suspended_s));
+
+ if (NULL != new_notification) {
+ MALI_DEBUG_PRINT(3, ("Executor: Resuming job %u with new heap; 0x%08X - 0x%08X\n",
+ args->cookie, args->arguments[0], args->arguments[1]));
+
+ mali_executor_lock();
+
+ /* Resume the job in question if it is still running */
+ job = mali_group_get_running_gp_job(gp_group);
+ if (NULL != job &&
+ args->cookie == mali_gp_job_get_id(job) &&
+ session == mali_gp_job_get_session(job)) {
+ /*
+ * Correct job is running, resume with new heap
+ */
+
+ mali_gp_job_set_oom_notification(job,
+ new_notification);
+
+ /* This will also re-enable interrupts */
+ mali_group_resume_gp_with_new_heap(gp_group,
+ args->cookie,
+ args->arguments[0],
+ args->arguments[1]);
+
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_OK;
+ } else {
+ MALI_PRINT_ERROR(("Executor: Unable to resume, GP job no longer running.\n"));
+
+ _mali_osk_notification_delete(new_notification);
+
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+ }
+ } else {
+ /* Allocation failed: fall through to the abort path. */
+ MALI_PRINT_ERROR(("Executor: Failed to allocate notification object. Will abort GP job.\n"));
+ }
+ } else {
+ MALI_DEBUG_PRINT(2, ("Executor: Aborting job %u, no new heap provided\n", args->cookie));
+ }
+
+ mali_executor_lock();
+
+ /* Abort the job in question if it is still running */
+ job = mali_group_get_running_gp_job(gp_group);
+ if (NULL != job &&
+ args->cookie == mali_gp_job_get_id(job) &&
+ session == mali_gp_job_get_session(job)) {
+ /* Correct job is still running */
+ struct mali_gp_job *job_done = NULL;
+
+ mali_executor_complete_group(gp_group, MALI_FALSE,
+ MALI_TRUE, &job_done, NULL);
+
+ /* The same job should have completed */
+ MALI_DEBUG_ASSERT(job_done == job);
+
+ /* GP job completed, make sure it is freed */
+ mali_scheduler_complete_gp_job(job_done, MALI_FALSE,
+ MALI_TRUE, MALI_TRUE);
+ }
+
+ mali_executor_unlock();
+ return _MALI_OSK_ERR_FAULT;
+}
+
+
+/*
+ * ---------- Implementation of static functions ----------
+ */
+
+/* Acquire the executor IRQ spinlock protecting all executor state. */
+static void mali_executor_lock(void)
+{
+ _mali_osk_spinlock_irq_lock(mali_executor_lock_obj);
+ MALI_DEBUG_PRINT(5, ("Executor: lock taken\n"));
+}
+
+/* Release the executor IRQ spinlock. */
+static void mali_executor_unlock(void)
+{
+ MALI_DEBUG_PRINT(5, ("Executor: Releasing lock\n"));
+ _mali_osk_spinlock_irq_unlock(mali_executor_lock_obj);
+}
+
+/*
+ * Wait-queue predicate: suspension is complete once a pause has been
+ * requested (pause_count > 0) and no group is still executing a job.
+ */
+static mali_bool mali_executor_is_suspended(void *data)
+{
+ mali_bool ret;
+
+ /* This callback does not use the data pointer. */
+ MALI_IGNORE(data);
+
+ mali_executor_lock();
+
+ ret = pause_count > 0 && !mali_executor_is_working();
+
+ mali_executor_unlock();
+
+ return ret;
+}
+
+/*
+ * Return MALI_TRUE if any group is currently executing a job: one or
+ * more physical PP groups on the working list, or the GP or virtual
+ * group in the WORKING state. Executor lock must be held.
+ */
+static mali_bool mali_executor_is_working(void)
+{
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ return (0 != group_list_working_count ||
+ EXEC_STATE_WORKING == gp_group_state ||
+ EXEC_STATE_WORKING == virtual_group_state);
+}
+
+/*
+ * Mark the virtual group EMPTY if all its child groups have been
+ * removed. Must not be called while the virtual group is already
+ * EMPTY or WORKING. Executor lock must be held.
+ */
+static void mali_executor_disable_empty_virtual(void)
+{
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(virtual_group_state != EXEC_STATE_EMPTY);
+ MALI_DEBUG_ASSERT(virtual_group_state != EXEC_STATE_WORKING);
+
+ if (mali_group_is_empty(virtual_group)) {
+ virtual_group_state = EXEC_STATE_EMPTY;
+ }
+}
+
+/*
+ * Move an idle physical group back into the virtual group, first
+ * matching the group's power/activation state to the virtual group's.
+ * Returns MALI_TRUE if the caller must trigger a PM update (the group
+ * was deactivated here). Executor lock must be held; the group must be
+ * on the idle list and in the ACTIVE state.
+ */
+static mali_bool mali_executor_physical_rejoin_virtual(struct mali_group *group)
+{
+ mali_bool trigger_pm_update = MALI_FALSE;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ /* Only rejoining after job has completed (still active) */
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE ==
+ mali_group_get_state(group));
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_executor_has_virtual_group());
+ MALI_DEBUG_ASSERT(MALI_FALSE == mali_group_is_virtual(group));
+
+ /* Make sure group and virtual group have same status */
+
+ if (MALI_GROUP_STATE_INACTIVE == mali_group_get_state(virtual_group)) {
+ /* Virtual group is powered down: deactivate the child too. */
+ if (mali_group_deactivate(group)) {
+ trigger_pm_update = MALI_TRUE;
+ }
+
+ if (virtual_group_state == EXEC_STATE_EMPTY) {
+ virtual_group_state = EXEC_STATE_INACTIVE;
+ }
+ } else if (MALI_GROUP_STATE_ACTIVATION_PENDING ==
+ mali_group_get_state(virtual_group)) {
+ /*
+ * Activation is pending for virtual group, leave
+ * this child group as active.
+ */
+ if (virtual_group_state == EXEC_STATE_EMPTY) {
+ virtual_group_state = EXEC_STATE_INACTIVE;
+ }
+ } else {
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE ==
+ mali_group_get_state(virtual_group));
+
+ if (virtual_group_state == EXEC_STATE_EMPTY) {
+ virtual_group_state = EXEC_STATE_IDLE;
+ }
+ }
+
+ /* Remove group from idle list */
+ MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(group,
+ EXEC_STATE_IDLE));
+ _mali_osk_list_delinit(&group->executor_list);
+ group_list_idle_count--;
+
+ /*
+ * And finally rejoin the virtual group
+ * group will start working on same job as virtual_group,
+ * if virtual_group is working on a job
+ */
+ mali_group_add_group(virtual_group, group);
+
+ return trigger_pm_update;
+}
+
+/* A virtual group only exists on Mali-450 builds; on other builds this
+ * compiles to a constant MALI_FALSE. */
+static mali_bool mali_executor_has_virtual_group(void)
+{
+#if defined(CONFIG_MALI450)
+ return (NULL != virtual_group) ? MALI_TRUE : MALI_FALSE;
+#else
+ return MALI_FALSE;
+#endif /* defined(CONFIG_MALI450) */
+}
+
+/*
+ * The virtual group can lend out physical groups only when INACTIVE or
+ * IDLE (not EMPTY, not WORKING). Executor lock must be held.
+ * Constant MALI_FALSE on non-Mali-450 builds.
+ */
+static mali_bool mali_executor_virtual_group_is_usable(void)
+{
+#if defined(CONFIG_MALI450)
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return (EXEC_STATE_INACTIVE == virtual_group_state ||
+ EXEC_STATE_IDLE == virtual_group_state) ?
+ MALI_TRUE : MALI_FALSE;
+#else
+ return MALI_FALSE;
+#endif /* defined(CONFIG_MALI450) */
+}
+
+/*
+ * GP-bound heuristic (Mali-400 only): returns MALI_TRUE when the next
+ * queued physical PP job is large and unstarted while other PP work is
+ * already running, in which case starting it should be deferred so the
+ * GP is not starved. Executor lock must be held.
+ */
+static mali_bool mali_executor_tackle_gp_bound(void)
+{
+ struct mali_pp_job *job;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ job = mali_scheduler_job_pp_physical_peek();
+
+ if (NULL != job && MALI_TRUE == mali_is_mali400()) {
+ if (0 < group_list_working_count &&
+ mali_pp_job_is_large_and_unstarted(job)) {
+ return MALI_TRUE;
+ }
+ }
+
+ return MALI_FALSE;
+}
+
+/*
+ * This is where jobs are actually started.
+ */
+/*
+ * Core scheduling pass: activates groups as needed, assigns queued GP
+ * and PP jobs to idle groups, deactivates groups with nothing to do,
+ * and finally starts the selected jobs.
+ *
+ * Executor lock must be held; the scheduler lock is taken internally
+ * while the job queues are inspected (steps 1-7) and released before
+ * jobs are started (step 9). Jobs to start are collected into local
+ * arrays first so that starting happens outside the scheduler lock.
+ * No-op while paused (pause_count > 0).
+ */
+static void mali_executor_schedule(void)
+{
+ u32 i;
+ u32 num_physical_needed = 0;
+ u32 num_physical_to_process = 0;
+ mali_bool trigger_pm_update = MALI_FALSE;
+ mali_bool deactivate_idle_group = MALI_TRUE;
+
+ /* Physical groups + jobs to start in this function */
+ struct mali_group *groups_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
+ struct mali_pp_job *jobs_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
+ u32 sub_jobs_to_start[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS];
+ /* NOTE(review): int compared against u32 i in the start loop below;
+ * always non-negative here, but u32 would be cleaner — confirm. */
+ int num_jobs_to_start = 0;
+
+ /* Virtual job to start in this function */
+ struct mali_pp_job *virtual_job_to_start = NULL;
+
+ /* GP job to start in this function */
+ struct mali_gp_job *gp_job_to_start = NULL;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ if (pause_count > 0) {
+ /* Execution is suspended, don't schedule any jobs. */
+ return;
+ }
+
+ /* Lock needed in order to safely handle the job queues */
+ mali_scheduler_lock();
+
+ /* 1. Activate gp firstly if have gp job queued. */
+ if (EXEC_STATE_INACTIVE == gp_group_state &&
+ 0 < mali_scheduler_job_gp_count()) {
+
+ enum mali_group_state state =
+ mali_group_activate(gp_group);
+ if (MALI_GROUP_STATE_ACTIVE == state) {
+ /* Set GP group state to idle */
+ gp_group_state = EXEC_STATE_IDLE;
+ } else {
+ trigger_pm_update = MALI_TRUE;
+ }
+ }
+
+ /* 2. Prepare as many physical groups as needed/possible */
+
+ num_physical_needed = mali_scheduler_job_physical_head_count();
+
+ /* On mali-450 platform, we don't need to enter in this block frequently. */
+ if (0 < num_physical_needed) {
+
+ if (num_physical_needed <= group_list_idle_count) {
+ /* We have enough groups on idle list already */
+ num_physical_to_process = num_physical_needed;
+ num_physical_needed = 0;
+ } else {
+ /* We need to get a hold of some more groups */
+ num_physical_to_process = group_list_idle_count;
+ num_physical_needed -= group_list_idle_count;
+ }
+
+ if (0 < num_physical_needed) {
+
+ /* 2.1. Activate groups which are inactive */
+
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_inactive,
+ struct mali_group, executor_list) {
+ enum mali_group_state state =
+ mali_group_activate(group);
+ if (MALI_GROUP_STATE_ACTIVE == state) {
+ /* Move from inactive to idle */
+ mali_executor_change_state_pp_physical(group,
+ &group_list_inactive,
+ &group_list_inactive_count,
+ &group_list_idle,
+ &group_list_idle_count);
+ num_physical_to_process++;
+ } else {
+ trigger_pm_update = MALI_TRUE;
+ }
+
+ num_physical_needed--;
+ if (0 == num_physical_needed) {
+ /* We have activated all the groups we need */
+ break;
+ }
+ }
+ }
+
+ if (mali_executor_virtual_group_is_usable()) {
+
+ /*
+ * 2.2. And finally, steal and activate groups
+ * from virtual group if we need even more
+ */
+ while (0 < num_physical_needed) {
+ struct mali_group *group;
+
+ group = mali_group_acquire_group(virtual_group);
+ if (NULL != group) {
+ enum mali_group_state state;
+
+ mali_executor_disable_empty_virtual();
+
+ state = mali_group_activate(group);
+ if (MALI_GROUP_STATE_ACTIVE == state) {
+ /* Group is ready, add to idle list */
+ _mali_osk_list_add(
+ &group->executor_list,
+ &group_list_idle);
+ group_list_idle_count++;
+ num_physical_to_process++;
+ } else {
+ /*
+ * Group is not ready yet,
+ * add to inactive list
+ */
+ _mali_osk_list_add(
+ &group->executor_list,
+ &group_list_inactive);
+ group_list_inactive_count++;
+
+ trigger_pm_update = MALI_TRUE;
+ }
+ num_physical_needed--;
+ } else {
+ /*
+ * We could not get enough groups
+ * from the virtual group.
+ */
+ break;
+ }
+ }
+ }
+
+ /* 2.3. Assign physical jobs to groups */
+
+ if (0 < num_physical_to_process) {
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &group_list_idle,
+ struct mali_group, executor_list) {
+ struct mali_pp_job *job = NULL;
+ u32 sub_job = MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+
+ MALI_DEBUG_ASSERT(num_jobs_to_start <
+ MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS);
+
+ MALI_DEBUG_ASSERT(0 <
+ mali_scheduler_job_physical_head_count());
+
+ if (mali_executor_hint_is_enabled(
+ MALI_EXECUTOR_HINT_GP_BOUND)) {
+ if (MALI_TRUE == mali_executor_tackle_gp_bound()) {
+ /*
+ * We're gp bound,
+ * don't start this right now.
+ */
+ deactivate_idle_group = MALI_FALSE;
+ num_physical_to_process = 0;
+ break;
+ }
+ }
+
+ job = mali_scheduler_job_pp_physical_get(
+ &sub_job);
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(sub_job <= MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS);
+
+ /* Put job + group on list of jobs to start later on */
+
+ groups_to_start[num_jobs_to_start] = group;
+ jobs_to_start[num_jobs_to_start] = job;
+ sub_jobs_to_start[num_jobs_to_start] = sub_job;
+ num_jobs_to_start++;
+
+ /* Move group from idle to working */
+ mali_executor_change_state_pp_physical(group,
+ &group_list_idle,
+ &group_list_idle_count,
+ &group_list_working,
+ &group_list_working_count);
+
+ num_physical_to_process--;
+ if (0 == num_physical_to_process) {
+ /* Got all we needed */
+ break;
+ }
+ }
+ }
+ }
+
+ /* 3. Activate virtual group, if needed */
+
+ if (EXEC_STATE_INACTIVE == virtual_group_state &&
+ 0 < mali_scheduler_job_next_is_virtual()) {
+ enum mali_group_state state =
+ mali_group_activate(virtual_group);
+ if (MALI_GROUP_STATE_ACTIVE == state) {
+ /* Set virtual group state to idle */
+ virtual_group_state = EXEC_STATE_IDLE;
+ } else {
+ trigger_pm_update = MALI_TRUE;
+ }
+ }
+
+ /* 4. To power up group asap, we trigger pm update here. */
+
+ if (MALI_TRUE == trigger_pm_update) {
+ trigger_pm_update = MALI_FALSE;
+ mali_pm_update_async();
+ }
+
+ /* 5. Deactivate idle pp group */
+
+ if (MALI_TRUE == mali_executor_deactivate_list_idle(deactivate_idle_group
+ && (!mali_timeline_has_physical_pp_job()))) {
+ trigger_pm_update = MALI_TRUE;
+ }
+
+ /* 6. Assign jobs to idle virtual group (or deactivate if no job) */
+
+ if (EXEC_STATE_IDLE == virtual_group_state) {
+ if (0 < mali_scheduler_job_next_is_virtual()) {
+ virtual_job_to_start =
+ mali_scheduler_job_pp_virtual_get();
+ virtual_group_state = EXEC_STATE_WORKING;
+ } else if (!mali_timeline_has_virtual_pp_job()) {
+ virtual_group_state = EXEC_STATE_INACTIVE;
+
+ if (mali_group_deactivate(virtual_group)) {
+ trigger_pm_update = MALI_TRUE;
+ }
+ }
+ }
+
+ /* 7. Assign job to idle GP group (or deactivate if no job) */
+
+ if (EXEC_STATE_IDLE == gp_group_state) {
+ if (0 < mali_scheduler_job_gp_count()) {
+ gp_job_to_start = mali_scheduler_job_gp_get();
+ gp_group_state = EXEC_STATE_WORKING;
+ } else if (!mali_timeline_has_gp_job()) {
+ gp_group_state = EXEC_STATE_INACTIVE;
+ if (mali_group_deactivate(gp_group)) {
+ trigger_pm_update = MALI_TRUE;
+ }
+ }
+ }
+
+ /* 8. We no longer need the schedule/queue lock */
+
+ mali_scheduler_unlock();
+
+ /* 9. start jobs */
+
+ if (NULL != virtual_job_to_start) {
+ MALI_DEBUG_ASSERT(!mali_group_pp_is_active(virtual_group));
+ mali_group_start_pp_job(virtual_group,
+ virtual_job_to_start, 0);
+ }
+
+ for (i = 0; i < num_jobs_to_start; i++) {
+ MALI_DEBUG_ASSERT(!mali_group_pp_is_active(
+ groups_to_start[i]));
+ mali_group_start_pp_job(groups_to_start[i],
+ jobs_to_start[i],
+ sub_jobs_to_start[i]);
+ }
+
+ MALI_DEBUG_ASSERT_POINTER(gp_group);
+
+ if (NULL != gp_job_to_start) {
+ MALI_DEBUG_ASSERT(!mali_group_gp_is_active(gp_group));
+ mali_group_start_gp_job(gp_group, gp_job_to_start);
+ }
+
+ /* 10. Trigger any pending PM updates */
+ if (MALI_TRUE == trigger_pm_update) {
+ mali_pm_update_async();
+ }
+}
+
+/* Handler for deferred schedule requests */
+/* Workqueue entry point: run a scheduling pass under the executor lock. */
+static void mali_executor_wq_schedule(void *arg)
+{
+ MALI_IGNORE(arg);
+ mali_executor_lock();
+ mali_executor_schedule();
+ mali_executor_unlock();
+}
+
+/*
+ * Send the GP job's out-of-memory (stalled) notification to its
+ * session so user space can supply a new heap. The job id is saved in
+ * gp_returned_cookie to validate the later suspend response.
+ */
+static void mali_executor_send_gp_oom_to_user(struct mali_gp_job *job)
+{
+ _mali_uk_gp_job_suspended_s *jobres;
+ _mali_osk_notification_t *notification;
+
+ notification = mali_gp_job_get_oom_notification(job);
+
+ /*
+ * Remember the id we send to user space, so we have something to
+ * verify when we get a response
+ */
+ gp_returned_cookie = mali_gp_job_get_id(job);
+
+ jobres = (_mali_uk_gp_job_suspended_s *)notification->result_buffer;
+ jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+ jobres->cookie = gp_returned_cookie;
+
+ mali_session_send_notification(mali_gp_job_get_session(job),
+ notification);
+}
+/*
+ * Complete the GP job on @group (with @success), move the GP group to
+ * IDLE and, when @release_jobs, release its timeline tracker and signal
+ * any dependent PP tracker. Returns the completed job (never NULL).
+ * Executor lock must be held.
+ */
+static struct mali_gp_job *mali_executor_complete_gp(struct mali_group *group,
+ mali_bool success,
+ mali_bool release_jobs)
+{
+ struct mali_gp_job *job;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ /* Extracts the needed HW status from core and reset */
+ job = mali_group_complete_gp(group, success);
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ /* Core is now ready to go into idle list */
+ gp_group_state = EXEC_STATE_IDLE;
+
+ if (release_jobs) {
+ /* This will potentially queue more GP and PP jobs */
+ mali_timeline_tracker_release(&job->tracker);
+
+ /* Signal PP job */
+ mali_gp_job_signal_pp_tracker(job, success);
+ }
+
+ return job;
+}
+
+/*
+ * Complete the PP (sub) job on @group: mark the sub job done, move the
+ * group back to IDLE (or mark the virtual group IDLE), and release the
+ * job's timeline tracker once every sub job has finished (when
+ * @release_jobs). Returns the job (which may still have sub jobs
+ * outstanding). Executor lock must be held.
+ */
+static struct mali_pp_job *mali_executor_complete_pp(struct mali_group *group,
+ mali_bool success,
+ mali_bool release_jobs)
+{
+ struct mali_pp_job *job;
+ u32 sub_job;
+ mali_bool job_is_done;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ /* Extracts the needed HW status from core and reset */
+ job = mali_group_complete_pp(group, success, &sub_job);
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ /* Core is now ready to go into idle list */
+ if (mali_group_is_virtual(group)) {
+ virtual_group_state = EXEC_STATE_IDLE;
+ } else {
+ /* Move from working to idle state */
+ mali_executor_change_state_pp_physical(group,
+ &group_list_working,
+ &group_list_working_count,
+ &group_list_idle,
+ &group_list_idle_count);
+ }
+
+ /* It is the executor module which owns the jobs themselves by now */
+ mali_pp_job_mark_sub_job_completed(job, success);
+ job_is_done = mali_pp_job_is_complete(job);
+
+ if (job_is_done && release_jobs) {
+ /* This will potentially queue more GP and PP jobs */
+ mali_timeline_tracker_release(&job->tracker);
+ }
+
+ return job;
+}
+
+/*
+ * Complete whatever job @group was running (GP or PP), then either wake
+ * up a pending suspend, finish a pending core-scale disable, or run a
+ * scheduling pass. The finished job is returned through *gp_job_done or
+ * *pp_job_done; *pp_job_done is only set once the whole PP job (all sub
+ * jobs) is complete. Executor lock must be held.
+ */
+static void mali_executor_complete_group(struct mali_group *group,
+ mali_bool success,
+ mali_bool release_jobs,
+ struct mali_gp_job **gp_job_done,
+ struct mali_pp_job **pp_job_done)
+{
+ struct mali_gp_core *gp_core = mali_group_get_gp_core(group);
+ struct mali_pp_core *pp_core = mali_group_get_pp_core(group);
+ struct mali_gp_job *gp_job = NULL;
+ struct mali_pp_job *pp_job = NULL;
+ mali_bool pp_job_is_done = MALI_TRUE;
+
+ if (NULL != gp_core) {
+ gp_job = mali_executor_complete_gp(group,
+ success, release_jobs);
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(pp_core);
+ MALI_IGNORE(pp_core);
+ pp_job = mali_executor_complete_pp(group,
+ success, release_jobs);
+
+ pp_job_is_done = mali_pp_job_is_complete(pp_job);
+ }
+
+ if (pause_count > 0) {
+ /* Execution has been suspended */
+
+ if (!mali_executor_is_working()) {
+ /* Last job completed, wake up sleepers */
+ _mali_osk_wait_queue_wake_up(
+ executor_working_wait_queue);
+ }
+ } else if (MALI_TRUE == mali_group_disable_requested(group)) {
+ /* Group was flagged for disable while working; finish that
+ * core-scale operation now that the job is done. */
+ mali_executor_core_scale_in_group_complete(group);
+
+ mali_executor_schedule();
+ } else {
+ /* try to schedule new jobs */
+ mali_executor_schedule();
+ }
+
+ if (NULL != gp_job) {
+ MALI_DEBUG_ASSERT_POINTER(gp_job_done);
+ *gp_job_done = gp_job;
+ } else if (pp_job_is_done) {
+ MALI_DEBUG_ASSERT_POINTER(pp_job);
+ MALI_DEBUG_ASSERT_POINTER(pp_job_done);
+ *pp_job_done = pp_job;
+ }
+}
+
+/*
+ * Move a physical PP group between the executor state lists (idle /
+ * inactive / working / disabled) and update both list counters. In
+ * DEBUG builds the group's presence on @old_list and both counters are
+ * verified first, with diagnostics printed on mismatch.
+ */
+static void mali_executor_change_state_pp_physical(struct mali_group *group,
+ _mali_osk_list_t *old_list,
+ u32 *old_count,
+ _mali_osk_list_t *new_list,
+ u32 *new_count)
+{
+ /*
+ * It's a bit more complicated to change the state for the physical PP
+ * groups since their state is determined by the list they are on.
+ */
+#if defined(DEBUG)
+ mali_bool found = MALI_FALSE;
+ struct mali_group *group_iter;
+ struct mali_group *temp;
+ u32 old_counted = 0;
+ u32 new_counted = 0;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(old_list);
+ MALI_DEBUG_ASSERT_POINTER(old_count);
+ MALI_DEBUG_ASSERT_POINTER(new_list);
+ MALI_DEBUG_ASSERT_POINTER(new_count);
+
+ /*
+ * Verify that group is present on old list,
+ * and that the count is correct
+ */
+
+ _MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, old_list,
+ struct mali_group, executor_list) {
+ old_counted++;
+ if (group == group_iter) {
+ found = MALI_TRUE;
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, new_list,
+ struct mali_group, executor_list) {
+ new_counted++;
+ }
+
+ if (MALI_FALSE == found) {
+ /* Diagnostic aid: report which list was expected and where
+ * the group actually is before the assert fires below. */
+ if (old_list == &group_list_idle) {
+ MALI_DEBUG_PRINT(1, (" old Group list is idle,"));
+ } else if (old_list == &group_list_inactive) {
+ MALI_DEBUG_PRINT(1, (" old Group list is inactive,"));
+ } else if (old_list == &group_list_working) {
+ MALI_DEBUG_PRINT(1, (" old Group list is working,"));
+ } else if (old_list == &group_list_disabled) {
+ MALI_DEBUG_PRINT(1, (" old Group list is disable,"));
+ }
+
+ if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_WORKING)) {
+ MALI_DEBUG_PRINT(1, (" group in working \n"));
+ } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_INACTIVE)) {
+ MALI_DEBUG_PRINT(1, (" group in inactive \n"));
+ } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_IDLE)) {
+ MALI_DEBUG_PRINT(1, (" group in idle \n"));
+ } else if (MALI_TRUE == mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED)) {
+ MALI_DEBUG_PRINT(1, (" but group in disabled \n"));
+ }
+ }
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == found);
+ MALI_DEBUG_ASSERT(0 < (*old_count));
+ MALI_DEBUG_ASSERT((*old_count) == old_counted);
+ MALI_DEBUG_ASSERT((*new_count) == new_counted);
+#endif
+
+ _mali_osk_list_move(&group->executor_list, new_list);
+ (*old_count)--;
+ (*new_count)++;
+}
+
+/* Put a physical PP group that is not currently on any state list onto
+ * @new_list and bump its counter (cf. the change_state variant, which
+ * moves between two lists). */
+static void mali_executor_set_state_pp_physical(struct mali_group *group,
+ _mali_osk_list_t *new_list,
+ u32 *new_count)
+{
+ _mali_osk_list_add(&group->executor_list, new_list);
+ (*new_count)++;
+}
+
+/*
+ * Test whether @group is in executor state @state. The GP group and
+ * virtual group (including its children) are tracked by state
+ * variables; physical PP groups by membership of the matching list.
+ * Executor lock must be held.
+ */
+static mali_bool mali_executor_group_is_in_state(struct mali_group *group,
+ enum mali_executor_state_t state)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ if (gp_group == group) {
+ if (gp_group_state == state) {
+ return MALI_TRUE;
+ }
+ } else if (virtual_group == group || mali_group_is_in_virtual(group)) {
+ if (virtual_group_state == state) {
+ return MALI_TRUE;
+ }
+ } else {
+ /* Physical PP group */
+ struct mali_group *group_iter;
+ struct mali_group *temp;
+ _mali_osk_list_t *list;
+
+ if (EXEC_STATE_DISABLED == state) {
+ list = &group_list_disabled;
+ } else if (EXEC_STATE_INACTIVE == state) {
+ list = &group_list_inactive;
+ } else if (EXEC_STATE_IDLE == state) {
+ list = &group_list_idle;
+ } else {
+ MALI_DEBUG_ASSERT(EXEC_STATE_WORKING == state);
+ list = &group_list_working;
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group_iter, temp, list,
+ struct mali_group, executor_list) {
+ if (group_iter == group) {
+ return MALI_TRUE;
+ }
+ }
+ }
+
+ /* group not in correct state */
+ return MALI_FALSE;
+}
+
+/*
+ * Enable a DISABLED group: move it to INACTIVE, try to activate it, and
+ * on success promote it to IDLE (rejoining the virtual group where
+ * applicable). If activation is still pending, an async PM update is
+ * triggered instead. Executor lock must be held.
+ */
+static void mali_executor_group_enable_internal(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED));
+
+ /* Put into inactive state (== "lowest" enabled state) */
+ if (group == gp_group) {
+ MALI_DEBUG_ASSERT(EXEC_STATE_DISABLED == gp_group_state);
+ gp_group_state = EXEC_STATE_INACTIVE;
+ } else {
+ mali_executor_change_state_pp_physical(group,
+ &group_list_disabled,
+ &group_list_disabled_count,
+ &group_list_inactive,
+ &group_list_inactive_count);
+
+ ++num_physical_pp_cores_enabled;
+ MALI_DEBUG_PRINT(4, ("Enabling group id %d \n", group->pp_core->core_id));
+ }
+
+ if (MALI_GROUP_STATE_ACTIVE == mali_group_activate(group)) {
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_group_power_is_on(group));
+
+ /* Move from inactive to idle */
+ if (group == gp_group) {
+ gp_group_state = EXEC_STATE_IDLE;
+ } else {
+ mali_executor_change_state_pp_physical(group,
+ &group_list_inactive,
+ &group_list_inactive_count,
+ &group_list_idle,
+ &group_list_idle_count);
+
+ if (mali_executor_has_virtual_group()) {
+ if (mali_executor_physical_rejoin_virtual(group)) {
+ mali_pm_update_async();
+ }
+ }
+ }
+ } else {
+ /* Activation pending; power-up will be completed by PM. */
+ mali_pm_update_async();
+ }
+}
+
+/*
+ * Disable a group. A WORKING group is only flagged (disable completes
+ * from the job-done path); otherwise the group is moved to the DISABLED
+ * state — a virtual-group child is first removed from the virtual group
+ * — and deactivated if needed. Executor lock must be held.
+ */
+static void mali_executor_group_disable_internal(struct mali_group *group)
+{
+ mali_bool working;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED));
+
+ working = mali_executor_group_is_in_state(group, EXEC_STATE_WORKING);
+ if (MALI_TRUE == working) {
+ /** Group to be disabled once it completes current work,
+ * when virtual group completes, also check child groups for this flag */
+ mali_group_set_disable_request(group, MALI_TRUE);
+ return;
+ }
+
+ /* Put into disabled state */
+ if (group == gp_group) {
+ /* GP group */
+ MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != gp_group_state);
+ gp_group_state = EXEC_STATE_DISABLED;
+ } else {
+ if (mali_group_is_in_virtual(group)) {
+ /* A child group of virtual group. move the specific group from virtual group */
+ MALI_DEBUG_ASSERT(EXEC_STATE_WORKING != virtual_group_state);
+
+ mali_executor_set_state_pp_physical(group,
+ &group_list_disabled,
+ &group_list_disabled_count);
+
+ mali_group_remove_group(virtual_group, group);
+ mali_executor_disable_empty_virtual();
+ } else {
+ mali_executor_change_group_status_disabled(group);
+ }
+
+ --num_physical_pp_cores_enabled;
+ MALI_DEBUG_PRINT(4, ("Disabling group id %d \n", group->pp_core->core_id));
+ }
+
+ if (MALI_GROUP_STATE_INACTIVE != group->state) {
+ if (MALI_TRUE == mali_group_deactivate(group)) {
+ mali_pm_update_async();
+ }
+ }
+}
+
+/*
+ * Notify every user space session that the number of enabled PP cores
+ * changed to @num_cores. No-op on Mali-450 (presumably user space does
+ * not need it there — confirm). Notification objects are allocated
+ * outside the session lock; if sessions appear between counting and
+ * locking, the whole attempt is retried with a larger allocation.
+ */
+static void mali_executor_notify_core_change(u32 num_cores)
+{
+ mali_bool done = MALI_FALSE;
+
+ if (mali_is_mali450()) {
+ return;
+ }
+
+ /*
+ * This function gets a bit complicated because we can't hold the session lock while
+ * allocating notification objects.
+ */
+ while (!done) {
+ u32 i;
+ u32 num_sessions_alloc;
+ u32 num_sessions_with_lock;
+ u32 used_notification_objects = 0;
+ _mali_osk_notification_t **notobjs;
+
+ /* Pre allocate the number of notifications objects we need right now (might change after lock has been taken) */
+ num_sessions_alloc = mali_session_get_count();
+ if (0 == num_sessions_alloc) {
+ /* No sessions to report to */
+ return;
+ }
+
+ notobjs = (_mali_osk_notification_t **)_mali_osk_malloc(sizeof(_mali_osk_notification_t *) * num_sessions_alloc);
+ if (NULL == notobjs) {
+ MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure)\n"));
+ /* there is probably no point in trying again, system must be really low on memory and probably unusable now anyway */
+ return;
+ }
+
+ for (i = 0; i < num_sessions_alloc; i++) {
+ notobjs[i] = _mali_osk_notification_create(_MALI_NOTIFICATION_PP_NUM_CORE_CHANGE, sizeof(_mali_uk_pp_num_cores_changed_s));
+ if (NULL != notobjs[i]) {
+ _mali_uk_pp_num_cores_changed_s *data = notobjs[i]->result_buffer;
+ data->number_of_enabled_cores = num_cores;
+ } else {
+ /* Partial failure tolerated: that session simply
+ * misses this notification. */
+ MALI_PRINT_ERROR(("Failed to notify user space session about num PP core change (alloc failure %u)\n", i));
+ }
+ }
+
+ mali_session_lock();
+
+ /* number of sessions will not change while we hold the lock */
+ num_sessions_with_lock = mali_session_get_count();
+
+ if (num_sessions_alloc >= num_sessions_with_lock) {
+ /* We have allocated enough notification objects for all the sessions atm */
+ struct mali_session_data *session, *tmp;
+ MALI_SESSION_FOREACH(session, tmp, link) {
+ MALI_DEBUG_ASSERT(used_notification_objects < num_sessions_alloc);
+ if (NULL != notobjs[used_notification_objects]) {
+ mali_session_send_notification(session, notobjs[used_notification_objects]);
+ notobjs[used_notification_objects] = NULL; /* Don't track this notification object any more */
+ }
+ used_notification_objects++;
+ }
+ done = MALI_TRUE;
+ }
+
+ mali_session_unlock();
+
+ /* Delete any remaining/unused notification objects */
+ for (; used_notification_objects < num_sessions_alloc; used_notification_objects++) {
+ if (NULL != notobjs[used_notification_objects]) {
+ _mali_osk_notification_delete(notobjs[used_notification_objects]);
+ }
+ }
+
+ _mali_osk_free(notobjs);
+ }
+}
+
+static mali_bool mali_executor_core_scaling_is_done(void *data)
+{
+ u32 i;
+ u32 num_groups;
+ mali_bool ret = MALI_TRUE;
+
+ MALI_IGNORE(data);
+
+ mali_executor_lock();
+
+ num_groups = mali_group_get_glob_num_groups();
+
+ for (i = 0; i < num_groups; i++) {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ if (NULL != group) {
+ if (MALI_TRUE == group->disable_requested && NULL != mali_group_get_pp_core(group)) {
+ ret = MALI_FALSE;
+ break;
+ }
+ }
+ }
+ mali_executor_unlock();
+
+ return ret;
+}
+
+static void mali_executor_wq_notify_core_change(void *arg)
+{
+ MALI_IGNORE(arg);
+
+ if (mali_is_mali450()) {
+ return;
+ }
+
+ _mali_osk_wait_queue_wait_event(executor_notify_core_change_wait_queue,
+ mali_executor_core_scaling_is_done, NULL);
+
+ mali_executor_notify_core_change(num_physical_pp_cores_enabled);
+}
+
+/**
+ * Clear all disable requests left over from the _last_ core scaling operation.
+ */
+static void mali_executor_core_scaling_reset(void)
+{
+ u32 i;
+ u32 num_groups;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ num_groups = mali_group_get_glob_num_groups();
+
+ for (i = 0; i < num_groups; i++) {
+ struct mali_group *group = mali_group_get_glob_group(i);
+
+ if (NULL != group) {
+ group->disable_requested = MALI_FALSE;
+ }
+ }
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ core_scaling_delay_up_mask[i] = 0;
+ }
+}
+
+static void mali_executor_core_scale(unsigned int target_core_nr)
+{
+ int current_core_scaling_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+ int target_core_scaling_mask[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+ mali_bool update_global_core_scaling_mask = MALI_FALSE;
+ int i;
+
+ MALI_DEBUG_ASSERT(0 < target_core_nr);
+ MALI_DEBUG_ASSERT(num_physical_pp_cores_total >= target_core_nr);
+
+ mali_executor_lock();
+
+ if (target_core_nr < num_physical_pp_cores_enabled) {
+ MALI_DEBUG_PRINT(2, ("Requesting %d cores: disabling %d cores\n", target_core_nr, num_physical_pp_cores_enabled - target_core_nr));
+ } else {
+ MALI_DEBUG_PRINT(2, ("Requesting %d cores: enabling %d cores\n", target_core_nr, target_core_nr - num_physical_pp_cores_enabled));
+ }
+
+	/* When a new core scaling request comes in, we should remove the not-yet-applied
+	 * part of the last core scaling request. This is safe because we have only a
+	 * single lock (the executor lock) for protection. */
+ mali_executor_core_scaling_reset();
+
+ mali_pm_get_best_power_cost_mask(num_physical_pp_cores_enabled, current_core_scaling_mask);
+ mali_pm_get_best_power_cost_mask(target_core_nr, target_core_scaling_mask);
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ target_core_scaling_mask[i] = target_core_scaling_mask[i] - current_core_scaling_mask[i];
+ MALI_DEBUG_PRINT(5, ("target_core_scaling_mask[%d] = %d\n", i, target_core_scaling_mask[i]));
+ }
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (0 > target_core_scaling_mask[i]) {
+ struct mali_pm_domain *domain;
+
+ domain = mali_pm_domain_get_from_index(i);
+
+ /* Domain is valid and has pp cores */
+ if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
+ if (NULL != mali_group_get_pp_core(group) && (!mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED))
+ && (!mali_group_is_virtual(group))) {
+ mali_executor_group_disable_internal(group);
+ target_core_scaling_mask[i]++;
+ if ((0 == target_core_scaling_mask[i])) {
+ break;
+ }
+
+ }
+ }
+ }
+ }
+ }
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+		/**
+		 * If target_core_scaling_mask[i] is greater than 0, it
+		 * means we need to enable some pp cores in the
+		 * domain whose index is i.
+		 */
+ if (0 < target_core_scaling_mask[i]) {
+ struct mali_pm_domain *domain;
+
+ if (num_physical_pp_cores_enabled >= target_core_nr) {
+ update_global_core_scaling_mask = MALI_TRUE;
+ break;
+ }
+
+ domain = mali_pm_domain_get_from_index(i);
+
+ /* Domain is valid and has pp cores */
+ if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
+ if (NULL != mali_group_get_pp_core(group) && mali_executor_group_is_in_state(group, EXEC_STATE_DISABLED)
+ && (!mali_group_is_virtual(group))) {
+ mali_executor_group_enable_internal(group);
+ target_core_scaling_mask[i]--;
+
+ if ((0 == target_core_scaling_mask[i]) || num_physical_pp_cores_enabled == target_core_nr) {
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+
+	/**
+	 * At this point some pp cores may still not have been enabled, because
+	 * some of the pp cores that need to be disabled are still in the
+	 * working state.
+	 */
+ if (update_global_core_scaling_mask) {
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (0 < target_core_scaling_mask[i]) {
+ core_scaling_delay_up_mask[i] = target_core_scaling_mask[i];
+ }
+ }
+ }
+
+ mali_executor_schedule();
+ mali_executor_unlock();
+}
+
+static void mali_executor_core_scale_in_group_complete(struct mali_group *group)
+{
+ int num_pp_cores_disabled = 0;
+ int num_pp_cores_to_enable = 0;
+ int i;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_group_disable_requested(group));
+
+ /* Disable child group of virtual group */
+ if (mali_group_is_virtual(group)) {
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ if (MALI_TRUE == mali_group_disable_requested(child)) {
+ mali_group_set_disable_request(child, MALI_FALSE);
+ mali_executor_group_disable_internal(child);
+ num_pp_cores_disabled++;
+ }
+ }
+ mali_group_set_disable_request(group, MALI_FALSE);
+ } else {
+ mali_executor_group_disable_internal(group);
+ mali_group_set_disable_request(group, MALI_FALSE);
+ if (NULL != mali_group_get_pp_core(group)) {
+ num_pp_cores_disabled++;
+ }
+ }
+
+ num_pp_cores_to_enable = num_pp_cores_disabled;
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (0 < core_scaling_delay_up_mask[i]) {
+ struct mali_pm_domain *domain;
+
+ if (0 == num_pp_cores_to_enable) {
+ break;
+ }
+
+ domain = mali_pm_domain_get_from_index(i);
+
+ /* Domain is valid and has pp cores */
+ if ((NULL != domain) && !(_mali_osk_list_empty(&domain->group_list))) {
+ struct mali_group *disabled_group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(disabled_group, temp, &domain->group_list, struct mali_group, pm_domain_list) {
+ if (NULL != mali_group_get_pp_core(disabled_group) && mali_executor_group_is_in_state(disabled_group, EXEC_STATE_DISABLED)) {
+ mali_executor_group_enable_internal(disabled_group);
+ core_scaling_delay_up_mask[i]--;
+ num_pp_cores_to_enable--;
+
+ if ((0 == core_scaling_delay_up_mask[i]) || 0 == num_pp_cores_to_enable) {
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ _mali_osk_wait_queue_wake_up(executor_notify_core_change_wait_queue);
+}
+
+static void mali_executor_change_group_status_disabled(struct mali_group *group)
+{
+ /* Physical PP group */
+ mali_bool idle;
+
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ idle = mali_executor_group_is_in_state(group, EXEC_STATE_IDLE);
+ if (MALI_TRUE == idle) {
+ mali_executor_change_state_pp_physical(group,
+ &group_list_idle,
+ &group_list_idle_count,
+ &group_list_disabled,
+ &group_list_disabled_count);
+ } else {
+ mali_executor_change_state_pp_physical(group,
+ &group_list_inactive,
+ &group_list_inactive_count,
+ &group_list_disabled,
+ &group_list_disabled_count);
+ }
+}
+
+static mali_bool mali_executor_deactivate_list_idle(mali_bool deactivate_idle_group)
+{
+ mali_bool trigger_pm_update = MALI_FALSE;
+
+ if (group_list_idle_count > 0) {
+ if (mali_executor_has_virtual_group()) {
+
+ /* Rejoin virtual group on Mali-450 */
+
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp,
+ &group_list_idle,
+ struct mali_group, executor_list) {
+ if (mali_executor_physical_rejoin_virtual(
+ group)) {
+ trigger_pm_update = MALI_TRUE;
+ }
+ }
+ } else if (deactivate_idle_group) {
+ struct mali_group *group;
+ struct mali_group *temp;
+
+ /* Deactivate group on Mali-300/400 */
+
+ _MALI_OSK_LIST_FOREACHENTRY(group, temp,
+ &group_list_idle,
+ struct mali_group, executor_list) {
+ if (mali_group_deactivate(group)) {
+ trigger_pm_update = MALI_TRUE;
+ }
+
+ /* Move from idle to inactive */
+ mali_executor_change_state_pp_physical(group,
+ &group_list_idle,
+ &group_list_idle_count,
+ &group_list_inactive,
+ &group_list_inactive_count);
+ }
+ }
+ }
+
+ return trigger_pm_update;
+}
--- /dev/null
+/*
+ * Copyright (C) 2012, 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __MALI_EXECUTOR_H__
+#define __MALI_EXECUTOR_H__
+
+#include "mali_osk.h"
+#include "mali_scheduler_types.h"
+#include "mali_kernel_common.h"
+
+typedef enum {
+ MALI_EXECUTOR_HINT_GP_BOUND = 0
+#define MALI_EXECUTOR_HINT_MAX 1
+} mali_executor_hint;
+
+extern mali_bool mali_executor_hints[MALI_EXECUTOR_HINT_MAX];
+
+/* forward declare struct instead of using include */
+struct mali_session_data;
+struct mali_group;
+struct mali_pp_core;
+
+extern _mali_osk_spinlock_irq_t *mali_executor_lock_obj;
+
+#define MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD() MALI_DEBUG_ASSERT_LOCK_HELD(mali_executor_lock_obj);
+
+_mali_osk_errcode_t mali_executor_initialize(void);
+void mali_executor_terminate(void);
+
+void mali_executor_populate(void);
+void mali_executor_depopulate(void);
+
+void mali_executor_suspend(void);
+void mali_executor_resume(void);
+
+u32 mali_executor_get_num_cores_total(void);
+u32 mali_executor_get_num_cores_enabled(void);
+struct mali_pp_core *mali_executor_get_virtual_pp(void);
+struct mali_group *mali_executor_get_virtual_group(void);
+
+void mali_executor_zap_all_active(struct mali_session_data *session);
+
+/**
+ * Schedule GP and PP according to bitmask.
+ *
+ * @param mask A scheduling bitmask.
+ * @param deferred_schedule MALI_TRUE if schedule should be deferred, MALI_FALSE if not.
+ */
+void mali_executor_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule);
+
+_mali_osk_errcode_t mali_executor_interrupt_gp(struct mali_group *group, mali_bool in_upper_half);
+_mali_osk_errcode_t mali_executor_interrupt_pp(struct mali_group *group, mali_bool in_upper_half);
+_mali_osk_errcode_t mali_executor_interrupt_mmu(struct mali_group *group, mali_bool in_upper_half);
+
+void mali_executor_group_power_up(struct mali_group *groups[], u32 num_groups);
+void mali_executor_group_power_down(struct mali_group *groups[], u32 num_groups);
+
+void mali_executor_abort_session(struct mali_session_data *session);
+
+void mali_executor_core_scaling_enable(void);
+void mali_executor_core_scaling_disable(void);
+mali_bool mali_executor_core_scaling_is_enabled(void);
+
+void mali_executor_group_enable(struct mali_group *group);
+void mali_executor_group_disable(struct mali_group *group);
+mali_bool mali_executor_group_is_disabled(struct mali_group *group);
+
+int mali_executor_set_perf_level(unsigned int target_core_nr, mali_bool override);
+
+#if MALI_STATE_TRACKING
+u32 mali_executor_dump_state(char *buf, u32 size);
+#endif
+
+MALI_STATIC_INLINE void mali_executor_hint_enable(mali_executor_hint hint)
+{
+ MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX);
+ mali_executor_hints[hint] = MALI_TRUE;
+}
+
+MALI_STATIC_INLINE void mali_executor_hint_disable(mali_executor_hint hint)
+{
+ MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX);
+ mali_executor_hints[hint] = MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_executor_hint_is_enabled(mali_executor_hint hint)
+{
+ MALI_DEBUG_ASSERT(hint < MALI_EXECUTOR_HINT_MAX);
+ return mali_executor_hints[hint];
+}
+
+#endif /* __MALI_EXECUTOR_H__ */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_gp.h"
mali_gp_stop_bus(core);
/* Wait for bus to be stopped */
- for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
+ for (i = 0; i < MALI_REG_POLL_COUNT_SLOW; i++) {
if (mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS) & MALIGP2_REG_VAL_STATUS_BUS_STOPPED) {
break;
}
}
- if (MALI_REG_POLL_COUNT_FAST == i) {
+ if (MALI_REG_POLL_COUNT_SLOW == i) {
MALI_PRINT_ERROR(("Mali GP: Failed to stop bus on %s\n", core->hw_core.description));
return _MALI_OSK_ERR_FAULT;
}
}
#endif
-void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job, mali_bool suspend)
+void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job)
{
u32 val0 = 0;
u32 val1 = 0;
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_GP_H__
struct mali_gp_core *mali_gp_get_global_gp_core(void);
+#if MALI_STATE_TRACKING
u32 mali_gp_dump_state(struct mali_gp_core *core, char *buf, u32 size);
+#endif
-void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job, mali_bool suspend);
+void mali_gp_update_performance_counters(struct mali_gp_core *core, struct mali_gp_job *job);
-/*** Accessor functions ***/
-MALI_STATIC_INLINE const char *mali_gp_get_hw_core_desc(struct mali_gp_core *core)
+MALI_STATIC_INLINE const char *mali_gp_core_description(struct mali_gp_core *core)
{
return core->hw_core.description;
}
-/*** Register reading/writing functions ***/
-MALI_STATIC_INLINE u32 mali_gp_get_int_stat(struct mali_gp_core *core)
+MALI_STATIC_INLINE enum mali_interrupt_result mali_gp_get_interrupt_result(struct mali_gp_core *core)
{
- return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT);
+ u32 stat_used = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_STAT) &
+ MALIGP2_REG_VAL_IRQ_MASK_USED;
+
+ if (0 == stat_used) {
+ return MALI_INTERRUPT_RESULT_NONE;
+ } else if ((MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST |
+ MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST) == stat_used) {
+ return MALI_INTERRUPT_RESULT_SUCCESS;
+ } else if (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST == stat_used) {
+ return MALI_INTERRUPT_RESULT_SUCCESS_VS;
+ } else if (MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST == stat_used) {
+ return MALI_INTERRUPT_RESULT_SUCCESS_PLBU;
+ } else if (MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM & stat_used) {
+ return MALI_INTERRUPT_RESULT_OOM;
+ }
+
+ return MALI_INTERRUPT_RESULT_ERROR;
}
-MALI_STATIC_INLINE void mali_gp_mask_all_interrupts(struct mali_gp_core *core)
+MALI_STATIC_INLINE u32 mali_gp_get_rawstat(struct mali_gp_core *core)
{
- mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_NONE);
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return mali_hw_core_register_read(&core->hw_core,
+ MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT);
}
-MALI_STATIC_INLINE u32 mali_gp_read_rawstat(struct mali_gp_core *core)
+MALI_STATIC_INLINE u32 mali_gp_is_active(struct mali_gp_core *core)
{
- return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_RAWSTAT) & MALIGP2_REG_VAL_IRQ_MASK_USED;
+ u32 status = mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
+ return (status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE) ? MALI_TRUE : MALI_FALSE;
}
-MALI_STATIC_INLINE u32 mali_gp_read_core_status(struct mali_gp_core *core)
+MALI_STATIC_INLINE void mali_gp_mask_all_interrupts(struct mali_gp_core *core)
{
- return mali_hw_core_register_read(&core->hw_core, MALIGP2_REG_ADDR_MGMT_STATUS);
+ mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK, MALIGP2_REG_VAL_IRQ_MASK_NONE);
}
-MALI_STATIC_INLINE void mali_gp_enable_interrupts(struct mali_gp_core *core, u32 irq_exceptions)
+MALI_STATIC_INLINE void mali_gp_enable_interrupts(struct mali_gp_core *core, enum mali_interrupt_result exceptions)
{
- /* Enable all interrupts, except those specified in irq_exceptions */
- mali_hw_core_register_write(&core->hw_core, MALIGP2_REG_ADDR_MGMT_INT_MASK,
- MALIGP2_REG_VAL_IRQ_MASK_USED & ~irq_exceptions);
+ /* Enable all interrupts, except those specified in exceptions */
+ u32 value;
+
+ if (MALI_INTERRUPT_RESULT_SUCCESS_VS == exceptions) {
+ /* Enable all used except VS complete */
+ value = MALIGP2_REG_VAL_IRQ_MASK_USED &
+ ~MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST;
+ } else {
+ MALI_DEBUG_ASSERT(MALI_INTERRUPT_RESULT_SUCCESS_PLBU ==
+ exceptions);
+ /* Enable all used except PLBU complete */
+ value = MALIGP2_REG_VAL_IRQ_MASK_USED &
+ ~MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST;
+ }
+
+ mali_hw_core_register_write(&core->hw_core,
+ MALIGP2_REG_ADDR_MGMT_INT_MASK,
+ value);
}
MALI_STATIC_INLINE u32 mali_gp_read_plbu_alloc_start_addr(struct mali_gp_core *core)
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_gp_job.h"
{
MALI_DEBUG_ASSERT_POINTER(job);
MALI_DEBUG_ASSERT(NULL == job->pp_tracker);
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
/* de-allocate the pre-allocated oom notifications */
if (NULL != job->oom_notification) {
_mali_osk_free(job);
}
+void mali_gp_job_list_add(struct mali_gp_job *job, _mali_osk_list_t *list)
+{
+ struct mali_gp_job *iter;
+ struct mali_gp_job *tmp;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+ /* Find position in list/queue where job should be added. */
+ _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, list,
+ struct mali_gp_job, list) {
+
+ /* A span is used to handle job ID wrapping. */
+ bool job_is_after = (mali_gp_job_get_id(job) -
+ mali_gp_job_get_id(iter)) <
+ MALI_SCHEDULER_JOB_ID_SPAN;
+
+ if (job_is_after) {
+ break;
+ }
+ }
+
+ _mali_osk_list_add(&job->list, &iter->list);
+}
+
u32 mali_gp_job_get_gp_counter_src0(void)
{
return gp_counter_src0;
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_GP_JOB_H__
#include "mali_session.h"
#include "mali_timeline.h"
#include "mali_scheduler_types.h"
+#include "mali_scheduler.h"
+#include "mali_executor.h"
+#include "mali_timeline.h"
/**
- * The structure represents a GP job, including all sub-jobs
- * (This struct unfortunately needs to be public because of how the _mali_osk_list_*
- * mechanism works)
+ * This structure represents a GP job
+ *
+ * The GP job object itself is not protected by any single lock,
+ * but relies on other locks instead (scheduler, executor and timeline lock).
+ * Think of the job object as moving between these sub systems through-out
+ * its lifetime. Different part of the GP job struct is used by different
+ * subsystems. Accessor functions ensure that correct lock is taken.
+ * Do NOT access any data members directly from outside this module!
*/
struct mali_gp_job {
- _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */
- struct mali_session_data *session; /**< Session which submitted this job */
+ /*
+ * These members are typically only set at creation,
+ * and only read later on.
+ * They do not require any lock protection.
+ */
_mali_uk_gp_start_job_s uargs; /**< Arguments from user space */
+ struct mali_session_data *session; /**< Session which submitted this job */
+ u32 pid; /**< Process ID of submitting process */
+ u32 tid; /**< Thread ID of submitting thread */
u32 id; /**< Identifier for this job in kernel space (sequential numbering) */
u32 cache_order; /**< Cache order used for L2 cache flushing (sequential numbering) */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker for this job */
+ struct mali_timeline_tracker *pp_tracker; /**< Pointer to Timeline tracker for PP job that depends on this job. */
+ _mali_osk_notification_t *finished_notification; /**< Notification sent back to userspace on job complete */
+
+ /*
+ * These members are used by the scheduler,
+ * protected by scheduler lock
+ */
+ _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */
+
+ /*
+ * These members are used by the executor and/or group,
+ * protected by executor lock
+ */
+ _mali_osk_notification_t *oom_notification; /**< Notification sent back to userspace on OOM */
+
+ /*
+ * Set by executor/group on job completion, read by scheduler when
+ * returning job to user. Hold executor lock when setting,
+ * no lock needed when reading
+ */
u32 heap_current_addr; /**< Holds the current HEAP address when the job has completed */
u32 perf_counter_value0; /**< Value of performance counter 0 (to be returned to user space) */
u32 perf_counter_value1; /**< Value of performance counter 1 (to be returned to user space) */
- u32 pid; /**< Process ID of submitting process */
- u32 tid; /**< Thread ID of submitting thread */
- _mali_osk_notification_t *finished_notification; /**< Notification sent back to userspace on job complete */
- _mali_osk_notification_t *oom_notification; /**< Notification sent back to userspace on OOM */
- struct mali_timeline_tracker tracker; /**< Timeline tracker for this job */
- struct mali_timeline_tracker *pp_tracker; /**< Pointer to Timeline tracker for PP job that depends on this job. */
};
struct mali_gp_job *mali_gp_job_create(struct mali_session_data *session, _mali_uk_gp_start_job_s *uargs, u32 id, struct mali_timeline_tracker *pp_tracker);
MALI_STATIC_INLINE u32 mali_gp_job_get_id(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return (NULL == job) ? 0 : job->id;
}
+MALI_STATIC_INLINE void mali_gp_job_set_cache_order(struct mali_gp_job *job,
+ u32 cache_order)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ job->cache_order = cache_order;
+}
+
MALI_STATIC_INLINE u32 mali_gp_job_get_cache_order(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return (NULL == job) ? 0 : job->cache_order;
}
MALI_STATIC_INLINE u64 mali_gp_job_get_user_id(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.user_job_ptr;
}
MALI_STATIC_INLINE u32 mali_gp_job_get_frame_builder_id(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.frame_builder_id;
}
MALI_STATIC_INLINE u32 mali_gp_job_get_flush_id(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.flush_id;
}
MALI_STATIC_INLINE u32 mali_gp_job_get_pid(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->pid;
}
MALI_STATIC_INLINE u32 mali_gp_job_get_tid(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->tid;
}
MALI_STATIC_INLINE u32 *mali_gp_job_get_frame_registers(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.frame_registers;
}
MALI_STATIC_INLINE struct mali_session_data *mali_gp_job_get_session(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->session;
}
MALI_STATIC_INLINE mali_bool mali_gp_job_has_vs_job(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return (job->uargs.frame_registers[0] != job->uargs.frame_registers[1]) ? MALI_TRUE : MALI_FALSE;
}
MALI_STATIC_INLINE mali_bool mali_gp_job_has_plbu_job(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return (job->uargs.frame_registers[2] != job->uargs.frame_registers[3]) ? MALI_TRUE : MALI_FALSE;
}
MALI_STATIC_INLINE u32 mali_gp_job_get_current_heap_addr(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->heap_current_addr;
}
MALI_STATIC_INLINE void mali_gp_job_set_current_heap_addr(struct mali_gp_job *job, u32 heap_addr)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
job->heap_current_addr = heap_addr;
}
MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_flag(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.perf_counter_flag;
}
MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src0(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.perf_counter_src0;
}
MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_src1(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.perf_counter_src1;
}
MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value0(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->perf_counter_value0;
}
MALI_STATIC_INLINE u32 mali_gp_job_get_perf_counter_value1(struct mali_gp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->perf_counter_value1;
}
MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src0(struct mali_gp_job *job, u32 src)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
job->uargs.perf_counter_src0 = src;
}
MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_src1(struct mali_gp_job *job, u32 src)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
job->uargs.perf_counter_src1 = src;
}
MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value0(struct mali_gp_job *job, u32 value)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
job->perf_counter_value0 = value;
}
MALI_STATIC_INLINE void mali_gp_job_set_perf_counter_value1(struct mali_gp_job *job, u32 value)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
job->perf_counter_value1 = value;
}
-/**
- * Returns MALI_TRUE if first job is after the second job, ordered by job ID.
- *
- * @param first First job.
- * @param second Second job.
- * @return MALI_TRUE if first job should be ordered after the second job, MALI_FALSE if not.
- */
-MALI_STATIC_INLINE mali_bool mali_gp_job_is_after(struct mali_gp_job *first, struct mali_gp_job *second)
+void mali_gp_job_list_add(struct mali_gp_job *job, _mali_osk_list_t *list);
+
+MALI_STATIC_INLINE void mali_gp_job_list_move(struct mali_gp_job *job,
+ _mali_osk_list_t *list)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&job->list));
+ _mali_osk_list_move(&job->list, list);
+}
+
+MALI_STATIC_INLINE void mali_gp_job_list_remove(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ _mali_osk_list_delinit(&job->list);
+}
+
+MALI_STATIC_INLINE _mali_osk_notification_t *
+mali_gp_job_get_finished_notification(struct mali_gp_job *job)
{
- /* A span is used to handle job ID wrapping. */
- return (mali_gp_job_get_id(first) - mali_gp_job_get_id(second)) < MALI_SCHEDULER_JOB_ID_SPAN;
+ _mali_osk_notification_t *notification;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->finished_notification);
+
+ notification = job->finished_notification;
+ job->finished_notification = NULL;
+
+ return notification;
}
+MALI_STATIC_INLINE _mali_osk_notification_t *mali_gp_job_get_oom_notification(
+ struct mali_gp_job *job)
+{
+ _mali_osk_notification_t *notification;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT_POINTER(job->oom_notification);
+
+ notification = job->oom_notification;
+ job->oom_notification = NULL;
+
+ return notification;
+}
+
+MALI_STATIC_INLINE void mali_gp_job_set_oom_notification(
+ struct mali_gp_job *job,
+ _mali_osk_notification_t *notification)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(NULL == job->oom_notification);
+ job->oom_notification = notification;
+}
+
+MALI_STATIC_INLINE struct mali_timeline_tracker *mali_gp_job_get_tracker(
+ struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return &(job->tracker);
+}
+
+
+MALI_STATIC_INLINE u32 *mali_gp_job_get_timeline_point_ptr(
+ struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (u32 __user *)(uintptr_t)job->uargs.timeline_point_ptr;
+}
+
+
/**
* Release reference on tracker for PP job that depends on this GP job.
*
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
-
#include "mali_kernel_common.h"
#include "mali_group.h"
#include "mali_osk.h"
#include "mali_osk_profiling.h"
#include "mali_pm_domain.h"
#include "mali_pm.h"
+#include "mali_executor.h"
+
#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
#include <linux/sched.h>
#include <trace/events/gpu.h>
#endif
-
-static void mali_group_bottom_half_mmu(void *data);
-static void mali_group_bottom_half_gp(void *data);
-static void mali_group_bottom_half_pp(void *data);
-
-static void mali_group_timeout(void *data);
-static void mali_group_reset_pp(struct mali_group *group);
-static void mali_group_reset_mmu(struct mali_group *group);
+#define MALI_MAX_NUM_DOMAIN_REFS (MALI_MAX_NUMBER_OF_GROUPS * 2)
#if defined(CONFIG_MALI400_PROFILING)
static void mali_group_report_l2_cache_counters_per_core(struct mali_group *group, u32 core_num);
#endif /* #if defined(CONFIG_MALI400_PROFILING) */
-/*
- * The group object is the most important object in the device driver,
- * and acts as the center of many HW operations.
- * The reason for this is that operations on the MMU will affect all
- * cores connected to this MMU (a group is defined by the MMU and the
- * cores which are connected to this).
- * The group lock is thus the most important lock, followed by the
- * GP and PP scheduler locks. They must be taken in the following
- * order:
- * GP/PP lock first, then group lock(s).
- */
-
static struct mali_group *mali_global_groups[MALI_MAX_NUMBER_OF_GROUPS] = { NULL, };
static u32 mali_global_num_groups = 0;
-/* timer related */
+/* SW timer for job execution */
int mali_max_job_runtime = MALI_MAX_JOB_RUNTIME_DEFAULT;
-extern u32 mali_group_error;
-/* local helper functions */
-static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session);
-static void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session);
-static void mali_group_recovery_reset(struct mali_group *group);
-static void mali_group_mmu_page_fault_and_unlock(struct mali_group *group);
-static void mali_group_post_process_job_pp(struct mali_group *group);
-static void mali_group_post_process_job_gp(struct mali_group *group, mali_bool suspend);
-
-void mali_group_lock(struct mali_group *group)
-{
-#ifdef MALI_UPPER_HALF_SCHEDULING
- _mali_osk_spinlock_irq_lock(group->lock);
-#else
- _mali_osk_spinlock_lock(group->lock);
-#endif
- MALI_DEBUG_PRINT(5, ("Mali group: Group lock taken 0x%08X\n", group));
-}
-
-void mali_group_unlock(struct mali_group *group)
-{
- MALI_DEBUG_PRINT(5, ("Mali group: Releasing group lock 0x%08X\n", group));
-#ifdef MALI_UPPER_HALF_SCHEDULING
- _mali_osk_spinlock_irq_unlock(group->lock);
-#else
- _mali_osk_spinlock_unlock(group->lock);
-#endif
-}
+/* local helper functions */
+static void mali_group_bottom_half_mmu(void *data);
+static void mali_group_bottom_half_gp(void *data);
+static void mali_group_bottom_half_pp(void *data);
+static void mali_group_timeout(void *data);
-#ifdef DEBUG
-void mali_group_assert_locked(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
-}
-#endif
+static void mali_group_reset_pp(struct mali_group *group);
+static void mali_group_reset_mmu(struct mali_group *group);
+static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session);
+static void mali_group_recovery_reset(struct mali_group *group);
-struct mali_group *mali_group_create(struct mali_l2_cache_core *core, struct mali_dlbu_core *dlbu, struct mali_bcast_unit *bcast)
+struct mali_group *mali_group_create(struct mali_l2_cache_core *core,
+ struct mali_dlbu_core *dlbu,
+ struct mali_bcast_unit *bcast,
+ u32 domain_index)
{
struct mali_group *group = NULL;
group = _mali_osk_calloc(1, sizeof(struct mali_group));
if (NULL != group) {
group->timeout_timer = _mali_osk_timer_init();
-
if (NULL != group->timeout_timer) {
- _mali_osk_lock_order_t order;
_mali_osk_timer_setcallback(group->timeout_timer, mali_group_timeout, (void *)group);
- if (NULL != dlbu) {
- order = _MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL;
- } else {
- order = _MALI_OSK_LOCK_ORDER_GROUP;
- }
+ group->l2_cache_core[0] = core;
+ _mali_osk_list_init(&group->group_list);
+ _mali_osk_list_init(&group->executor_list);
+ _mali_osk_list_init(&group->pm_domain_list);
+ group->bcast_core = bcast;
+ group->dlbu_core = dlbu;
-#ifdef MALI_UPPER_HALF_SCHEDULING
- group->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, order);
-#else
- group->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, order);
-#endif
+ /* register this object as a part of the correct power domain */
+ if ((NULL != core) || (NULL != dlbu) || (NULL != bcast))
+ group->pm_domain = mali_pm_register_group(domain_index, group);
- if (NULL != group->lock) {
- group->l2_cache_core[0] = core;
- group->session = NULL;
- group->power_is_on = MALI_TRUE;
- group->state = MALI_GROUP_STATE_IDLE;
- _mali_osk_list_init(&group->group_list);
- _mali_osk_list_init(&group->pp_scheduler_list);
- group->parent_group = NULL;
- group->l2_cache_core_ref_count[0] = 0;
- group->l2_cache_core_ref_count[1] = 0;
- group->bcast_core = bcast;
- group->dlbu_core = dlbu;
-
- mali_global_groups[mali_global_num_groups] = group;
- mali_global_num_groups++;
-
- return group;
- }
- _mali_osk_timer_term(group->timeout_timer);
+ mali_global_groups[mali_global_num_groups] = group;
+ mali_global_num_groups++;
+
+ return group;
}
_mali_osk_free(group);
}
return NULL;
}
+/* Tear down a group and every resource it owns: GP/PP cores, MMU,
+ * timeout timer and bottom-half work objects.  For a virtual group the
+ * child groups are detached and deleted recursively, and the DLBU and
+ * broadcast units are released as well.  The group must already be
+ * detached from any parent and be inactive (or activation-pending). */
+void mali_group_delete(struct mali_group *group)
+{
+	u32 i;
+
+	MALI_DEBUG_PRINT(4, ("Deleting group %s\n",
+			     mali_group_core_description(group)));
+
+	MALI_DEBUG_ASSERT(NULL == group->parent_group);
+	MALI_DEBUG_ASSERT((MALI_GROUP_STATE_INACTIVE == group->state) || ((MALI_GROUP_STATE_ACTIVATION_PENDING == group->state)));
+
+	/* Delete the resources that this group owns */
+	if (NULL != group->gp_core) {
+		mali_gp_delete(group->gp_core);
+	}
+
+	if (NULL != group->pp_core) {
+		mali_pp_delete(group->pp_core);
+	}
+
+	if (NULL != group->mmu) {
+		mali_mmu_delete(group->mmu);
+	}
+
+	if (mali_group_is_virtual(group)) {
+		/* Remove all groups from virtual group */
+		struct mali_group *child;
+		struct mali_group *temp;
+
+		_MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+			child->parent_group = NULL;
+			mali_group_delete(child);
+		}
+
+		mali_dlbu_delete(group->dlbu_core);
+
+		if (NULL != group->bcast_core) {
+			mali_bcast_unit_delete(group->bcast_core);
+		}
+	}
+
+	/* Unregister from the global group array, compacting it so the
+	 * first mali_global_num_groups entries stay contiguous. */
+	for (i = 0; i < mali_global_num_groups; i++) {
+		if (mali_global_groups[i] == group) {
+			mali_global_groups[i] = NULL;
+			mali_global_num_groups--;
+
+			if (i != mali_global_num_groups) {
+				/* We removed a group from the middle of the array -- move the last
+				 * group to the current position to close the gap */
+				mali_global_groups[i] = mali_global_groups[mali_global_num_groups];
+				mali_global_groups[mali_global_num_groups] = NULL;
+			}
+
+			break;
+		}
+	}
+
+	/* Stop the SW job timer before tearing it down */
+	if (NULL != group->timeout_timer) {
+		_mali_osk_timer_del(group->timeout_timer);
+		_mali_osk_timer_term(group->timeout_timer);
+	}
+
+	if (NULL != group->bottom_half_work_mmu) {
+		_mali_osk_wq_delete_work(group->bottom_half_work_mmu);
+	}
+
+	if (NULL != group->bottom_half_work_gp) {
+		_mali_osk_wq_delete_work(group->bottom_half_work_gp);
+	}
+
+	if (NULL != group->bottom_half_work_pp) {
+		_mali_osk_wq_delete_work(group->bottom_half_work_pp);
+	}
+
+	_mali_osk_free(group);
+}
+
_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core *mmu_core)
{
/* This group object now owns the MMU core object */
}
}
-void mali_group_set_pm_domain(struct mali_group *group, struct mali_pm_domain *domain)
+enum mali_group_state mali_group_activate(struct mali_group *group)
{
- group->pm_domain = domain;
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ MALI_DEBUG_PRINT(4, ("Group: Activating group %s\n",
+ mali_group_core_description(group)));
+
+ if (MALI_GROUP_STATE_INACTIVE == group->state) {
+ /* Group is inactive, get PM refs in order to power up */
+
+ /*
+ * We'll take a maximum of 2 power domain references pr group,
+		 * one for the group itself, and one for its L2 cache.
+ */
+ struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS];
+ struct mali_group *groups[MALI_MAX_NUM_DOMAIN_REFS];
+ u32 num_domains = 0;
+ mali_bool all_groups_on;
+
+ /* Deal with child groups first */
+ if (mali_group_is_virtual(group)) {
+ /*
+ * The virtual group might have 0, 1 or 2 L2s in
+ * its l2_cache_core array, but we ignore these and
+ * let the child groups take the needed L2 cache ref
+ * on behalf of the virtual group.
+ * In other words; The L2 refs are taken in pair with
+ * the physical group which the L2 is attached to.
+ */
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ /*
+ * Child group is inactive, get PM
+ * refs in order to power up.
+ */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp,
+ &group->group_list,
+ struct mali_group, group_list) {
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE
+ == child->state);
+
+ child->state = MALI_GROUP_STATE_ACTIVATION_PENDING;
+
+ MALI_DEBUG_ASSERT_POINTER(
+ child->pm_domain);
+ domains[num_domains] = child->pm_domain;
+ groups[num_domains] = child;
+ num_domains++;
+
+ /*
+ * Take L2 domain ref for child group.
+ */
+ MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS
+ > num_domains);
+ domains[num_domains] = mali_l2_cache_get_pm_domain(
+ child->l2_cache_core[0]);
+ groups[num_domains] = NULL;
+ MALI_DEBUG_ASSERT(NULL ==
+ child->l2_cache_core[1]);
+ num_domains++;
+ }
+ } else {
+ /* Take L2 domain ref for physical groups. */
+ MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+ num_domains);
+
+ domains[num_domains] = mali_l2_cache_get_pm_domain(
+ group->l2_cache_core[0]);
+ groups[num_domains] = NULL;
+ MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
+ num_domains++;
+ }
+
+ /* Do the group itself last (it's dependencies first) */
+
+ group->state = MALI_GROUP_STATE_ACTIVATION_PENDING;
+
+ MALI_DEBUG_ASSERT_POINTER(group->pm_domain);
+ domains[num_domains] = group->pm_domain;
+ groups[num_domains] = group;
+ num_domains++;
+
+ all_groups_on = mali_pm_get_domain_refs(domains, groups,
+ num_domains);
+
+ /*
+ * Complete activation for group, include
+ * virtual group or physical group.
+ */
+ if (MALI_TRUE == all_groups_on) {
+
+ mali_group_set_active(group);
+ }
+ } else if (MALI_GROUP_STATE_ACTIVE == group->state) {
+ /* Already active */
+ MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+ } else {
+ /*
+ * Activation already pending, group->power_is_on could
+ * be both true or false. We need to wait for power up
+ * notification anyway.
+ */
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING
+ == group->state);
+ }
+
+ MALI_DEBUG_PRINT(4, ("Group: group %s activation result: %s\n",
+ mali_group_core_description(group),
+ MALI_GROUP_STATE_ACTIVE == group->state ?
+ "ACTIVE" : "PENDING"));
+
+ return group->state;
}
-void mali_group_delete(struct mali_group *group)
+mali_bool mali_group_set_active(struct mali_group *group)
{
- u32 i;
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVATION_PENDING == group->state);
+ MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
- MALI_DEBUG_PRINT(4, ("Deleting group %p\n", group));
+ MALI_DEBUG_PRINT(4, ("Group: Activation completed for %s\n",
+ mali_group_core_description(group)));
- MALI_DEBUG_ASSERT(NULL == group->parent_group);
+ if (mali_group_is_virtual(group)) {
+ struct mali_group *child;
+ struct mali_group *temp;
- /* Delete the resources that this group owns */
- if (NULL != group->gp_core) {
- mali_gp_delete(group->gp_core);
- }
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list,
+ struct mali_group, group_list) {
+ if (MALI_TRUE != child->power_is_on) {
+ return MALI_FALSE;
+ }
- if (NULL != group->pp_core) {
- mali_pp_delete(group->pp_core);
- }
+ child->state = MALI_GROUP_STATE_ACTIVE;
+ }
- if (NULL != group->mmu) {
- mali_mmu_delete(group->mmu);
+ mali_group_reset(group);
}
+ /* Go to ACTIVE state */
+ group->state = MALI_GROUP_STATE_ACTIVE;
+
+ return MALI_TRUE;
+}
+
+mali_bool mali_group_deactivate(struct mali_group *group)
+{
+ struct mali_pm_domain *domains[MALI_MAX_NUM_DOMAIN_REFS];
+ u32 num_domains = 0;
+ mali_bool power_down = MALI_FALSE;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_INACTIVE != group->state);
+
+ MALI_DEBUG_PRINT(3, ("Group: Deactivating group %s\n",
+ mali_group_core_description(group)));
+
+ group->state = MALI_GROUP_STATE_INACTIVE;
+
+ MALI_DEBUG_ASSERT_POINTER(group->pm_domain);
+ domains[num_domains] = group->pm_domain;
+ num_domains++;
+
if (mali_group_is_virtual(group)) {
- /* Remove all groups from virtual group */
+ /* Release refs for all child groups */
struct mali_group *child;
struct mali_group *temp;
- _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
- child->parent_group = NULL;
- mali_group_delete(child);
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp,
+ &group->group_list,
+ struct mali_group, group_list) {
+ child->state = MALI_GROUP_STATE_INACTIVE;
+
+ MALI_DEBUG_ASSERT_POINTER(child->pm_domain);
+ domains[num_domains] = child->pm_domain;
+ num_domains++;
+
+ /* Release L2 cache domain for child groups */
+ MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+ num_domains);
+ domains[num_domains] = mali_l2_cache_get_pm_domain(
+ child->l2_cache_core[0]);
+ MALI_DEBUG_ASSERT(NULL == child->l2_cache_core[1]);
+ num_domains++;
}
- mali_dlbu_delete(group->dlbu_core);
+ /*
+ * Must do mali_group_power_down() steps right here for
+ * virtual group, because virtual group itself is likely to
+ * stay powered on, however child groups are now very likely
+ * to be powered off (and thus lose their state).
+ */
- if (NULL != group->bcast_core) {
- mali_bcast_unit_delete(group->bcast_core);
- }
+ mali_group_clear_session(group);
+ /*
+ * Disable the broadcast unit (clear it's mask).
+ * This is needed in case the GPU isn't actually
+ * powered down at this point and groups are
+ * removed from an inactive virtual group.
+ * If not, then the broadcast unit will intercept
+ * their interrupts!
+ */
+ mali_bcast_disable(group->bcast_core);
+ } else {
+ /* Release L2 cache domain for physical groups */
+ MALI_DEBUG_ASSERT(MALI_MAX_NUM_DOMAIN_REFS >
+ num_domains);
+ domains[num_domains] = mali_l2_cache_get_pm_domain(
+ group->l2_cache_core[0]);
+ MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
+ num_domains++;
}
- for (i = 0; i < mali_global_num_groups; i++) {
- if (mali_global_groups[i] == group) {
- mali_global_groups[i] = NULL;
- mali_global_num_groups--;
+ power_down = mali_pm_put_domain_refs(domains, num_domains);
- if (i != mali_global_num_groups) {
- /* We removed a group from the middle of the array -- move the last
- * group to the current position to close the gap */
- mali_global_groups[i] = mali_global_groups[mali_global_num_groups];
- mali_global_groups[mali_global_num_groups] = NULL;
- }
+ return power_down;
+}
- break;
- }
- }
+void mali_group_power_up(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
- if (NULL != group->timeout_timer) {
- _mali_osk_timer_del(group->timeout_timer);
- _mali_osk_timer_term(group->timeout_timer);
- }
+ MALI_DEBUG_PRINT(3, ("Group: Power up for %s\n",
+ mali_group_core_description(group)));
- if (NULL != group->bottom_half_work_mmu) {
- _mali_osk_wq_delete_work(group->bottom_half_work_mmu);
- }
+ group->power_is_on = MALI_TRUE;
- if (NULL != group->bottom_half_work_gp) {
- _mali_osk_wq_delete_work(group->bottom_half_work_gp);
+ if (MALI_FALSE == mali_group_is_virtual(group)
+ && MALI_FALSE == mali_group_is_in_virtual(group)) {
+ mali_group_reset(group);
}
- if (NULL != group->bottom_half_work_pp) {
- _mali_osk_wq_delete_work(group->bottom_half_work_pp);
+	/*
+	 * When we acquire only one physical group from the virtual group,
+	 * we should remove the bcast & dlbu mask from the virtual group and
+	 * reset the bcast and dlbu cores, even though some of the PP cores
+	 * in the virtual group may not be powered on.
+	 */
+ if (MALI_TRUE == mali_group_is_virtual(group)) {
+ mali_bcast_reset(group->bcast_core);
+ mali_dlbu_update_mask(group->dlbu_core);
}
+}
-#ifdef MALI_UPPER_HALF_SCHEDULING
- _mali_osk_spinlock_irq_term(group->lock);
-#else
- _mali_osk_spinlock_term(group->lock);
-#endif
- _mali_osk_free(group);
+/* Record that the group has been powered off.  A physical group loses
+ * its MMU session binding here; for a virtual group the equivalent
+ * cleanup must already have happened in mali_group_deactivate(), which
+ * is debug-asserted below.  Executor lock must be held (asserted). */
+void mali_group_power_down(struct mali_group *group)
+{
+	MALI_DEBUG_ASSERT_POINTER(group);
+	MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
+	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+	MALI_DEBUG_PRINT(3, ("Group: Power down for %s\n",
+			     mali_group_core_description(group)));
+
+	group->power_is_on = MALI_FALSE;
+
+	if (mali_group_is_virtual(group)) {
+		/*
+		 * What we do for physical jobs in this function should
+		 * already have been done in mali_group_deactivate()
+		 * for virtual group.
+		 */
+		MALI_DEBUG_ASSERT(NULL == group->session);
+	} else {
+		mali_group_clear_session(group);
+	}
}
MALI_DEBUG_CODE(static void mali_group_print_virtual(struct mali_group *vgroup)
struct mali_group *group;
struct mali_group *temp;
- MALI_DEBUG_PRINT(4, ("Virtual group %p\n", vgroup));
+ MALI_DEBUG_PRINT(4, ("Virtual group %s (%p)\n",
+ mali_group_core_description(vgroup),
+ vgroup));
MALI_DEBUG_PRINT(4, ("l2_cache_core[0] = %p, ref = %d\n", vgroup->l2_cache_core[0], vgroup->l2_cache_core_ref_count[0]));
MALI_DEBUG_PRINT(4, ("l2_cache_core[1] = %p, ref = %d\n", vgroup->l2_cache_core[1], vgroup->l2_cache_core_ref_count[1]));
i = 0;
_MALI_OSK_LIST_FOREACHENTRY(group, temp, &vgroup->group_list, struct mali_group, group_list) {
- MALI_DEBUG_PRINT(4, ("[%d] %p, l2_cache_core[0] = %p\n", i, group, group->l2_cache_core[0]));
+ MALI_DEBUG_PRINT(4, ("[%d] %s (%p), l2_cache_core[0] = %p\n",
+ i, mali_group_core_description(group),
+ group, group->l2_cache_core[0]));
i++;
}
})
/**
* @brief Add child group to virtual group parent
- *
- * Before calling this function, child must have it's state set to JOINING_VIRTUAL
- * to ensure it's not touched during the transition period. When this function returns,
- * child's state will be IN_VIRTUAL.
*/
-void mali_group_add_group(struct mali_group *parent, struct mali_group *child, mali_bool update_hw)
+void mali_group_add_group(struct mali_group *parent, struct mali_group *child)
{
mali_bool found;
u32 i;
- struct mali_session_data *child_session;
-
- MALI_DEBUG_PRINT(3, ("Adding group %p to virtual group %p\n", child, parent));
- MALI_ASSERT_GROUP_LOCKED(parent);
+ MALI_DEBUG_PRINT(3, ("Adding group %s to virtual group %s\n",
+ mali_group_core_description(child),
+ mali_group_core_description(parent)));
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
MALI_DEBUG_ASSERT(NULL == child->parent_group);
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_JOINING_VIRTUAL == child->state);
_mali_osk_list_addtail(&child->group_list, &parent->group_list);
- child->state = MALI_GROUP_STATE_IN_VIRTUAL;
child->parent_group = parent;
MALI_DEBUG_ASSERT_POINTER(child->l2_cache_core[0]);
mali_bcast_add_group(parent->bcast_core, child);
mali_dlbu_add_group(parent->dlbu_core, child);
- child_session = child->session;
- child->session = NULL;
-
- /* Above this comment, only software state is updated and the HW is not
- * touched. Now, check if Mali is powered and skip the rest if it isn't
- * powered.
- */
-
- if (!update_hw) {
- MALI_DEBUG_CODE(mali_group_print_virtual(parent));
- return;
+ if (MALI_TRUE == parent->power_is_on) {
+ mali_bcast_reset(parent->bcast_core);
+ mali_dlbu_update_mask(parent->dlbu_core);
}
- /* Update MMU */
- if (parent->session == child_session) {
- mali_mmu_zap_tlb(child->mmu);
- } else {
+ if (MALI_TRUE == child->power_is_on) {
if (NULL == parent->session) {
- mali_mmu_activate_empty_page_directory(child->mmu);
+ if (NULL != child->session) {
+ /*
+ * Parent has no session, so clear
+ * child session as well.
+ */
+ mali_mmu_activate_empty_page_directory(child->mmu);
+ }
} else {
- mali_mmu_activate_page_directory(child->mmu, mali_session_get_page_directory(parent->session));
+ if (parent->session == child->session) {
+ /* We already have same session as parent,
+ * so a simple zap should be enough.
+ */
+ mali_mmu_zap_tlb(child->mmu);
+ } else {
+ /*
+ * Parent has a different session, so we must
+ * switch to that sessions page table
+ */
+ mali_mmu_activate_page_directory(child->mmu, mali_session_get_page_directory(parent->session));
+ }
+
+ /* It is the parent which keeps the session from now on */
+ child->session = NULL;
}
+ } else {
+ /* should have been cleared when child was powered down */
+ MALI_DEBUG_ASSERT(NULL == child->session);
}
- mali_dlbu_update_mask(parent->dlbu_core);
-
/* Start job on child when parent is active */
if (NULL != parent->pp_running_job) {
struct mali_pp_job *job = parent->pp_running_job;
- u32 subjob = -1;
- if (mali_pp_job_is_with_dlbu(parent->pp_running_job)) {
- subjob = mali_pp_core_get_id(child->pp_core);
- }
+		/* %p, not %x: child/parent are pointers (%x truncates and
+		 * warns on 64-bit builds). */
+		MALI_DEBUG_PRINT(3, ("Group %p joining running job %d on virtual group %p\n",
+				     child, mali_pp_job_get_id(job), parent));
- /* Take the next unstarted sub job directly without scheduler lock should be
- * safe here. Because: 1) Virtual group is the only consumer of this job.
- * 2) Taking next unstarted sub job doesn't do any change to the job queue itself
- */
- if (mali_pp_job_has_unstarted_sub_jobs(job)) {
- subjob = mali_pp_job_get_first_unstarted_sub_job(job);
- mali_pp_job_mark_sub_job_started(job, subjob);
- }
+ /* Only allowed to add active child to an active parent */
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == parent->state);
+ MALI_DEBUG_ASSERT(MALI_GROUP_STATE_ACTIVE == child->state);
- if (-1 != subjob) {
- MALI_DEBUG_PRINT(3, ("Group %x joining running job %d on virtual group %x\n",
- child, mali_pp_job_get_id(job), parent));
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_WORKING == parent->state);
- /* Reset broadcast unit only when it will help run subjob */
- mali_bcast_reset(parent->bcast_core);
+ mali_pp_job_start(child->pp_core, job, mali_pp_core_get_id(child->pp_core), MALI_TRUE);
- mali_group_start_job_on_group(child, job, subjob);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
- MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
- MALI_PROFILING_EVENT_REASON_SINGLE_HW_FLUSH,
- mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job), 0, 0, 0);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+ mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+		/* Trace the joining child's PP core: 'group' is not in scope
+		 * here (the function parameters are parent/child), and the
+		 * surrounding profiling events all use child->pp_core. */
+		trace_gpu_sched_switch(
+			mali_pp_core_description(child->pp_core),
+			sched_clock(), mali_pp_job_get_tid(job),
+			0, mali_pp_job_get_id(job));
+#endif
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
- MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
- MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
- mali_pp_job_get_pid(job), mali_pp_job_get_tid(job), 0, 0, 0);
#if defined(CONFIG_MALI400_PROFILING)
- trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
- mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
+ trace_mali_core_active(mali_pp_job_get_pid(job), 1 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+ mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
#endif
- }
}
MALI_DEBUG_CODE(mali_group_print_virtual(parent);)
/**
* @brief Remove child group from virtual group parent
- *
- * After the child is removed, it's state will be LEAVING_VIRTUAL and must be set
- * to IDLE before it can be used.
*/
void mali_group_remove_group(struct mali_group *parent, struct mali_group *child)
{
u32 i;
- MALI_ASSERT_GROUP_LOCKED(parent);
-
- MALI_DEBUG_PRINT(3, ("Removing group %p from virtual group %p\n", child, parent));
+ MALI_DEBUG_PRINT(3, ("Removing group %s from virtual group %s\n",
+ mali_group_core_description(child),
+ mali_group_core_description(parent)));
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
MALI_DEBUG_ASSERT(!mali_group_is_virtual(child));
MALI_DEBUG_ASSERT(parent == child->parent_group);
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IN_VIRTUAL == child->state);
- /* Removing groups while running is not yet supported. */
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == parent->state);
-
- mali_group_lock(child);
/* Update Broadcast Unit and DLBU */
mali_bcast_remove_group(parent->bcast_core, child);
mali_dlbu_remove_group(parent->dlbu_core, child);
- /* Update HW only if power is on */
- if (mali_pm_is_power_on()) {
+ if (MALI_TRUE == parent->power_is_on) {
mali_bcast_reset(parent->bcast_core);
mali_dlbu_update_mask(parent->dlbu_core);
}
- _mali_osk_list_delinit(&child->group_list);
-
child->session = parent->session;
child->parent_group = NULL;
- child->state = MALI_GROUP_STATE_LEAVING_VIRTUAL;
+
+ _mali_osk_list_delinit(&child->group_list);
+ if (_mali_osk_list_empty(&parent->group_list)) {
+ parent->session = NULL;
+ }
/* Keep track of the L2 cache cores of child groups */
i = (child->l2_cache_core[0] == parent->l2_cache_core[0]) ? 0 : 1;
MALI_DEBUG_ASSERT(child->l2_cache_core[0] == parent->l2_cache_core[i]);
parent->l2_cache_core_ref_count[i]--;
-
if (parent->l2_cache_core_ref_count[i] == 0) {
parent->l2_cache_core[i] = NULL;
}
MALI_DEBUG_CODE(mali_group_print_virtual(parent));
-
- mali_group_unlock(child);
}
struct mali_group *mali_group_acquire_group(struct mali_group *parent)
{
- struct mali_group *child;
-
- MALI_ASSERT_GROUP_LOCKED(parent);
+ struct mali_group *child = NULL;
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
MALI_DEBUG_ASSERT(mali_group_is_virtual(parent));
- MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&parent->group_list));
- child = _MALI_OSK_LIST_ENTRY(parent->group_list.prev, struct mali_group, group_list);
+ if (!_mali_osk_list_empty(&parent->group_list)) {
+ child = _MALI_OSK_LIST_ENTRY(parent->group_list.prev, struct mali_group, group_list);
+ mali_group_remove_group(parent, child);
+ }
- mali_group_remove_group(parent, child);
+ if (NULL != child) {
+ if (MALI_GROUP_STATE_ACTIVE != parent->state
+ && MALI_TRUE == child->power_is_on) {
+ mali_group_reset(child);
+ }
+ }
return child;
}
void mali_group_reset(struct mali_group *group)
{
- /*
- * This function should not be used to abort jobs,
- * currently only called during insmod and PM resume
- */
- MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
MALI_DEBUG_ASSERT(NULL == group->gp_running_job);
MALI_DEBUG_ASSERT(NULL == group->pp_running_job);
+ MALI_DEBUG_ASSERT(NULL == group->session);
- group->session = NULL;
+ MALI_DEBUG_PRINT(3, ("Group: reset of %s\n",
+ mali_group_core_description(group)));
if (NULL != group->dlbu_core) {
mali_dlbu_reset(group->dlbu_core);
mali_bcast_reset(group->bcast_core);
}
- if (NULL != group->mmu) {
- mali_group_reset_mmu(group);
- }
+ MALI_DEBUG_ASSERT(NULL != group->mmu);
+ mali_group_reset_mmu(group);
if (NULL != group->gp_core) {
+ MALI_DEBUG_ASSERT(NULL == group->pp_core);
mali_gp_reset(group->gp_core);
- }
-
- if (NULL != group->pp_core) {
- mali_group_reset_pp(group);
- }
-}
-
-/* This function is called before running a job on virtual group
- * Remove some child group from the bcast mask necessarily
- * Set child groups particular registers respectively etc
- */
-static void mali_group_job_prepare_virtual(struct mali_group *group, struct mali_pp_job *job,
- u32 first_subjob, u32 last_subjob)
-{
- struct mali_group *child;
- struct mali_group *temp;
- u32 subjob = first_subjob;
-
- MALI_DEBUG_ASSERT_POINTER(job);
- MALI_DEBUG_ASSERT(mali_pp_job_is_virtual_group_job(job));
-
- MALI_DEBUG_ASSERT_POINTER(group);
- MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
- MALI_ASSERT_GROUP_LOCKED(group);
-
- MALI_DEBUG_ASSERT(first_subjob <= last_subjob);
-
- /* Set each core specific registers:
- * 1. Renderer List Address
- * 2. Fragment Shader Stack Address
- * Other general registers are set through Broadcast Unit in one go.
- * Note: for Addtional temporary unused group core in virtual group
- * we need to remove it from Broadcast Unit before start the job in
- * this virtual group, otherwise, we may never get Frame_end interrupt.
- */
- if (!mali_pp_job_is_with_dlbu(job)) {
- _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
- if (subjob <= last_subjob) {
- /* Write specific Renderer List Address for each group */
- mali_pp_write_addr_renderer_list(child->pp_core, job, subjob);
- /* Write specific stack address for each child group */
- mali_pp_write_addr_stack(child->pp_core, job, subjob);
- subjob++;
- MALI_DEBUG_PRINT(4, ("Mali Virtual Group: Virtual group job %u (0x%08X) part %u/%u started.\n",
- mali_pp_job_get_id(job), job, subjob,
- mali_pp_job_get_sub_job_count(job)));
- } else {
- /* Some physical group are just redundant for this run
- * remove it from broadcast
- */
- mali_bcast_remove_group(group->bcast_core, child);
- MALI_DEBUG_PRINT(4, ("Mali Virtual Group: Remained PP group %p remove from bcast_core\n", child));
- }
- }
-
- /* Reset broadcast */
- mali_bcast_reset(group->bcast_core);
} else {
- /* Write stack address for each child group */
- _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
- mali_pp_write_addr_stack(child->pp_core, job, child->pp_core->core_id);
- mali_bcast_add_group(group->bcast_core, child);
- }
-
- /* Reset broadcast */
- mali_bcast_reset(group->bcast_core);
-
- mali_dlbu_config_job(group->dlbu_core, job);
-
- /* Write Renderer List Address for each child group */
- mali_pp_write_addr_renderer_list(group->pp_core, job, 0);
-
- MALI_DEBUG_PRINT(4, ("Mali Virtual Group: Virtual job %u (0x%08X) part %u/%u started (from schedule).\n",
- mali_pp_job_get_id(job), job, 1,
- mali_pp_job_get_sub_job_count(job)));
- }
-}
-
-/* Call this function to make sure group->group_list are consistent with the group->broad_core mask */
-void mali_group_non_dlbu_job_done_virtual(struct mali_group *group)
-{
- struct mali_group *child, *temp;
-
- MALI_ASSERT_GROUP_LOCKED(group);
- MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
-
- _MALI_OSK_LIST_FOREACHENTRY(child, temp,
- &group->group_list, struct mali_group, group_list) {
- mali_bcast_add_group(group->bcast_core, child);
+ MALI_DEBUG_ASSERT(NULL != group->pp_core);
+ mali_group_reset_pp(group);
}
-
- MALI_DEBUG_PRINT(3, ("Mali group: New physical groups added in virtual group at non dlbu job done"));
- /**
- * When new physical groups added in the virtual groups, they may have different
- * page directory with the virtual group. Here just activate the empty page directory
- * for the virtual group to avoid potential inconsistent page directory.
- */
- mali_mmu_activate_empty_page_directory(group->mmu);
- group->session = NULL;
-}
-
-struct mali_gp_core *mali_group_get_gp_core(struct mali_group *group)
-{
- return group->gp_core;
-}
-
-struct mali_pp_core *mali_group_get_pp_core(struct mali_group *group)
-{
- return group->pp_core;
}
void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job)
{
struct mali_session_data *session;
- MALI_ASSERT_GROUP_LOCKED(group);
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ MALI_DEBUG_PRINT(3, ("Group: Starting GP job 0x%08X on group %s\n",
+ job,
+ mali_group_core_description(group)));
session = mali_gp_job_get_session(job);
- if (NULL != group->l2_cache_core[0]) {
- mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_gp_job_get_cache_order(job));
- }
+ MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]);
+ mali_l2_cache_invalidate_conditional(group->l2_cache_core[0], mali_gp_job_get_cache_order(job));
mali_group_activate_page_directory(group, session);
#if defined(CONFIG_MALI400_PROFILING)
if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
- (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
mali_group_report_l2_cache_counters_per_core(group, 0);
+ }
#endif /* #if defined(CONFIG_MALI400_PROFILING) */
#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
- trace_gpu_sched_switch(mali_gp_get_hw_core_desc(group->gp_core), sched_clock(),
- mali_gp_job_get_pid(job), 0, mali_gp_job_get_id(job));
+ trace_gpu_sched_switch(mali_gp_core_description(group->gp_core),
+ sched_clock(), mali_gp_job_get_tid(job),
+ 0, mali_gp_job_get_id(job));
#endif
group->gp_running_job = job;
- group->state = MALI_GROUP_STATE_WORKING;
+ group->is_working = MALI_TRUE;
- /* Setup the timeout timer value and save the job id for the job running on the gp core */
+ /* Setup SW timer and record start time */
+ group->start_time = _mali_osk_time_tickcount();
_mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
+
+ MALI_DEBUG_PRINT(4, ("Group: Started GP job 0x%08X on group %s at %u\n",
+ job,
+ mali_group_core_description(group),
+ group->start_time));
}
/* Used to set all the registers except frame renderer list address and fragment shader stack address
* It means the caller must set these two registers properly before calling this function
*/
-static void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job)
+void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job)
{
struct mali_session_data *session;
- MALI_ASSERT_GROUP_LOCKED(group);
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ MALI_DEBUG_PRINT(3, ("Group: Starting PP job 0x%08X part %u/%u on group %s\n",
+ job, sub_job + 1,
+ mali_pp_job_get_sub_job_count(job),
+ mali_group_core_description(group)));
session = mali_pp_job_get_session(job);
mali_group_activate_page_directory(group, session);
if (mali_group_is_virtual(group)) {
- MALI_DEBUG_ASSERT(mali_pp_job_is_virtual_group_job(job));
+ struct mali_group *child;
+ struct mali_group *temp;
+ u32 core_num = 0;
+
+ MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));
- /* Try to use DMA unit to start job, fallback to writing directly to the core */
- MALI_DEBUG_ASSERT(mali_dma_cmd_buf_is_valid(&job->dma_cmd_buf));
- if (_MALI_OSK_ERR_OK != mali_dma_start(mali_dma_get_global_dma_core(), &job->dma_cmd_buf)) {
- mali_pp_job_start(group->pp_core, job, sub_job);
+ /* Configure DLBU for the job */
+ mali_dlbu_config_job(group->dlbu_core, job);
+
+ /* Write stack address for each child group */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ mali_pp_write_addr_stack(child->pp_core, job);
+ core_num++;
}
+
+ mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
} else {
- mali_pp_job_start(group->pp_core, job, sub_job);
+ mali_pp_job_start(group->pp_core, job, sub_job, MALI_FALSE);
}
/* if the group is virtual, loop through physical groups which belong to this group
mali_pp_job_get_frame_builder_id(job), mali_pp_job_get_flush_id(job));
#endif
}
+
#if defined(CONFIG_MALI400_PROFILING)
if (0 != group->l2_cache_core_ref_count[0]) {
if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
}
}
#endif /* #if defined(CONFIG_MALI400_PROFILING) */
+
} else { /* group is physical - call profiling events for physical cores */
_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
}
#endif /* #if defined(CONFIG_MALI400_PROFILING) */
}
+
#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
- trace_gpu_sched_switch(mali_pp_get_hw_core_desc(group->pp_core), sched_clock(), mali_pp_job_get_tid(job), 0, mali_pp_job_get_id(job));
+ trace_gpu_sched_switch(mali_pp_core_description(group->pp_core),
+ sched_clock(), mali_pp_job_get_tid(job),
+ 0, mali_pp_job_get_id(job));
#endif
+
group->pp_running_job = job;
group->pp_running_sub_job = sub_job;
- group->state = MALI_GROUP_STATE_WORKING;
+ group->is_working = MALI_TRUE;
- /* Setup the timeout timer value and save the job id for the job running on the pp core */
+ /* Setup SW timer and record start time */
+ group->start_time = _mali_osk_time_tickcount();
_mali_osk_timer_mod(group->timeout_timer, _mali_osk_time_mstoticks(mali_max_job_runtime));
-}
-
-void mali_group_start_job_on_virtual(struct mali_group *group, struct mali_pp_job *job,
- u32 first_subjob, u32 last_subjob)
-{
- MALI_DEBUG_ASSERT_POINTER(job);
- MALI_DEBUG_ASSERT(mali_pp_job_is_virtual_group_job(job));
-
- MALI_DEBUG_ASSERT_POINTER(group);
- MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
- MALI_ASSERT_GROUP_LOCKED(group);
-
- MALI_DEBUG_ASSERT(first_subjob <= last_subjob);
-
- /* Prepare the group for running this job */
- mali_group_job_prepare_virtual(group, job, first_subjob, last_subjob);
-
- /* Start job. General setting for all the PP cores */
- mali_group_start_pp_job(group, job, first_subjob);
-}
-
-void mali_group_start_job_on_group(struct mali_group *group, struct mali_pp_job *job, u32 subjob)
-{
- MALI_DEBUG_ASSERT_POINTER(group);
- MALI_DEBUG_ASSERT(!mali_group_is_virtual(group));
- MALI_DEBUG_ASSERT_POINTER(job);
-
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state || MALI_GROUP_STATE_IN_VIRTUAL == group->state);
-
- /*
- * There are two frame registers which are different for each sub job:
- * 1. The Renderer List Address Register (MALI200_REG_ADDR_FRAME)
- * 2. The FS Stack Address Register (MALI200_REG_ADDR_STACK)
- */
- mali_pp_write_addr_renderer_list(group->pp_core, job, subjob);
-
- /* Write specific stack address for each child group */
- mali_pp_write_addr_stack(group->pp_core, job, subjob);
- /* For start a job in a group which is just joining the virtual group
- * just start the job directly, all the accouting information and state
- * updates have been covered by virtual group state
- */
- if (MALI_GROUP_STATE_IN_VIRTUAL == group->state) {
- mali_pp_job_start(group->pp_core, job, subjob);
- return;
- }
+ MALI_DEBUG_PRINT(4, ("Group: Started PP job 0x%08X part %u/%u on group %s at %u\n",
+ job, sub_job + 1,
+ mali_pp_job_get_sub_job_count(job),
+ mali_group_core_description(group),
+ group->start_time));
- /* Start job. General setting for all the PP cores */
- mali_group_start_pp_job(group, job, subjob);
}
-
-
-struct mali_gp_job *mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr)
+void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr)
{
- MALI_ASSERT_GROUP_LOCKED(group);
-
- if (group->state != MALI_GROUP_STATE_OOM ||
- mali_gp_job_get_id(group->gp_running_job) != job_id) {
- return NULL; /* Illegal request or job has already been aborted */
- }
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
- if (NULL != group->l2_cache_core[0]) {
- mali_l2_cache_invalidate(group->l2_cache_core[0]);
- }
+ MALI_DEBUG_ASSERT_POINTER(group->l2_cache_core[0]);
+ mali_l2_cache_invalidate(group->l2_cache_core[0]);
mali_mmu_zap_tlb_without_stall(group->mmu);
mali_gp_resume_with_new_heap(group->gp_core, start_addr, end_addr);
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0), 0, 0, 0, 0, 0);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+ 0, 0, 0, 0, 0);
#if defined(CONFIG_MALI400_PROFILING)
trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 1 /* active */, 1 /* GP */, 0 /* core */,
mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
#endif
-
- group->state = MALI_GROUP_STATE_WORKING;
-
- return group->gp_running_job;
}
static void mali_group_reset_mmu(struct mali_group *group)
struct mali_group *temp;
_mali_osk_errcode_t err;
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
if (!mali_group_is_virtual(group)) {
/* This is a physical group or an idle virtual group -- simply wait for
* the reset to complete. */
err = mali_mmu_reset(group->mmu);
MALI_DEBUG_ASSERT(_MALI_OSK_ERR_OK == err);
} else { /* virtual group */
- err = mali_mmu_reset(group->mmu);
- if (_MALI_OSK_ERR_OK == err) {
- return;
- }
-
/* Loop through all members of this virtual group and wait
* until they are done resetting.
*/
struct mali_group *child;
struct mali_group *temp;
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
mali_pp_reset_async(group->pp_core);
if (!mali_group_is_virtual(group) || NULL == group->pp_running_job) {
/* This is a physical group or an idle virtual group -- simply wait for
* the reset to complete. */
mali_pp_reset_wait(group->pp_core);
- } else { /* virtual group */
+ } else {
/* Loop through all members of this virtual group and wait until they
* are done resetting.
*/
}
}
-/* Group must be locked when entering this function. Will be unlocked before exiting. */
-static void mali_group_complete_pp_and_unlock(struct mali_group *group, mali_bool success, mali_bool in_upper_half)
+struct mali_pp_job *mali_group_complete_pp(struct mali_group *group, mali_bool success, u32 *sub_job)
{
struct mali_pp_job *pp_job_to_return;
- u32 pp_sub_job_to_return;
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
MALI_DEBUG_ASSERT_POINTER(group);
MALI_DEBUG_ASSERT_POINTER(group->pp_core);
MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
- MALI_ASSERT_GROUP_LOCKED(group);
+ MALI_DEBUG_ASSERT_POINTER(sub_job);
+ MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working);
+
+ /* Stop/clear the timeout timer. */
+ _mali_osk_timer_del_async(group->timeout_timer);
+
+ if (NULL != group->pp_running_job) {
+
+ /* Deal with HW counters and profiling */
+
+ if (MALI_TRUE == mali_group_is_virtual(group)) {
+ struct mali_group *child;
+ struct mali_group *temp;
+
+ /* update performance counters from each physical pp core within this virtual group */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ mali_pp_update_performance_counters(group->pp_core, child->pp_core, group->pp_running_job, mali_pp_core_get_id(child->pp_core));
+ }
+
+#if defined(CONFIG_MALI400_PROFILING)
+ /* send profiling data per physical core */
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
+ mali_pp_job_get_perf_counter_value0(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+ mali_pp_job_get_perf_counter_value1(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
+ mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
+ 0, 0);
+
+ trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job),
+ 0 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
+ mali_pp_job_get_frame_builder_id(group->pp_running_job),
+ mali_pp_job_get_flush_id(group->pp_running_job));
+ }
+ if (0 != group->l2_cache_core_ref_count[0]) {
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+ }
+ }
+ if (0 != group->l2_cache_core_ref_count[1]) {
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
+ }
+ }
+
+#endif
+ } else {
+ /* update performance counters for a physical group's pp core */
+ mali_pp_update_performance_counters(group->pp_core, group->pp_core, group->pp_running_job, group->pp_running_sub_job);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
+ MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
+ mali_pp_job_get_perf_counter_value0(group->pp_running_job, group->pp_running_sub_job),
+ mali_pp_job_get_perf_counter_value1(group->pp_running_job, group->pp_running_sub_job),
+ mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
+ 0, 0);
+
+ trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job),
+ 0 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core),
+ mali_pp_job_get_frame_builder_id(group->pp_running_job),
+ mali_pp_job_get_flush_id(group->pp_running_job));
+
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
+ mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
+ }
+#endif
+ }
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(
+ mali_gp_core_description(group->gp_core),
+ sched_clock(), 0, 0, 0);
+#endif
- mali_group_post_process_job_pp(group);
+ }
if (success) {
/* Only do soft reset for successful jobs, a full recovery
}
pp_job_to_return = group->pp_running_job;
- pp_sub_job_to_return = group->pp_running_sub_job;
- group->state = MALI_GROUP_STATE_IDLE;
group->pp_running_job = NULL;
+ group->is_working = MALI_FALSE;
+ *sub_job = group->pp_running_sub_job;
if (!success) {
MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
mali_group_recovery_reset(group);
}
- /* Return job to user, schedule and unlock group. */
- mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, success, in_upper_half);
+ return pp_job_to_return;
}
-/* Group must be locked when entering this function. Will be unlocked before exiting. */
-static void mali_group_complete_gp_and_unlock(struct mali_group *group, mali_bool success)
+struct mali_gp_job *mali_group_complete_gp(struct mali_group *group, mali_bool success)
{
struct mali_gp_job *gp_job_to_return;
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
MALI_DEBUG_ASSERT_POINTER(group);
MALI_DEBUG_ASSERT_POINTER(group->gp_core);
MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
- MALI_ASSERT_GROUP_LOCKED(group);
+ MALI_DEBUG_ASSERT(MALI_TRUE == group->is_working);
+
+ /* Stop/clear the timeout timer. */
+ _mali_osk_timer_del_async(group->timeout_timer);
+
+ if (NULL != group->gp_running_job) {
+ mali_gp_update_performance_counters(group->gp_core, group->gp_running_job);
+
+#if defined(CONFIG_MALI400_PROFILING)
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+ mali_gp_job_get_perf_counter_value0(group->gp_running_job),
+ mali_gp_job_get_perf_counter_value1(group->gp_running_job),
+ mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
+ 0, 0);
+
+ if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
+ (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
+ mali_group_report_l2_cache_counters_per_core(group, 0);
+#endif
- mali_group_post_process_job_gp(group, MALI_FALSE);
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_sched_switch(
+ mali_pp_core_description(group->pp_core),
+ sched_clock(), 0, 0, 0);
+#endif
+
+#if defined(CONFIG_MALI400_PROFILING)
+ trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 0 /* active */, 1 /* GP */, 0 /* core */,
+ mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
+#endif
+
+ mali_gp_job_set_current_heap_addr(group->gp_running_job,
+ mali_gp_read_plbu_alloc_start_addr(group->gp_core));
+ }
if (success) {
/* Only do soft reset for successful jobs, a full recovery
}
gp_job_to_return = group->gp_running_job;
- group->state = MALI_GROUP_STATE_IDLE;
group->gp_running_job = NULL;
+ group->is_working = MALI_FALSE;
if (!success) {
MALI_DEBUG_PRINT(2, ("Mali group: Executing recovery reset due to job failure\n"));
mali_group_recovery_reset(group);
}
- /* Return job to user, schedule and unlock group. */
- mali_gp_scheduler_job_done(group, gp_job_to_return, success);
-}
-
-void mali_group_abort_gp_job(struct mali_group *group, u32 job_id)
-{
- MALI_ASSERT_GROUP_LOCKED(group);
-
- if (MALI_GROUP_STATE_IDLE == group->state ||
- mali_gp_job_get_id(group->gp_running_job) != job_id) {
- return; /* No need to cancel or job has already been aborted or completed */
- }
-
- /* Function will unlock the group, so we need to lock it again */
- mali_group_complete_gp_and_unlock(group, MALI_FALSE);
- mali_group_lock(group);
-}
-
-static void mali_group_abort_pp_job(struct mali_group *group, u32 job_id)
-{
- MALI_ASSERT_GROUP_LOCKED(group);
-
- if (MALI_GROUP_STATE_IDLE == group->state ||
- mali_pp_job_get_id(group->pp_running_job) != job_id) {
- return; /* No need to cancel or job has already been aborted or completed */
- }
-
- mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);
- mali_group_lock(group);
-}
-
-void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session)
-{
- struct mali_gp_job *gp_job;
- struct mali_pp_job *pp_job;
- u32 gp_job_id = 0;
- u32 pp_job_id = 0;
- mali_bool abort_pp = MALI_FALSE;
- mali_bool abort_gp = MALI_FALSE;
-
- mali_group_lock(group);
-
- if (mali_group_is_in_virtual(group)) {
- /* Group is member of a virtual group, don't touch it! */
- mali_group_unlock(group);
- return;
- }
-
- gp_job = group->gp_running_job;
- pp_job = group->pp_running_job;
-
- if ((NULL != gp_job) && (mali_gp_job_get_session(gp_job) == session)) {
- MALI_DEBUG_PRINT(4, ("Aborting GP job 0x%08x from session 0x%08x\n", gp_job, session));
-
- gp_job_id = mali_gp_job_get_id(gp_job);
- abort_gp = MALI_TRUE;
- }
-
- if ((NULL != pp_job) && (mali_pp_job_get_session(pp_job) == session)) {
- MALI_DEBUG_PRINT(4, ("Mali group: Aborting PP job 0x%08x from session 0x%08x\n", pp_job, session));
-
- pp_job_id = mali_pp_job_get_id(pp_job);
- abort_pp = MALI_TRUE;
- }
-
- if (abort_gp) {
- mali_group_abort_gp_job(group, gp_job_id);
- }
- if (abort_pp) {
- mali_group_abort_pp_job(group, pp_job_id);
- }
-
- mali_group_remove_session_if_unused(group, session);
-
- mali_group_unlock(group);
+ return gp_job_to_return;
}
struct mali_group *mali_group_get_glob_group(u32 index)
static void mali_group_activate_page_directory(struct mali_group *group, struct mali_session_data *session)
{
- MALI_ASSERT_GROUP_LOCKED(group);
+ MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group %s\n",
+ mali_session_get_page_directory(session), session,
+ mali_group_core_description(group)));
- MALI_DEBUG_PRINT(5, ("Mali group: Activating page directory 0x%08X from session 0x%08X on group 0x%08X\n", mali_session_get_page_directory(session), session, group));
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
if (group->session != session) {
/* Different session than last time, so we need to do some work */
- MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group 0x%08X\n", session, group->session, group));
+ MALI_DEBUG_PRINT(5, ("Mali group: Activate session: %08x previous: %08x on group %s\n",
+ session, group->session,
+ mali_group_core_description(group)));
mali_mmu_activate_page_directory(group->mmu, mali_session_get_page_directory(session));
group->session = session;
} else {
/* Same session as last time, so no work required */
- MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group 0x%08X\n", session->page_directory, group));
+ MALI_DEBUG_PRINT(4, ("Mali group: Activate existing session 0x%08X on group %s\n",
+ session->page_directory,
+ mali_group_core_description(group)));
mali_mmu_zap_tlb_without_stall(group->mmu);
}
}
-static void mali_group_remove_session_if_unused(struct mali_group *group, struct mali_session_data *session)
-{
- MALI_ASSERT_GROUP_LOCKED(group);
-
- if (MALI_GROUP_STATE_IDLE == group->state) {
- if (group->session == session) {
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_WORKING != group->state);
- MALI_DEBUG_ASSERT(MALI_TRUE == group->power_is_on);
- MALI_DEBUG_PRINT(3, ("Mali group: Deactivating unused session 0x%08X on group %08X\n", session, group));
- mali_mmu_activate_empty_page_directory(group->mmu);
- group->session = NULL;
- }
- }
-}
-
-mali_bool mali_group_power_is_on(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
- return group->power_is_on;
-}
-
-void mali_group_power_on_group(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT_POINTER(group);
- MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state
- || MALI_GROUP_STATE_IN_VIRTUAL == group->state
- || MALI_GROUP_STATE_JOINING_VIRTUAL == group->state
- || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
- || MALI_GROUP_STATE_DISABLED == group->state);
-
- MALI_DEBUG_PRINT(3, ("Group %p powered on\n", group));
-
- group->power_is_on = MALI_TRUE;
-}
-
-void mali_group_power_off_group(struct mali_group *group, mali_bool do_power_change)
-{
- MALI_DEBUG_ASSERT_POINTER(group);
- MALI_DEBUG_ASSERT_LOCK_HELD(group->lock);
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_IDLE == group->state
- || MALI_GROUP_STATE_IN_VIRTUAL == group->state
- || MALI_GROUP_STATE_JOINING_VIRTUAL == group->state
- || MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state
- || MALI_GROUP_STATE_DISABLED == group->state);
-
- MALI_DEBUG_PRINT(3, ("Group %p powered off\n", group));
-
- /* It is necessary to set group->session = NULL so that the powered off MMU is not written
- * to on map/unmap. It is also necessary to set group->power_is_on = MALI_FALSE so that
- * pending bottom_halves does not access powered off cores. */
-
- group->session = NULL;
-
- if (do_power_change) {
- group->power_is_on = MALI_FALSE;
- }
-}
-
-void mali_group_power_on(void)
-{
- int i;
- for (i = 0; i < mali_global_num_groups; i++) {
- struct mali_group *group = mali_global_groups[i];
-
- mali_group_lock(group);
- if (MALI_GROUP_STATE_DISABLED == group->state) {
- MALI_DEBUG_ASSERT(MALI_FALSE == group->power_is_on);
- } else {
- mali_group_power_on_group(group);
- }
- mali_group_unlock(group);
- }
- MALI_DEBUG_PRINT(4, ("Mali Group: power on\n"));
-}
-
-void mali_group_power_off(mali_bool do_power_change)
-{
- int i;
-
- for (i = 0; i < mali_global_num_groups; i++) {
- struct mali_group *group = mali_global_groups[i];
-
- mali_group_lock(group);
- if (MALI_GROUP_STATE_DISABLED == group->state) {
- MALI_DEBUG_ASSERT(MALI_FALSE == group->power_is_on);
- } else {
- mali_group_power_off_group(group, do_power_change);
- }
- mali_group_unlock(group);
- }
- MALI_DEBUG_PRINT(4, ("Mali Group: power off\n"));
-}
-
-static void mali_group_recovery_reset(struct mali_group *group)
+static void mali_group_recovery_reset(struct mali_group *group)
{
_mali_osk_errcode_t err;
- MALI_ASSERT_GROUP_LOCKED(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
/* Stop cores, bus stop */
if (NULL != group->pp_core) {
u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size)
{
int n = 0;
+ int i;
+ struct mali_group *child;
+ struct mali_group *temp;
- n += _mali_osk_snprintf(buf + n, size - n, "Group: %p\n", group);
- n += _mali_osk_snprintf(buf + n, size - n, "\tstate: %d\n", group->state);
- if (group->gp_core) {
- n += mali_gp_dump_state(group->gp_core, buf + n, size - n);
- n += _mali_osk_snprintf(buf + n, size - n, "\tGP job: %p\n", group->gp_running_job);
- }
- if (group->pp_core) {
- n += mali_pp_dump_state(group->pp_core, buf + n, size - n);
- n += _mali_osk_snprintf(buf + n, size - n, "\tPP job: %p, subjob %d \n",
- group->pp_running_job, group->pp_running_sub_job);
+ if (mali_group_is_virtual(group)) {
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Virtual PP Group: %p\n", group);
+ } else if (mali_group_is_in_virtual(group)) {
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Child PP Group: %p\n", group);
+ } else if (NULL != group->pp_core) {
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "Physical PP Group: %p\n", group);
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "GP Group: %p\n", group);
}
- return n;
-}
-#endif
-
-/* Group must be locked when entering this function. Will be unlocked before exiting. */
-static void mali_group_mmu_page_fault_and_unlock(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT_POINTER(group);
- MALI_ASSERT_GROUP_LOCKED(group);
-
- if (NULL != group->pp_core) {
- struct mali_pp_job *pp_job_to_return;
- u32 pp_sub_job_to_return;
-
- MALI_DEBUG_ASSERT_POINTER(group->pp_running_job);
-
- mali_group_post_process_job_pp(group);
-
- pp_job_to_return = group->pp_running_job;
- pp_sub_job_to_return = group->pp_running_sub_job;
- group->state = MALI_GROUP_STATE_IDLE;
- group->pp_running_job = NULL;
-
- mali_group_recovery_reset(group); /* This will also clear the page fault itself */
+ switch (group->state) {
+ case MALI_GROUP_STATE_INACTIVE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tstate: INACTIVE\n");
+ break;
+ case MALI_GROUP_STATE_ACTIVATION_PENDING:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tstate: ACTIVATION_PENDING\n");
+ break;
+ case MALI_GROUP_STATE_ACTIVE:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tstate: MALI_GROUP_STATE_ACTIVE\n");
+ break;
+ default:
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tstate: UNKNOWN (%d)\n", group->state);
+ MALI_DEBUG_ASSERT(0);
+ break;
+ }
- /* Will unlock group. */
- mali_pp_scheduler_job_done(group, pp_job_to_return, pp_sub_job_to_return, MALI_FALSE, MALI_FALSE);
- } else {
- struct mali_gp_job *gp_job_to_return;
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tSW power: %s\n",
+ group->power_is_on ? "On" : "Off");
- MALI_DEBUG_ASSERT_POINTER(group->gp_running_job);
+ n += mali_pm_dump_state_domain(group->pm_domain, buf + n, size - n);
- mali_group_post_process_job_gp(group, MALI_FALSE);
+ for (i = 0; i < 2; i++) {
+ if (NULL != group->l2_cache_core[i]) {
+ struct mali_pm_domain *domain;
+ domain = mali_l2_cache_get_pm_domain(
+ group->l2_cache_core[i]);
+ n += mali_pm_dump_state_domain(domain,
+ buf + n, size - n);
+ }
+ }
- gp_job_to_return = group->gp_running_job;
- group->state = MALI_GROUP_STATE_IDLE;
- group->gp_running_job = NULL;
+ if (group->gp_core) {
+ n += mali_gp_dump_state(group->gp_core, buf + n, size - n);
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tGP running job: %p\n", group->gp_running_job);
+ }
- mali_group_recovery_reset(group); /* This will also clear the page fault itself */
+ if (group->pp_core) {
+ n += mali_pp_dump_state(group->pp_core, buf + n, size - n);
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tPP running job: %p, subjob %d \n",
+ group->pp_running_job,
+ group->pp_running_sub_job);
+ }
- /* Will unlock group. */
- mali_gp_scheduler_job_done(group, gp_job_to_return, MALI_FALSE);
+ _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list,
+ struct mali_group, group_list) {
+ n += mali_group_dump_state(child, buf + n, size - n);
}
+
+ return n;
}
+#endif
_mali_osk_errcode_t mali_group_upper_half_mmu(void *data)
{
- _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
struct mali_group *group = (struct mali_group *)data;
- struct mali_mmu_core *mmu = group->mmu;
- u32 int_stat;
+ _mali_osk_errcode_t ret;
- MALI_DEBUG_ASSERT_POINTER(mmu);
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
-#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
- if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
- goto out;
+ if (NULL != group->gp_core) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_mmu_get_rawstat(group->mmu), 0);
}
-#endif
- /* Check if it was our device which caused the interrupt (we could be sharing the IRQ line) */
- int_stat = mali_mmu_get_int_status(mmu);
- if (0 != int_stat) {
- struct mali_group *parent = group->parent_group;
+ ret = mali_executor_interrupt_mmu(group, MALI_TRUE);
- /* page fault or bus error, we thread them both in the same way */
- mali_mmu_mask_all_interrupts(mmu);
- if (NULL == parent) {
- _mali_osk_wq_schedule_work(group->bottom_half_work_mmu);
- } else {
- _mali_osk_wq_schedule_work(parent->bottom_half_work_mmu);
- }
- err = _MALI_OSK_ERR_OK;
- goto out;
+ if (NULL != group->gp_core) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ } else {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_mmu_get_rawstat(group->mmu), 0);
}
-out:
-#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
- mali_pm_domain_unlock_state(group->pm_domain);
-#endif
-
- return err;
+ return ret;
}
static void mali_group_bottom_half_mmu(void *data)
{
struct mali_group *group = (struct mali_group *)data;
- struct mali_mmu_core *mmu = group->mmu;
- u32 rawstat;
- MALI_DEBUG_CODE(u32 status);
-
- MALI_DEBUG_ASSERT_POINTER(mmu);
-
- mali_group_lock(group);
- MALI_DEBUG_ASSERT(NULL == group->parent_group);
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
- if (MALI_FALSE == mali_group_power_is_on(group)) {
- MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", mmu->hw_core.description));
- mali_group_unlock(group);
- return;
+ if (NULL != group->gp_core) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_mmu_get_rawstat(group->mmu), 0);
}
- rawstat = mali_mmu_get_rawstat(mmu);
- MALI_DEBUG_CODE(status = mali_mmu_get_status(mmu));
-
- MALI_DEBUG_PRINT(4, ("Mali MMU: Bottom half, interrupt 0x%08X, status 0x%08X\n", rawstat, status));
-
- if (rawstat & (MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR)) {
- /* An actual page fault has occurred. */
-#ifdef DEBUG
- u32 fault_address = mali_mmu_get_page_fault_addr(mmu);
- MALI_DEBUG_PRINT(2, ("Mali MMU: Page fault detected at 0x%08x from bus id %d of type %s on %s\n",
- fault_address,
- (status >> 6) & 0x1F,
- (status & 32) ? "write" : "read",
- mmu->hw_core.description));
- mali_mmu_pagedir_diag(group->session->page_directory, fault_address);
-#endif
+ mali_executor_interrupt_mmu(group, MALI_FALSE);
- mali_group_mmu_page_fault_and_unlock(group);
- return;
+ if (NULL != group->gp_core) {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(0),
+ mali_mmu_get_rawstat(group->mmu), 0);
+ } else {
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_mmu_get_rawstat(group->mmu), 0);
}
-
- mali_group_unlock(group);
}
_mali_osk_errcode_t mali_group_upper_half_gp(void *data)
{
- _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
struct mali_group *group = (struct mali_group *)data;
- struct mali_gp_core *core = group->gp_core;
- u32 irq_readout;
-
-#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
- if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
- goto out;
- }
-#endif
+ _mali_osk_errcode_t ret;
- irq_readout = mali_gp_get_int_stat(core);
-
- if (MALIGP2_REG_VAL_IRQ_MASK_NONE != irq_readout) {
- /* Mask out all IRQs from this core until IRQ is handled */
- mali_gp_mask_all_interrupts(core);
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0) | MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT, irq_readout, 0, 0, 0, 0);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+ mali_gp_get_rawstat(group->gp_core), 0);
- /* We do need to handle this in a bottom half */
- _mali_osk_wq_schedule_work(group->bottom_half_work_gp);
+ MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n",
+ mali_gp_get_rawstat(group->gp_core),
+ mali_group_core_description(group)));
- err = _MALI_OSK_ERR_OK;
- goto out;
- }
+ ret = mali_executor_interrupt_gp(group, MALI_TRUE);
-out:
-#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
- mali_pm_domain_unlock_state(group->pm_domain);
-#endif
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+ mali_gp_get_rawstat(group->gp_core), 0);
- return err;
+ return ret;
}
static void mali_group_bottom_half_gp(void *data)
{
struct mali_group *group = (struct mali_group *)data;
- u32 irq_readout;
- u32 irq_errors;
-
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0), 0, 0);
-
- mali_group_lock(group);
-
- if (MALI_FALSE == mali_group_power_is_on(group)) {
- MALI_PRINT_ERROR(("Mali group: Interrupt bottom half of %s when core is OFF.", mali_gp_get_hw_core_desc(group->gp_core)));
- mali_group_unlock(group);
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
- return;
- }
-
- irq_readout = mali_gp_read_rawstat(group->gp_core);
-
- MALI_DEBUG_PRINT(4, ("Mali group: GP bottom half IRQ 0x%08X from core %s\n", irq_readout, mali_gp_get_hw_core_desc(group->gp_core)));
-
- if (irq_readout & (MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST)) {
- u32 core_status = mali_gp_read_core_status(group->gp_core);
- if (0 == (core_status & MALIGP2_REG_VAL_STATUS_MASK_ACTIVE)) {
- MALI_DEBUG_PRINT(4, ("Mali group: GP job completed, calling group handler\n"));
- group->core_timed_out = MALI_FALSE;
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
- 0, _mali_osk_get_tid(), 0, 0, 0);
-
- mali_group_complete_gp_and_unlock(group, MALI_TRUE);
- return;
- }
- }
-
- /*
- * Now lets look at the possible error cases (IRQ indicating error or timeout)
- * END_CMD_LST, HANG and PLBU_OOM interrupts are not considered error.
- */
- irq_errors = irq_readout & ~(MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST | MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | MALIGP2_REG_VAL_IRQ_HANG | MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM);
- if (0 != irq_errors) {
- MALI_PRINT_ERROR(("Mali group: Unknown interrupt 0x%08X from core %s, aborting job\n", irq_readout, mali_gp_get_hw_core_desc(group->gp_core)));
- group->core_timed_out = MALI_FALSE;
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
- 0, _mali_osk_get_tid(), 0, 0, 0);
-
- mali_group_complete_gp_and_unlock(group, MALI_FALSE);
- mali_group_error++;
- return;
- } else if (group->core_timed_out) { /* SW timeout */
- group->core_timed_out = MALI_FALSE;
- if (!_mali_osk_timer_pending(group->timeout_timer) && NULL != group->gp_running_job) {
- MALI_PRINT(("Mali group: Job %d timed out\n", mali_gp_job_get_id(group->gp_running_job)));
-
- mali_group_complete_gp_and_unlock(group, MALI_FALSE);
- mali_group_error++;
- return;
- }
- } else if (irq_readout & MALIGP2_REG_VAL_IRQ_PLBU_OUT_OF_MEM) {
- /* GP wants more memory in order to continue. */
- MALI_DEBUG_PRINT(3, ("Mali group: PLBU needs more heap memory\n"));
-
- group->state = MALI_GROUP_STATE_OOM;
- mali_group_unlock(group); /* Nothing to do on the HW side, so just release group lock right away */
- mali_gp_scheduler_oom(group, group->gp_running_job);
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
- return;
- }
-
- /*
- * The only way to get here is if we only got one of two needed END_CMD_LST
- * interrupts. Enable all but not the complete interrupt that has been
- * received and continue to run.
- */
- mali_gp_enable_interrupts(group->gp_core, irq_readout & (MALIGP2_REG_VAL_IRQ_PLBU_END_CMD_LST | MALIGP2_REG_VAL_IRQ_VS_END_CMD_LST));
- mali_group_unlock(group);
-
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_EVENT_CHANNEL_SOFTWARE | MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF, 0, _mali_osk_get_tid(), 0, 0, 0);
-}
-static void mali_group_post_process_job_gp(struct mali_group *group, mali_bool suspend)
-{
- /* Stop the timeout timer. */
- _mali_osk_timer_del_async(group->timeout_timer);
-
- if (NULL == group->gp_running_job) {
- /* Nothing to do */
- return;
- }
-
- mali_gp_update_performance_counters(group->gp_core, group->gp_running_job, suspend);
-
-#if defined(CONFIG_MALI400_PROFILING)
- if (suspend) {
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
- mali_gp_job_get_perf_counter_value0(group->gp_running_job),
- mali_gp_job_get_perf_counter_value1(group->gp_running_job),
- mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
- 0, 0);
- } else {
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP | MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
- mali_gp_job_get_perf_counter_value0(group->gp_running_job),
- mali_gp_job_get_perf_counter_value1(group->gp_running_job),
- mali_gp_job_get_perf_counter_src0(group->gp_running_job) | (mali_gp_job_get_perf_counter_src1(group->gp_running_job) << 8),
- 0, 0);
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
- if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
- (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0])))
- mali_group_report_l2_cache_counters_per_core(group, 0);
- }
-#endif
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+ mali_gp_get_rawstat(group->gp_core), 0);
-#if defined(CONFIG_MALI400_PROFILING)
- trace_mali_core_active(mali_gp_job_get_pid(group->gp_running_job), 0 /* active */, 1 /* GP */, 0 /* core */,
- mali_gp_job_get_frame_builder_id(group->gp_running_job), mali_gp_job_get_flush_id(group->gp_running_job));
-#endif
+ mali_executor_interrupt_gp(group, MALI_FALSE);
- mali_gp_job_set_current_heap_addr(group->gp_running_job,
- mali_gp_read_plbu_alloc_start_addr(group->gp_core));
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(0),
+ mali_gp_get_rawstat(group->gp_core), 0);
}
_mali_osk_errcode_t mali_group_upper_half_pp(void *data)
{
- _mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
struct mali_group *group = (struct mali_group *)data;
- struct mali_pp_core *core = group->pp_core;
- u32 irq_readout;
-
-#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
- if (MALI_FALSE == mali_pm_domain_lock_state(group->pm_domain)) {
- goto out;
- }
-#endif
-
- /*
- * For Mali-450 there is one particular case we need to watch out for:
- *
- * Criteria 1) this function call can be due to a shared interrupt,
- * and not necessary because this core signaled an interrupt.
- * Criteria 2) this core is a part of a virtual group, and thus it should
- * not do any post processing.
- * Criteria 3) this core has actually indicated that is has completed by
- * having set raw_stat/int_stat registers to != 0
- *
- * If all this criteria is meet, then we could incorrectly start post
- * processing on the wrong group object (this should only happen on the
- * parent group)
- */
-#if !defined(MALI_UPPER_HALF_SCHEDULING)
- if (mali_group_is_in_virtual(group)) {
- /*
- * This check is done without the group lock held, which could lead to
- * a potential race. This is however ok, since we will safely re-check
- * this with the group lock held at a later stage. This is just an
- * early out which will strongly benefit shared IRQ systems.
- */
- err = _MALI_OSK_ERR_OK;
- goto out;
- }
-#endif
-
- irq_readout = mali_pp_get_int_stat(core);
- if (MALI200_REG_VAL_IRQ_MASK_NONE != irq_readout) {
- /* Mask out all IRQs from this core until IRQ is handled */
- mali_pp_mask_all_interrupts(core);
-
-#if defined(CONFIG_MALI400_PROFILING)
- /* Currently no support for this interrupt event for the virtual PP core */
- if (!mali_group_is_virtual(group)) {
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
- MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(core->core_id) |
- MALI_PROFILING_EVENT_REASON_SINGLE_HW_INTERRUPT,
- irq_readout, 0, 0, 0, 0);
- }
-#endif
-
-#if defined(MALI_UPPER_HALF_SCHEDULING)
- /* Check if job is complete without errors */
- if (MALI200_REG_VAL_IRQ_END_OF_FRAME == irq_readout) {
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
- 0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
-
- MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler from upper half\n"));
-
- mali_group_lock(group);
-
- /* Check if job is complete without errors, again, after taking the group lock */
- irq_readout = mali_pp_read_rawstat(core);
- if (MALI200_REG_VAL_IRQ_END_OF_FRAME != irq_readout) {
- mali_pp_enable_interrupts(core);
- mali_group_unlock(group);
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
- 0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
- err = _MALI_OSK_ERR_OK;
- goto out;
- }
-
- if (mali_group_is_virtual(group)) {
- u32 status_readout = mali_pp_read_status(group->pp_core);
- if (status_readout & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE) {
- MALI_DEBUG_PRINT(6, ("Mali PP: Not all cores in broadcast completed\n"));
- mali_pp_enable_interrupts(core);
- mali_group_unlock(group);
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
- 0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
- err = _MALI_OSK_ERR_OK;
- goto out;
- }
- }
-
- if (mali_group_is_in_virtual(group)) {
- /* We're member of a virtual group, so interrupt should be handled by the virtual group */
- mali_pp_enable_interrupts(core);
- mali_group_unlock(group);
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
- 0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
- err = _MALI_OSK_ERR_FAULT;
- goto out;
- }
+ _mali_osk_errcode_t ret;
- group->core_timed_out = MALI_FALSE;
-
- mali_group_complete_pp_and_unlock(group, MALI_TRUE, MALI_TRUE);
-
- /* No need to enable interrupts again, since the core will be reset while completing the job */
-
- MALI_DEBUG_PRINT(6, ("Mali PP: Upper half job done\n"));
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
- 0, 0, MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_pp_get_rawstat(group->pp_core), 0);
- err = _MALI_OSK_ERR_OK;
- goto out;
- }
-#endif
+ MALI_DEBUG_PRINT(4, ("Group: Interrupt 0x%08X from %s\n",
+ mali_pp_get_rawstat(group->pp_core),
+ mali_group_core_description(group)));
- /* We do need to handle this in a bottom half */
- _mali_osk_wq_schedule_work(group->bottom_half_work_pp);
- err = _MALI_OSK_ERR_OK;
- goto out;
- }
+ ret = mali_executor_interrupt_pp(group, MALI_TRUE);
-out:
-#if defined(CONFIG_MALI_SHARED_INTERRUPTS)
- mali_pm_domain_unlock_state(group->pm_domain);
-#endif
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_START_STOP_SW_UPPER_HALF,
+ 0, 0, /* No pid and tid for interrupt handler */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_pp_get_rawstat(group->pp_core), 0);
- return err;
+ return ret;
}
static void mali_group_bottom_half_pp(void *data)
{
struct mali_group *group = (struct mali_group *)data;
- struct mali_pp_core *core = group->pp_core;
- u32 irq_readout;
- u32 irq_errors;
+
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_START |
MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
- 0, _mali_osk_get_tid(), MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(core->core_id), 0, 0);
-
- mali_group_lock(group);
-
- if (mali_group_is_in_virtual(group)) {
- /* We're member of a virtual group, so interrupt should be handled by the virtual group */
- mali_pp_enable_interrupts(core);
- mali_group_unlock(group);
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
- 0, _mali_osk_get_tid(), 0, 0, 0);
- return;
- }
-
- if (MALI_FALSE == mali_group_power_is_on(group)) {
- MALI_PRINT_ERROR(("Interrupt bottom half of %s when core is OFF.", mali_pp_get_hw_core_desc(core)));
- mali_group_unlock(group);
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
- 0, _mali_osk_get_tid(), 0, 0, 0);
- return;
- }
-
- irq_readout = mali_pp_read_rawstat(group->pp_core);
-
- MALI_DEBUG_PRINT(4, ("Mali PP: Bottom half IRQ 0x%08X from core %s\n", irq_readout, mali_pp_get_hw_core_desc(group->pp_core)));
-
- /* Check if job is complete without errors */
- if (MALI200_REG_VAL_IRQ_END_OF_FRAME == irq_readout) {
- if (mali_group_is_virtual(group)) {
- u32 status_readout = mali_pp_read_status(group->pp_core);
-
- if (status_readout & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE && !group->core_timed_out) {
- MALI_DEBUG_PRINT(6, ("Mali PP: Not all cores in broadcast completed\n"));
- mali_pp_enable_interrupts(core);
- mali_group_unlock(group);
-
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
- 0, _mali_osk_get_tid(), 0, 0, 0);
- return;
- }
- }
-
- if (!group->core_timed_out) {
- MALI_DEBUG_PRINT(3, ("Mali PP: Job completed, calling group handler\n"));
- group->core_timed_out = MALI_FALSE;
-
- mali_group_complete_pp_and_unlock(group, MALI_TRUE, MALI_FALSE);
-
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
- 0, _mali_osk_get_tid(), 0, 0, 0);
- return;
- }
- }
-
- /*
- * Now lets look at the possible error cases (IRQ indicating error or timeout)
- * END_OF_FRAME and HANG interrupts are not considered error.
- */
- irq_errors = irq_readout & ~(MALI200_REG_VAL_IRQ_END_OF_FRAME | MALI200_REG_VAL_IRQ_HANG);
- if (0 != irq_errors) {
- MALI_PRINT_ERROR(("Mali PP: Unexpected interrupt 0x%08X from core %s, aborting job\n",
- irq_readout, mali_pp_get_hw_core_desc(group->pp_core)));
- group->core_timed_out = MALI_FALSE;
-
- mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);
-
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
- 0, _mali_osk_get_tid(), 0, 0, 0);
- mali_group_error++;
- return;
- } else if (group->core_timed_out) { /* SW timeout */
- group->core_timed_out = MALI_FALSE;
- if (!_mali_osk_timer_pending(group->timeout_timer) && NULL != group->pp_running_job) {
- MALI_PRINT(("Mali PP: Job %d timed out on core %s\n",
- mali_pp_job_get_id(group->pp_running_job), mali_pp_get_hw_core_desc(core)));
-
- mali_group_complete_pp_and_unlock(group, MALI_FALSE, MALI_FALSE);
- } else {
- mali_group_unlock(group);
- }
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_pp_get_rawstat(group->pp_core), 0);
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
- MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
- 0, _mali_osk_get_tid(), 0, 0, 0);
- mali_group_error++;
- return;
- }
-
- /*
- * We should never get here, re-enable interrupts and continue
- */
- if (0 == irq_readout) {
- MALI_DEBUG_PRINT(3, ("Mali group: No interrupt found on core %s\n",
- mali_pp_get_hw_core_desc(group->pp_core)));
- } else {
- MALI_PRINT_ERROR(("Mali group: Unhandled PP interrupt 0x%08X on %s\n", irq_readout,
- mali_pp_get_hw_core_desc(group->pp_core)));
- }
- mali_pp_enable_interrupts(core);
- mali_group_unlock(group);
+ mali_executor_interrupt_pp(group, MALI_FALSE);
_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
MALI_PROFILING_EVENT_REASON_START_STOP_SW_BOTTOM_HALF,
- 0, _mali_osk_get_tid(), 0, 0, 0);
-}
-
-static void mali_group_post_process_job_pp(struct mali_group *group)
-{
- MALI_ASSERT_GROUP_LOCKED(group);
-
- /* Stop the timeout timer. */
- _mali_osk_timer_del_async(group->timeout_timer);
-
- if (NULL != group->pp_running_job) {
- if (MALI_TRUE == mali_group_is_virtual(group)) {
- struct mali_group *child;
- struct mali_group *temp;
-
- /* update performance counters from each physical pp core within this virtual group */
- _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
- mali_pp_update_performance_counters(group->pp_core, child->pp_core, group->pp_running_job, mali_pp_core_get_id(child->pp_core));
- }
-
-#if defined(CONFIG_MALI400_PROFILING)
- /* send profiling data per physical core */
- _MALI_OSK_LIST_FOREACHENTRY(child, temp, &group->group_list, struct mali_group, group_list) {
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(child->pp_core)) |
- MALI_PROFILING_EVENT_REASON_START_STOP_HW_VIRTUAL,
- mali_pp_job_get_perf_counter_value0(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
- mali_pp_job_get_perf_counter_value1(group->pp_running_job, mali_pp_core_get_id(child->pp_core)),
- mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
- 0, 0);
-
- trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job),
- 0 /* active */, 0 /* PP */, mali_pp_core_get_id(child->pp_core),
- mali_pp_job_get_frame_builder_id(group->pp_running_job),
- mali_pp_job_get_flush_id(group->pp_running_job));
- }
- if (0 != group->l2_cache_core_ref_count[0]) {
- if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
- (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
- mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
- }
- }
- if (0 != group->l2_cache_core_ref_count[1]) {
- if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[1])) &&
- (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[1]))) {
- mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[1]));
- }
- }
-
-#endif
- } else {
- /* update performance counters for a physical group's pp core */
- mali_pp_update_performance_counters(group->pp_core, group->pp_core, group->pp_running_job, group->pp_running_sub_job);
-
-#if defined(CONFIG_MALI400_PROFILING)
- _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_STOP |
- MALI_PROFILING_MAKE_EVENT_CHANNEL_PP(mali_pp_core_get_id(group->pp_core)) |
- MALI_PROFILING_EVENT_REASON_START_STOP_HW_PHYSICAL,
- mali_pp_job_get_perf_counter_value0(group->pp_running_job, group->pp_running_sub_job),
- mali_pp_job_get_perf_counter_value1(group->pp_running_job, group->pp_running_sub_job),
- mali_pp_job_get_perf_counter_src0(group->pp_running_job, group->pp_running_sub_job) | (mali_pp_job_get_perf_counter_src1(group->pp_running_job, group->pp_running_sub_job) << 8),
- 0, 0);
-
- trace_mali_core_active(mali_pp_job_get_pid(group->pp_running_job), 0 /* active */, 0 /* PP */, mali_pp_core_get_id(group->pp_core),
- mali_pp_job_get_frame_builder_id(group->pp_running_job), mali_pp_job_get_flush_id(group->pp_running_job));
-
- if ((MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src0(group->l2_cache_core[0])) &&
- (MALI_HW_CORE_NO_COUNTER != mali_l2_cache_core_get_counter_src1(group->l2_cache_core[0]))) {
- mali_group_report_l2_cache_counters_per_core(group, mali_l2_cache_get_id(group->l2_cache_core[0]));
- }
-#endif
- }
- }
+ 0, _mali_osk_get_tid(), /* pid and tid */
+ MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(
+ mali_pp_core_get_id(group->pp_core)),
+ mali_pp_get_rawstat(group->pp_core), 0);
}
static void mali_group_timeout(void *data)
{
struct mali_group *group = (struct mali_group *)data;
+ MALI_DEBUG_ASSERT_POINTER(group);
- group->core_timed_out = MALI_TRUE;
+ MALI_DEBUG_PRINT(2, ("Group: timeout handler for %s at %u\n",
+ mali_group_core_description(group),
+ _mali_osk_time_tickcount()));
if (NULL != group->gp_core) {
- MALI_DEBUG_PRINT(2, ("Mali group: TIMEOUT on %s\n", mali_gp_get_hw_core_desc(group->gp_core)));
- _mali_osk_wq_schedule_work(group->bottom_half_work_gp);
+ mali_group_schedule_bottom_half_gp(group);
} else {
- MALI_DEBUG_PRINT(2, ("Mali group: TIMEOUT on %s\n", mali_pp_get_hw_core_desc(group->pp_core)));
- _mali_osk_wq_schedule_work(group->bottom_half_work_pp);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ mali_group_schedule_bottom_half_pp(group);
}
}
-void mali_group_zap_session(struct mali_group *group, struct mali_session_data *session)
+mali_bool mali_group_zap_session(struct mali_group *group,
+ struct mali_session_data *session)
{
MALI_DEBUG_ASSERT_POINTER(group);
MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
- /* Early out - safe even if mutex is not held */
- if (group->session != session) return;
-
- mali_group_lock(group);
-
- mali_group_remove_session_if_unused(group, session);
+ if (group->session != session) {
+ /* not running from this session */
+ return MALI_TRUE; /* success */
+ }
- if (group->session == session) {
+ if (group->is_working) {
/* The Zap also does the stall and disable_stall */
mali_bool zap_success = mali_mmu_zap_tlb(group->mmu);
- if (MALI_TRUE != zap_success) {
- MALI_DEBUG_PRINT(2, ("Mali memory unmap failed. Doing pagefault handling.\n"));
-
- mali_group_mmu_page_fault_and_unlock(group);
- return;
- }
+ return zap_success;
+ } else {
+ /* Just remove the session instead of zapping */
+ mali_group_clear_session(group);
+ return MALI_TRUE; /* success */
}
-
- mali_group_unlock(group);
}
#if defined(CONFIG_MALI400_PROFILING)
u32 value1 = 0;
u32 profiling_channel = 0;
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
switch (core_num) {
case 0:
profiling_channel = MALI_PROFILING_EVENT_TYPE_SINGLE |
_mali_osk_profiling_add_event(profiling_channel, source1 << 8 | source0, value0, value1, 0, 0);
}
#endif /* #if defined(CONFIG_MALI400_PROFILING) */
-
-mali_bool mali_group_is_enabled(struct mali_group *group)
-{
- mali_bool enabled = MALI_TRUE;
-
- MALI_DEBUG_ASSERT_POINTER(group);
-
- mali_group_lock(group);
- if (MALI_GROUP_STATE_DISABLED == group->state) {
- enabled = MALI_FALSE;
- }
- mali_group_unlock(group);
-
- return enabled;
-}
-
-void mali_group_enable(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT_POINTER(group);
- MALI_DEBUG_ASSERT(NULL != mali_group_get_pp_core(group)
- || NULL != mali_group_get_gp_core(group));
-
- if (NULL != mali_group_get_pp_core(group)) {
- mali_pp_scheduler_enable_group(group);
- } else {
- mali_gp_scheduler_enable_group(group);
- }
-}
-
-void mali_group_disable(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT_POINTER(group);
- MALI_DEBUG_ASSERT(NULL != mali_group_get_pp_core(group)
- || NULL != mali_group_get_gp_core(group));
-
- if (NULL != mali_group_get_pp_core(group)) {
- mali_pp_scheduler_disable_group(group);
- } else {
- mali_gp_scheduler_disable_group(group);
- }
-}
-
-static struct mali_pm_domain *mali_group_get_l2_domain(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT(NULL == group->l2_cache_core[1]);
-
- /* l2_cache_core[0] stores the related l2 domain */
- return group->l2_cache_core[0]->pm_domain;
-}
-
-void mali_group_get_pm_domain_ref(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT_POINTER(group);
-
- /* Get group used l2 domain ref */
- mali_pm_domain_ref_get(mali_group_get_l2_domain(group));
- /* Get group used core domain ref */
- mali_pm_domain_ref_get(group->pm_domain);
-}
-
-void mali_group_put_pm_domain_ref(struct mali_group *group)
-{
- MALI_DEBUG_ASSERT_POINTER(group);
-
- /* Put group used core domain ref */
- mali_pm_domain_ref_put(group->pm_domain);
- /* Put group used l2 domain ref */
- mali_pm_domain_ref_put(mali_group_get_l2_domain(group));
-}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_GROUP_H__
#define __MALI_GROUP_H__
-#include "linux/jiffies.h"
#include "mali_osk.h"
#include "mali_l2_cache.h"
#include "mali_mmu.h"
#include "mali_gp.h"
#include "mali_pp.h"
#include "mali_session.h"
+#include "mali_osk_profiling.h"
/**
* @brief Default max runtime [ms] for a core job - used by timeout timers
*/
-#define MALI_MAX_JOB_RUNTIME_DEFAULT 4000
+#define MALI_MAX_JOB_RUNTIME_DEFAULT 5000
+
+extern int mali_max_job_runtime;
-/** @brief A mali group object represents a MMU and a PP and/or a GP core.
- *
- */
#define MALI_MAX_NUMBER_OF_GROUPS 10
+#define MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS 8
-enum mali_group_core_state {
- MALI_GROUP_STATE_IDLE,
- MALI_GROUP_STATE_WORKING,
- MALI_GROUP_STATE_OOM,
- MALI_GROUP_STATE_IN_VIRTUAL,
- MALI_GROUP_STATE_JOINING_VIRTUAL,
- MALI_GROUP_STATE_LEAVING_VIRTUAL,
- MALI_GROUP_STATE_DISABLED,
+enum mali_group_state {
+ MALI_GROUP_STATE_INACTIVE,
+ MALI_GROUP_STATE_ACTIVATION_PENDING,
+ MALI_GROUP_STATE_ACTIVE,
};
-/* Forward declaration from mali_pm_domain.h */
-struct mali_pm_domain;
-
/**
* The structure represents a render group
* A render group is defined by all the cores that share the same Mali MMU
struct mali_mmu_core *mmu;
struct mali_session_data *session;
- mali_bool power_is_on;
- enum mali_group_core_state state;
+ enum mali_group_state state;
+ mali_bool power_is_on;
+
+ mali_bool is_working;
+ unsigned long start_time; /* in ticks */
struct mali_gp_core *gp_core;
struct mali_gp_job *gp_running_job;
struct mali_pp_job *pp_running_job;
u32 pp_running_sub_job;
+ struct mali_pm_domain *pm_domain;
+
struct mali_l2_cache_core *l2_cache_core[2];
u32 l2_cache_core_ref_count[2];
+ /* Parent virtual group (if any) */
+ struct mali_group *parent_group;
+
struct mali_dlbu_core *dlbu_core;
struct mali_bcast_unit *bcast_core;
-#ifdef MALI_UPPER_HALF_SCHEDULING
- _mali_osk_spinlock_irq_t *lock;
-#else
- _mali_osk_spinlock_t *lock;
-#endif
-
- _mali_osk_list_t pp_scheduler_list;
+ /* Used for working groups which needs to be disabled */
+ mali_bool disable_requested;
- /* List used for virtual groups. For a virtual group, the list represents the
- * head element. */
+ /* Used by group to link child groups (for virtual group) */
_mali_osk_list_t group_list;
- struct mali_group *pm_domain_list;
- struct mali_pm_domain *pm_domain;
+ /* Used by executor module in order to link groups of same state */
+ _mali_osk_list_t executor_list;
- /* Parent virtual group (if any) */
- struct mali_group *parent_group;
+ /* Used by PM domains to link groups of same domain */
+ _mali_osk_list_t pm_domain_list;
_mali_osk_wq_work_t *bottom_half_work_mmu;
_mali_osk_wq_work_t *bottom_half_work_gp;
_mali_osk_wq_work_t *bottom_half_work_pp;
_mali_osk_timer_t *timeout_timer;
- mali_bool core_timed_out;
};
/** @brief Create a new Mali group object
*
- * @param cluster Pointer to the cluster to which the group is connected.
- * @param mmu Pointer to the MMU that defines this group
* @return A pointer to a new group object
*/
struct mali_group *mali_group_create(struct mali_l2_cache_core *core,
struct mali_dlbu_core *dlbu,
- struct mali_bcast_unit *bcast);
+ struct mali_bcast_unit *bcast,
+ u32 domain_index);
+
+void mali_group_delete(struct mali_group *group);
-_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group, struct mali_mmu_core *mmu_core);
+_mali_osk_errcode_t mali_group_add_mmu_core(struct mali_group *group,
+ struct mali_mmu_core *mmu_core);
void mali_group_remove_mmu_core(struct mali_group *group);
-_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group, struct mali_gp_core *gp_core);
+_mali_osk_errcode_t mali_group_add_gp_core(struct mali_group *group,
+ struct mali_gp_core *gp_core);
void mali_group_remove_gp_core(struct mali_group *group);
-_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group, struct mali_pp_core *pp_core);
+_mali_osk_errcode_t mali_group_add_pp_core(struct mali_group *group,
+ struct mali_pp_core *pp_core);
void mali_group_remove_pp_core(struct mali_group *group);
-void mali_group_set_pm_domain(struct mali_group *group, struct mali_pm_domain *domain);
-
-void mali_group_delete(struct mali_group *group);
-
-/** @brief Virtual groups */
-void mali_group_add_group(struct mali_group *parent, struct mali_group *child, mali_bool update_hw);
-void mali_group_remove_group(struct mali_group *parent, struct mali_group *child);
-struct mali_group *mali_group_acquire_group(struct mali_group *parent);
+MALI_STATIC_INLINE const char *mali_group_core_description(
+ struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ if (NULL != group->pp_core) {
+ return mali_pp_core_description(group->pp_core);
+ } else {
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ return mali_gp_core_description(group->gp_core);
+ }
+}
MALI_STATIC_INLINE mali_bool mali_group_is_virtual(struct mali_group *group)
{
+ MALI_DEBUG_ASSERT_POINTER(group);
+
#if defined(CONFIG_MALI450)
return (NULL != group->dlbu_core);
#else
#endif
}
-/** @brief Check if a group is considered as part of a virtual group
- *
- * @note A group is considered to be "part of" a virtual group also during the transition
- * in to / out of the virtual group.
+/** @brief Check if a group is a part of a virtual group or not
*/
MALI_STATIC_INLINE mali_bool mali_group_is_in_virtual(struct mali_group *group)
{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
#if defined(CONFIG_MALI450)
- return (MALI_GROUP_STATE_IN_VIRTUAL == group->state ||
- MALI_GROUP_STATE_JOINING_VIRTUAL == group->state ||
- MALI_GROUP_STATE_LEAVING_VIRTUAL == group->state);
+ return (NULL != group->parent_group) ? MALI_TRUE : MALI_FALSE;
#else
return MALI_FALSE;
#endif
/** @brief Reset group
*
- * This function will reset the entire group, including all the cores present in the group.
+ * This function will reset the entire group,
+ * including all the cores present in the group.
*
* @param group Pointer to the group to reset
*/
void mali_group_reset(struct mali_group *group);
-/** @brief Zap MMU TLB on all groups
- *
- * Zap TLB on group if \a session is active.
+MALI_STATIC_INLINE struct mali_session_data *mali_group_get_session(
+ struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ return group->session;
+}
+
+MALI_STATIC_INLINE void mali_group_clear_session(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+
+ if (NULL != group->session) {
+ mali_mmu_activate_empty_page_directory(group->mmu);
+ group->session = NULL;
+ }
+}
+
+enum mali_group_state mali_group_activate(struct mali_group *group);
+
+/*
+ * Change state from ACTIVATION_PENDING to ACTIVE
+ * For a virtual group, all children need to be ACTIVE first
*/
-void mali_group_zap_session(struct mali_group *group, struct mali_session_data *session);
+mali_bool mali_group_set_active(struct mali_group *group);
-/** @brief Get pointer to GP core object
+/*
+ * @return MALI_TRUE means one or more domains can now be powered off,
+ * and caller should call either mali_pm_update_async() or
+ * mali_pm_update_sync() in order to do so.
*/
-struct mali_gp_core *mali_group_get_gp_core(struct mali_group *group);
+mali_bool mali_group_deactivate(struct mali_group *group);
-/** @brief Get pointer to PP core object
+MALI_STATIC_INLINE enum mali_group_state mali_group_get_state(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return group->state;
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_power_is_on(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ return group->power_is_on;
+}
+
+void mali_group_power_up(struct mali_group *group);
+void mali_group_power_down(struct mali_group *group);
+
+MALI_STATIC_INLINE void mali_group_set_disable_request(
+ struct mali_group *group, mali_bool disable)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ group->disable_requested = disable;
+
+ /**
+	 * When the disable_request of one of the child groups is set to TRUE,
+	 * the disable_request of the parent group must also be set to TRUE.
+	 * Conversely, the parent group's disable_request may be set to FALSE
+	 * only when the disable_request of every child group is FALSE.
+ */
+ if (NULL != group->parent_group && MALI_TRUE == disable) {
+ group->parent_group->disable_requested = disable;
+ }
+}
+
+MALI_STATIC_INLINE mali_bool mali_group_disable_requested(
+ struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return group->disable_requested;
+}
+
+/** @brief Virtual groups */
+void mali_group_add_group(struct mali_group *parent, struct mali_group *child);
+struct mali_group *mali_group_acquire_group(struct mali_group *parent);
+void mali_group_remove_group(struct mali_group *parent, struct mali_group *child);
+
+/** @brief Checks if the group is working.
*/
-struct mali_pp_core *mali_group_get_pp_core(struct mali_group *group);
+MALI_STATIC_INLINE mali_bool mali_group_is_working(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ if (mali_group_is_in_virtual(group)) {
+ struct mali_group *tmp_group = mali_executor_get_virtual_group();
+ return tmp_group->is_working;
+ }
+ return group->is_working;
+}
-/** @brief Lock group object
- *
- * Most group functions will lock the group object themselves. The expection is
- * the group_bottom_half which requires the group to be locked on entry.
+MALI_STATIC_INLINE struct mali_gp_job *mali_group_get_running_gp_job(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return group->gp_running_job;
+}
+
+/** @brief Zap MMU TLB on all groups
*
- * @param group Pointer to group to lock
+ * Zap TLB on group if \a session is active.
*/
-void mali_group_lock(struct mali_group *group);
+mali_bool mali_group_zap_session(struct mali_group *group,
+ struct mali_session_data *session);
-/** @brief Unlock group object
- *
- * @param group Pointer to group to unlock
+/** @brief Get pointer to GP core object
*/
-void mali_group_unlock(struct mali_group *group);
-#ifdef DEBUG
-void mali_group_assert_locked(struct mali_group *group);
-#define MALI_ASSERT_GROUP_LOCKED(group) mali_group_assert_locked(group)
-#else
-#define MALI_ASSERT_GROUP_LOCKED(group)
-#endif
+MALI_STATIC_INLINE struct mali_gp_core *mali_group_get_gp_core(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ return group->gp_core;
+}
+
+/** @brief Get pointer to PP core object
+ */
+MALI_STATIC_INLINE struct mali_pp_core *mali_group_get_pp_core(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ return group->pp_core;
+}
/** @brief Start GP job
*/
void mali_group_start_gp_job(struct mali_group *group, struct mali_gp_job *job);
+void mali_group_start_pp_job(struct mali_group *group, struct mali_pp_job *job, u32 sub_job);
+
/** @brief Start virtual group Job on a virtual group
*/
void mali_group_start_job_on_virtual(struct mali_group *group, struct mali_pp_job *job, u32 first_subjob, u32 last_subjob);
/** @brief Resume GP job that suspended waiting for more heap memory
*/
-struct mali_gp_job *mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr);
-/** @brief Abort GP job
- *
- * Used to abort suspended OOM jobs when user space failed to allocte more memory.
- */
-void mali_group_abort_gp_job(struct mali_group *group, u32 job_id);
-/** @brief Abort all GP jobs from \a session
- *
- * Used on session close when terminating all running and queued jobs from \a session.
- */
-void mali_group_abort_session(struct mali_group *group, struct mali_session_data *session);
+void mali_group_resume_gp_with_new_heap(struct mali_group *group, u32 job_id, u32 start_addr, u32 end_addr);
-mali_bool mali_group_power_is_on(struct mali_group *group);
-void mali_group_power_on_group(struct mali_group *group);
-void mali_group_power_off_group(struct mali_group *group, mali_bool power_status);
-void mali_group_power_on(void);
-
-/** @brief Prepare group for power off
- *
- * Update the group's state and prepare for the group to be powered off.
- *
- * If do_power_change is MALI_FALSE group session will be set to NULL so that
- * no more activity will happen to this group, but the power state flag will be
- * left unchanged.
- *
- * @do_power_change MALI_TRUE if power status is to be updated
- */
-void mali_group_power_off(mali_bool do_power_change);
+MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_gp(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_gp_get_interrupt_result(group->gp_core);
+}
-struct mali_group *mali_group_get_glob_group(u32 index);
-u32 mali_group_get_glob_num_groups(void);
+MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_pp(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_pp_get_interrupt_result(group->pp_core);
+}
-u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size);
+MALI_STATIC_INLINE enum mali_interrupt_result mali_group_get_interrupt_result_mmu(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_mmu_get_interrupt_result(group->mmu);
+}
-/* MMU-related functions */
-_mali_osk_errcode_t mali_group_upper_half_mmu(void *data);
+MALI_STATIC_INLINE mali_bool mali_group_gp_is_active(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_gp_is_active(group->gp_core);
+}
-/* GP-related functions */
-_mali_osk_errcode_t mali_group_upper_half_gp(void *data);
+MALI_STATIC_INLINE mali_bool mali_group_pp_is_active(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_pp_is_active(group->pp_core);
+}
-/* PP-related functions */
-_mali_osk_errcode_t mali_group_upper_half_pp(void *data);
+MALI_STATIC_INLINE mali_bool mali_group_has_timed_out(struct mali_group *group)
+{
+ unsigned long time_cost;
+ struct mali_group *tmp_group = group;
-/** @brief Check if group is enabled
- *
- * @param group group to check
- * @return MALI_TRUE if enabled, MALI_FALSE if not
- */
-mali_bool mali_group_is_enabled(struct mali_group *group);
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
-/** @brief Enable group
- *
- * An enabled job is put on the idle scheduler list and can be used to handle jobs. Does nothing if
- * group is already enabled.
- *
- * @param group group to enable
- */
-void mali_group_enable(struct mali_group *group);
+ /* if the group is in virtual need to use virtual_group's start time */
+ if (mali_group_is_in_virtual(group)) {
+ tmp_group = mali_executor_get_virtual_group();
+ }
-/** @brief Disable group
- *
- * A disabled group will no longer be used by the scheduler. If part of a virtual group, the group
- * will be removed before being disabled. Cores part of a disabled group is safe to power down.
- *
- * @param group group to disable
- */
-void mali_group_disable(struct mali_group *group);
+ time_cost = _mali_osk_time_tickcount() - tmp_group->start_time;
+ if (_mali_osk_time_mstoticks(mali_max_job_runtime) <= time_cost) {
+ /*
+ * current tick is at or after timeout end time,
+ * so this is a valid timeout
+ */
+ return MALI_TRUE;
+ } else {
+ /*
+ * Not a valid timeout. A HW interrupt probably beat
+ * us to it, and the timer wasn't properly deleted
+ * (async deletion used due to atomic context).
+ */
+ return MALI_FALSE;
+ }
+}
-MALI_STATIC_INLINE mali_bool mali_group_virtual_disable_if_empty(struct mali_group *group)
+MALI_STATIC_INLINE void mali_group_mask_all_interrupts_gp(struct mali_group *group)
{
- mali_bool empty = MALI_FALSE;
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_gp_mask_all_interrupts(group->gp_core);
+}
- MALI_ASSERT_GROUP_LOCKED(group);
- MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+MALI_STATIC_INLINE void mali_group_mask_all_interrupts_pp(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return mali_pp_mask_all_interrupts(group->pp_core);
+}
- if (_mali_osk_list_empty(&group->group_list)) {
- group->state = MALI_GROUP_STATE_DISABLED;
- group->session = NULL;
+MALI_STATIC_INLINE void mali_group_enable_interrupts_gp(
+ struct mali_group *group,
+ enum mali_interrupt_result exceptions)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ mali_gp_enable_interrupts(group->gp_core, exceptions);
+}
- empty = MALI_TRUE;
- }
+MALI_STATIC_INLINE void mali_group_schedule_bottom_half_gp(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->gp_core);
+ _mali_osk_wq_schedule_work(group->bottom_half_work_gp);
+}
- return empty;
+MALI_STATIC_INLINE void mali_group_schedule_bottom_half_pp(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->pp_core);
+ _mali_osk_wq_schedule_work(group->bottom_half_work_pp);
}
-MALI_STATIC_INLINE mali_bool mali_group_virtual_enable_if_empty(struct mali_group *group)
+MALI_STATIC_INLINE void mali_group_schedule_bottom_half_mmu(struct mali_group *group)
{
- mali_bool empty = MALI_FALSE;
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT_POINTER(group->mmu);
+ _mali_osk_wq_schedule_work(group->bottom_half_work_mmu);
+}
- MALI_ASSERT_GROUP_LOCKED(group);
- MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+struct mali_pp_job *mali_group_complete_pp(struct mali_group *group, mali_bool success, u32 *sub_job);
- if (_mali_osk_list_empty(&group->group_list)) {
- MALI_DEBUG_ASSERT(MALI_GROUP_STATE_DISABLED == group->state);
+struct mali_gp_job *mali_group_complete_gp(struct mali_group *group, mali_bool success);
- group->state = MALI_GROUP_STATE_IDLE;
+#if defined(CONFIG_MALI400_PROFILING)
+MALI_STATIC_INLINE void mali_group_oom(struct mali_group *group)
+{
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SUSPEND |
+ MALI_PROFILING_MAKE_EVENT_CHANNEL_GP(0),
+ 0, 0, 0, 0, 0);
+}
+#endif
- empty = MALI_TRUE;
- }
+struct mali_group *mali_group_get_glob_group(u32 index);
+u32 mali_group_get_glob_num_groups(void);
- return empty;
-}
+u32 mali_group_dump_state(struct mali_group *group, char *buf, u32 size);
-/* Get group used l2 domain and core domain ref */
-void mali_group_get_pm_domain_ref(struct mali_group *group);
-/* Put group used l2 domain and core domain ref */
-void mali_group_put_pm_domain_ref(struct mali_group *group);
+_mali_osk_errcode_t mali_group_upper_half_mmu(void *data);
+_mali_osk_errcode_t mali_group_upper_half_gp(void *data);
+_mali_osk_errcode_t mali_group_upper_half_pp(void *data);
+
+MALI_STATIC_INLINE mali_bool mali_group_is_empty(struct mali_group *group)
+{
+ MALI_DEBUG_ASSERT_POINTER(group);
+ MALI_DEBUG_ASSERT(mali_group_is_virtual(group));
+ MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();
+ return _mali_osk_list_empty(&group->group_list);
+}
#endif /* __MALI_GROUP_H__ */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_hw_core.h"
void mali_hw_core_delete(struct mali_hw_core *core)
{
- _mali_osk_mem_unmapioregion(core->phys_addr, core->size, core->mapped_registers);
- core->mapped_registers = NULL;
+ if (NULL != core->mapped_registers) {
+ _mali_osk_mem_unmapioregion(core->phys_addr, core->size, core->mapped_registers);
+ core->mapped_registers = NULL;
+ }
_mali_osk_mem_unreqregion(core->phys_addr, core->size);
}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_HW_CORE_H__
* This struct is embedded inside all core specific structs.
*/
struct mali_hw_core {
- u32 phys_addr; /**< Physical address of the registers */
+ uintptr_t phys_addr; /**< Physical address of the registers */
u32 phys_offset; /**< Offset from start of Mali to registers */
u32 size; /**< Size of registers */
mali_io_address mapped_registers; /**< Virtual mapping of the registers */
#define MALI_REG_POLL_COUNT_FAST 1000
#define MALI_REG_POLL_COUNT_SLOW 1000000
+/*
+ * GP and PP core translate their int_stat/rawstat into one of these
+ */
+enum mali_interrupt_result {
+ MALI_INTERRUPT_RESULT_NONE,
+ MALI_INTERRUPT_RESULT_SUCCESS,
+ MALI_INTERRUPT_RESULT_SUCCESS_VS,
+ MALI_INTERRUPT_RESULT_SUCCESS_PLBU,
+ MALI_INTERRUPT_RESULT_OOM,
+ MALI_INTERRUPT_RESULT_ERROR
+};
+
_mali_osk_errcode_t mali_hw_core_create(struct mali_hw_core *core, const _mali_osk_resource_t *resource, u32 reg_size);
void mali_hw_core_delete(struct mali_hw_core *core);
}
}
-
MALI_STATIC_INLINE void mali_hw_core_register_write(struct mali_hw_core *core, u32 relative_address, u32 new_val)
{
MALI_DEBUG_PRINT(6, ("register_write for core %s, relative addr=0x%04X, val=0x%08X\n",
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2007-2010, 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_KERNEL_COMMON_H__
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2007-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_kernel_common.h"
#include "mali_broadcast.h"
#include "mali_gp.h"
#include "mali_pp.h"
-#include "mali_gp_scheduler.h"
-#include "mali_pp_scheduler.h"
+#include "mali_executor.h"
#include "mali_pp_job.h"
#include "mali_group.h"
#include "mali_pm.h"
#include "mali_scheduler.h"
#include "mali_kernel_utilization.h"
#include "mali_l2_cache.h"
-#include "mali_dma.h"
#include "mali_timeline.h"
#include "mali_soft_job.h"
#include "mali_pm_domain.h"
#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
#include "mali_profiling_internal.h"
#endif
+#include "mali_control_timer.h"
+#include "mali_dvfs_policy.h"
+#define MALI_SHARED_MEMORY_DEFAULT_SIZE 0xffffffff
/* Mali GPU memory. Real values come from module parameter or from device specific data */
unsigned int mali_dedicated_mem_start = 0;
unsigned int mali_dedicated_mem_size = 0;
-unsigned long mali_shared_mem_size = 0;
+
+/* Default shared memory size is set to 4G. */
+unsigned int mali_shared_mem_size = MALI_SHARED_MEMORY_DEFAULT_SIZE;
/* Frame buffer memory to be accessible by Mali GPU */
int mali_fb_start = 0;
int mali_inited_pp_cores_group_2 = 0;
static _mali_product_id_t global_product_id = _MALI_PRODUCT_ID_UNKNOWN;
-static u32 global_gpu_base_address = 0;
+static uintptr_t global_gpu_base_address = 0;
static u32 global_gpu_major_version = 0;
static u32 global_gpu_minor_version = 0;
static _mali_osk_errcode_t mali_set_global_gpu_base_address(void)
{
+ _mali_osk_errcode_t err = _MALI_OSK_ERR_OK;
+
global_gpu_base_address = _mali_osk_resource_base_address();
if (0 == global_gpu_base_address) {
- return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ err = _MALI_OSK_ERR_ITEM_NOT_FOUND;
}
- return _MALI_OSK_ERR_OK;
+ return err;
}
static u32 mali_get_bcast_id(_mali_osk_resource_t *resource_pp)
static _mali_osk_errcode_t mali_parse_product_info(void)
{
- /*
- * Mali-200 has the PP core first, while Mali-300, Mali-400 and Mali-450 have the GP core first.
- * Look at the version register for the first PP core in order to determine the GPU HW revision.
- */
-
- u32 first_pp_offset;
_mali_osk_resource_t first_pp_resource;
- /* Find out where the first PP core is located */
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x8000, NULL)) {
- /* Mali-300/400/450 */
- first_pp_offset = 0x8000;
- } else {
- /* Mali-200 */
- first_pp_offset = 0x0000;
- }
-
/* Find the first PP core resource (again) */
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + first_pp_offset, &first_pp_resource)) {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_PP0, &first_pp_resource)) {
/* Create a dummy PP object for this core so that we can read the version register */
- struct mali_group *group = mali_group_create(NULL, NULL, NULL);
+ struct mali_group *group = mali_group_create(NULL, NULL, NULL, MALI_DOMAIN_INDEX_PP0);
if (NULL != group) {
struct mali_pp_core *pp_core = mali_pp_create(&first_pp_resource, group, MALI_FALSE, mali_get_bcast_id(&first_pp_resource));
if (NULL != pp_core) {
- u32 pp_version = mali_pp_core_get_version(pp_core);
+ u32 pp_version;
+
+ pp_version = mali_pp_core_get_version(pp_core);
+
mali_group_delete(group);
global_gpu_major_version = (pp_version >> 8) & 0xFF;
return _MALI_OSK_ERR_FAULT;
}
-
-static void mali_resource_count(u32 *pp_count, u32 *l2_count)
-{
- *pp_count = 0;
- *l2_count = 0;
-
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x08000, NULL)) {
- ++(*pp_count);
- }
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0A000, NULL)) {
- ++(*pp_count);
- }
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0C000, NULL)) {
- ++(*pp_count);
- }
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0E000, NULL)) {
- ++(*pp_count);
- }
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x28000, NULL)) {
- ++(*pp_count);
- }
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2A000, NULL)) {
- ++(*pp_count);
- }
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2C000, NULL)) {
- ++(*pp_count);
- }
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2E000, NULL)) {
- ++(*pp_count);
- }
-
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x1000, NULL)) {
- ++(*l2_count);
- }
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x10000, NULL)) {
- ++(*l2_count);
- }
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x11000, NULL)) {
- ++(*l2_count);
- }
-}
-
static void mali_delete_groups(void)
{
struct mali_group *group;
MALI_DEBUG_ASSERT(0 == mali_l2_cache_core_get_glob_num_l2_cores());
}
-static struct mali_l2_cache_core *mali_create_l2_cache_core(_mali_osk_resource_t *resource)
+static struct mali_l2_cache_core *mali_create_l2_cache_core(_mali_osk_resource_t *resource, u32 domain_index)
{
struct mali_l2_cache_core *l2_cache = NULL;
MALI_DEBUG_PRINT(3, ("Found L2 cache %s\n", resource->description));
- l2_cache = mali_l2_cache_create(resource);
+ l2_cache = mali_l2_cache_create(resource, domain_index);
if (NULL == l2_cache) {
MALI_PRINT_ERROR(("Failed to create L2 cache object\n"));
return NULL;
if (mali_is_mali400()) {
_mali_osk_resource_t l2_resource;
- if (_MALI_OSK_ERR_OK != _mali_osk_resource_find(global_gpu_base_address + 0x1000, &l2_resource)) {
+ if (_MALI_OSK_ERR_OK != _mali_osk_resource_find(MALI400_OFFSET_L2_CACHE0, &l2_resource)) {
MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache in config file\n"));
return _MALI_OSK_ERR_FAULT;
}
- l2_cache = mali_create_l2_cache_core(&l2_resource);
+ l2_cache = mali_create_l2_cache_core(&l2_resource, MALI_DOMAIN_INDEX_L20);
if (NULL == l2_cache) {
return _MALI_OSK_ERR_FAULT;
}
- mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L20_DOMAIN_INDEX), l2_cache);
} else if (mali_is_mali450()) {
/*
* L2 for GP at 0x10000
_mali_osk_resource_t l2_pp_grp1_resource;
/* Make cluster for GP's L2 */
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x10000, &l2_gp_resource)) {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE0, &l2_gp_resource)) {
MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for GP\n"));
- l2_cache = mali_create_l2_cache_core(&l2_gp_resource);
+ l2_cache = mali_create_l2_cache_core(&l2_gp_resource, MALI_DOMAIN_INDEX_L20);
if (NULL == l2_cache) {
return _MALI_OSK_ERR_FAULT;
}
- mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L20_DOMAIN_INDEX), l2_cache);
} else {
MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for GP in config file\n"));
return _MALI_OSK_ERR_FAULT;
}
/* Find corresponding l2 domain */
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x1000, &l2_pp_grp0_resource)) {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE1, &l2_pp_grp0_resource)) {
MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 0\n"));
- l2_cache = mali_create_l2_cache_core(&l2_pp_grp0_resource);
+ l2_cache = mali_create_l2_cache_core(&l2_pp_grp0_resource, MALI_DOMAIN_INDEX_L21);
if (NULL == l2_cache) {
return _MALI_OSK_ERR_FAULT;
}
- mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L21_DOMAIN_INDEX), l2_cache);
} else {
MALI_DEBUG_PRINT(3, ("Did not find required Mali L2 cache for PP group 0 in config file\n"));
return _MALI_OSK_ERR_FAULT;
}
/* Second PP core group is optional, don't fail if we don't find it */
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x11000, &l2_pp_grp1_resource)) {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI450_OFFSET_L2_CACHE2, &l2_pp_grp1_resource)) {
MALI_DEBUG_PRINT(3, ("Creating Mali-450 L2 cache core for PP group 1\n"));
- l2_cache = mali_create_l2_cache_core(&l2_pp_grp1_resource);
+ l2_cache = mali_create_l2_cache_core(&l2_pp_grp1_resource, MALI_DOMAIN_INDEX_L22);
if (NULL == l2_cache) {
return _MALI_OSK_ERR_FAULT;
}
- mali_pm_domain_add_l2(mali_pmu_get_domain_mask(MALI_L22_DOMAIN_INDEX), l2_cache);
}
}
static struct mali_group *mali_create_group(struct mali_l2_cache_core *cache,
_mali_osk_resource_t *resource_mmu,
_mali_osk_resource_t *resource_gp,
- _mali_osk_resource_t *resource_pp)
+ _mali_osk_resource_t *resource_pp,
+ u32 domain_index)
{
struct mali_mmu_core *mmu;
struct mali_group *group;
MALI_DEBUG_PRINT(3, ("Starting new group for MMU %s\n", resource_mmu->description));
/* Create the group object */
- group = mali_group_create(cache, NULL, NULL);
+ group = mali_group_create(cache, NULL, NULL, domain_index);
if (NULL == group) {
MALI_PRINT_ERROR(("Failed to create group object for MMU %s\n", resource_mmu->description));
return NULL;
}
}
- /* Reset group */
- mali_group_lock(group);
- mali_group_reset(group);
- mali_group_unlock(group);
-
return group;
}
mali_bcast_remove_group(bcast_core, phys_group);
}
#endif /* DEBUG */
- group = mali_group_create(NULL, dlbu_core, bcast_core);
+ group = mali_group_create(NULL, dlbu_core, bcast_core, MALI_DOMAIN_INDEX_DUMMY);
if (NULL == group) {
MALI_PRINT_ERROR(("Failed to create group object for MMU PP broadcast core %s\n", resource_mmu_pp_bcast->description));
mali_bcast_unit_delete(bcast_core);
cluster_id_pp_grp1 = 2;
}
- resource_gp_found = _mali_osk_resource_find(global_gpu_base_address + 0x00000, &resource_gp);
- resource_gp_mmu_found = _mali_osk_resource_find(global_gpu_base_address + 0x03000, &resource_gp_mmu);
- resource_pp_found[0] = _mali_osk_resource_find(global_gpu_base_address + 0x08000, &(resource_pp[0]));
- resource_pp_found[1] = _mali_osk_resource_find(global_gpu_base_address + 0x0A000, &(resource_pp[1]));
- resource_pp_found[2] = _mali_osk_resource_find(global_gpu_base_address + 0x0C000, &(resource_pp[2]));
- resource_pp_found[3] = _mali_osk_resource_find(global_gpu_base_address + 0x0E000, &(resource_pp[3]));
- resource_pp_found[4] = _mali_osk_resource_find(global_gpu_base_address + 0x28000, &(resource_pp[4]));
- resource_pp_found[5] = _mali_osk_resource_find(global_gpu_base_address + 0x2A000, &(resource_pp[5]));
- resource_pp_found[6] = _mali_osk_resource_find(global_gpu_base_address + 0x2C000, &(resource_pp[6]));
- resource_pp_found[7] = _mali_osk_resource_find(global_gpu_base_address + 0x2E000, &(resource_pp[7]));
- resource_pp_mmu_found[0] = _mali_osk_resource_find(global_gpu_base_address + 0x04000, &(resource_pp_mmu[0]));
- resource_pp_mmu_found[1] = _mali_osk_resource_find(global_gpu_base_address + 0x05000, &(resource_pp_mmu[1]));
- resource_pp_mmu_found[2] = _mali_osk_resource_find(global_gpu_base_address + 0x06000, &(resource_pp_mmu[2]));
- resource_pp_mmu_found[3] = _mali_osk_resource_find(global_gpu_base_address + 0x07000, &(resource_pp_mmu[3]));
- resource_pp_mmu_found[4] = _mali_osk_resource_find(global_gpu_base_address + 0x1C000, &(resource_pp_mmu[4]));
- resource_pp_mmu_found[5] = _mali_osk_resource_find(global_gpu_base_address + 0x1D000, &(resource_pp_mmu[5]));
- resource_pp_mmu_found[6] = _mali_osk_resource_find(global_gpu_base_address + 0x1E000, &(resource_pp_mmu[6]));
- resource_pp_mmu_found[7] = _mali_osk_resource_find(global_gpu_base_address + 0x1F000, &(resource_pp_mmu[7]));
+ resource_gp_found = _mali_osk_resource_find(MALI_OFFSET_GP, &resource_gp);
+ resource_gp_mmu_found = _mali_osk_resource_find(MALI_OFFSET_GP_MMU, &resource_gp_mmu);
+ resource_pp_found[0] = _mali_osk_resource_find(MALI_OFFSET_PP0, &(resource_pp[0]));
+ resource_pp_found[1] = _mali_osk_resource_find(MALI_OFFSET_PP1, &(resource_pp[1]));
+ resource_pp_found[2] = _mali_osk_resource_find(MALI_OFFSET_PP2, &(resource_pp[2]));
+ resource_pp_found[3] = _mali_osk_resource_find(MALI_OFFSET_PP3, &(resource_pp[3]));
+ resource_pp_found[4] = _mali_osk_resource_find(MALI_OFFSET_PP4, &(resource_pp[4]));
+ resource_pp_found[5] = _mali_osk_resource_find(MALI_OFFSET_PP5, &(resource_pp[5]));
+ resource_pp_found[6] = _mali_osk_resource_find(MALI_OFFSET_PP6, &(resource_pp[6]));
+ resource_pp_found[7] = _mali_osk_resource_find(MALI_OFFSET_PP7, &(resource_pp[7]));
+ resource_pp_mmu_found[0] = _mali_osk_resource_find(MALI_OFFSET_PP0_MMU, &(resource_pp_mmu[0]));
+ resource_pp_mmu_found[1] = _mali_osk_resource_find(MALI_OFFSET_PP1_MMU, &(resource_pp_mmu[1]));
+ resource_pp_mmu_found[2] = _mali_osk_resource_find(MALI_OFFSET_PP2_MMU, &(resource_pp_mmu[2]));
+ resource_pp_mmu_found[3] = _mali_osk_resource_find(MALI_OFFSET_PP3_MMU, &(resource_pp_mmu[3]));
+ resource_pp_mmu_found[4] = _mali_osk_resource_find(MALI_OFFSET_PP4_MMU, &(resource_pp_mmu[4]));
+ resource_pp_mmu_found[5] = _mali_osk_resource_find(MALI_OFFSET_PP5_MMU, &(resource_pp_mmu[5]));
+ resource_pp_mmu_found[6] = _mali_osk_resource_find(MALI_OFFSET_PP6_MMU, &(resource_pp_mmu[6]));
+ resource_pp_mmu_found[7] = _mali_osk_resource_find(MALI_OFFSET_PP7_MMU, &(resource_pp_mmu[7]));
if (mali_is_mali450()) {
- resource_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x13000, &resource_bcast);
- resource_dlbu_found = _mali_osk_resource_find(global_gpu_base_address + 0x14000, &resource_dlbu);
- resource_pp_mmu_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x15000, &resource_pp_mmu_bcast);
- resource_pp_bcast_found = _mali_osk_resource_find(global_gpu_base_address + 0x16000, &resource_pp_bcast);
+ resource_bcast_found = _mali_osk_resource_find(MALI_OFFSET_BCAST, &resource_bcast);
+ resource_dlbu_found = _mali_osk_resource_find(MALI_OFFSET_DLBU, &resource_dlbu);
+ resource_pp_mmu_bcast_found = _mali_osk_resource_find(MALI_OFFSET_PP_BCAST_MMU, &resource_pp_mmu_bcast);
+ resource_pp_bcast_found = _mali_osk_resource_find(MALI_OFFSET_PP_BCAST, &resource_pp_bcast);
if (_MALI_OSK_ERR_OK != resource_bcast_found ||
_MALI_OSK_ERR_OK != resource_dlbu_found ||
}
MALI_DEBUG_ASSERT(1 <= mali_l2_cache_core_get_glob_num_l2_cores());
- group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_gp), &resource_gp_mmu, &resource_gp, NULL);
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_gp), &resource_gp_mmu, &resource_gp, NULL, MALI_DOMAIN_INDEX_GP);
if (NULL == group) {
return _MALI_OSK_ERR_FAULT;
}
- /* Add GP in group, for PMU ref count */
- mali_pm_domain_add_group(mali_pmu_get_domain_mask(MALI_GP_DOMAIN_INDEX), group);
-
/* Create group for first (and mandatory) PP core */
MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= (cluster_id_pp_grp0 + 1)); /* >= 1 on Mali-300 and Mali-400, >= 2 on Mali-450 */
- group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[0], NULL, &resource_pp[0]);
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[0], NULL, &resource_pp[0], MALI_DOMAIN_INDEX_PP0);
if (NULL == group) {
return _MALI_OSK_ERR_FAULT;
}
- /* Find corresponding pp domain */
- mali_pm_domain_add_group(mali_pmu_get_domain_mask(MALI_PP0_DOMAIN_INDEX), group);
-
mali_inited_pp_cores_group_1++;
/* Create groups for rest of the cores in the first PP core group */
for (i = 1; i < 4; i++) { /* First half of the PP cores belong to first core group */
if (mali_inited_pp_cores_group_1 < mali_max_pp_cores_group_1) {
if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i]) {
- group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[i], NULL, &resource_pp[i]);
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp0), &resource_pp_mmu[i], NULL, &resource_pp[i], MALI_DOMAIN_INDEX_PP0 + i);
if (NULL == group) {
return _MALI_OSK_ERR_FAULT;
}
- mali_pm_domain_add_group(mali_pmu_get_domain_mask(i + MALI_PP0_DOMAIN_INDEX), group);
-
mali_inited_pp_cores_group_1++;
}
}
if (mali_inited_pp_cores_group_2 < mali_max_pp_cores_group_2) {
if (_MALI_OSK_ERR_OK == resource_pp_found[i] && _MALI_OSK_ERR_OK == resource_pp_mmu_found[i]) {
MALI_DEBUG_ASSERT(mali_l2_cache_core_get_glob_num_l2_cores() >= 2); /* Only Mali-450 have a second core group */
- group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp1), &resource_pp_mmu[i], NULL, &resource_pp[i]);
+ group = mali_create_group(mali_l2_cache_core_get_glob_l2_core(cluster_id_pp_grp1), &resource_pp_mmu[i], NULL, &resource_pp[i], MALI_DOMAIN_INDEX_PP0 + i);
if (NULL == group) {
return _MALI_OSK_ERR_FAULT;
}
- mali_pm_domain_add_group(mali_pmu_get_domain_mask(i + MALI_PP0_DOMAIN_INDEX), group);
+
mali_inited_pp_cores_group_2++;
}
}
return _MALI_OSK_ERR_OK;
}
-static _mali_osk_errcode_t mali_create_pm_domains(void)
-{
- int i;
-
- for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
- if (0x0 == mali_pmu_get_domain_mask(i)) continue;
-
- if (NULL == mali_pm_domain_create(mali_pmu_get_domain_mask(i))) {
- return _MALI_OSK_ERR_NOMEM;
- }
- }
-
- return _MALI_OSK_ERR_OK;
-}
-
-static void mali_use_default_pm_domain_config(void)
-{
- u32 pp_count_gr1 = 0;
- u32 pp_count_gr2 = 0;
- u32 l2_count = 0;
-
- MALI_DEBUG_ASSERT(0 != global_gpu_base_address);
-
- /* GP core */
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x00000, NULL)) {
- mali_pmu_set_domain_mask(MALI_GP_DOMAIN_INDEX, 0x01);
- }
-
- /* PP0 - PP3 core */
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x08000, NULL)) {
- ++pp_count_gr1;
-
- if (mali_is_mali400()) {
- mali_pmu_set_domain_mask(MALI_PP0_DOMAIN_INDEX, 0x01 << 2);
- } else if (mali_is_mali450()) {
- mali_pmu_set_domain_mask(MALI_PP0_DOMAIN_INDEX, 0x01 << 1);
- }
- }
-
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0A000, NULL)) {
- ++pp_count_gr1;
-
- if (mali_is_mali400()) {
- mali_pmu_set_domain_mask(MALI_PP1_DOMAIN_INDEX, 0x01 << 3);
- } else if (mali_is_mali450()) {
- mali_pmu_set_domain_mask(MALI_PP1_DOMAIN_INDEX, 0x01 << 2);
- }
- }
-
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0C000, NULL)) {
- ++pp_count_gr1;
-
- if (mali_is_mali400()) {
- mali_pmu_set_domain_mask(MALI_PP2_DOMAIN_INDEX, 0x01 << 4);
- } else if (mali_is_mali450()) {
- mali_pmu_set_domain_mask(MALI_PP2_DOMAIN_INDEX, 0x01 << 2);
- }
- }
-
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x0E000, NULL)) {
- ++pp_count_gr1;
-
- if (mali_is_mali400()) {
- mali_pmu_set_domain_mask(MALI_PP3_DOMAIN_INDEX, 0x01 << 5);
- } else if (mali_is_mali450()) {
- mali_pmu_set_domain_mask(MALI_PP3_DOMAIN_INDEX, 0x01 << 2);
- }
- }
-
- /* PP4 - PP7 */
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x28000, NULL)) {
- ++pp_count_gr2;
-
- mali_pmu_set_domain_mask(MALI_PP4_DOMAIN_INDEX, 0x01 << 3);
- }
-
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2A000, NULL)) {
- ++pp_count_gr2;
-
- mali_pmu_set_domain_mask(MALI_PP5_DOMAIN_INDEX, 0x01 << 3);
- }
-
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2C000, NULL)) {
- ++pp_count_gr2;
-
- mali_pmu_set_domain_mask(MALI_PP6_DOMAIN_INDEX, 0x01 << 3);
- }
-
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x2E000, NULL)) {
- ++pp_count_gr2;
-
- mali_pmu_set_domain_mask(MALI_PP7_DOMAIN_INDEX, 0x01 << 3);
- }
-
- /* L2gp/L2PP0/L2PP4 */
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x10000, NULL)) {
- ++l2_count;
-
- if (mali_is_mali400()) {
- mali_pmu_set_domain_mask(MALI_L20_DOMAIN_INDEX, 0x01 << 1);
- } else if (mali_is_mali450()) {
- mali_pmu_set_domain_mask(MALI_L20_DOMAIN_INDEX, 0x01 << 0);
- }
- }
-
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x1000, NULL)) {
- ++l2_count;
-
- mali_pmu_set_domain_mask(MALI_L21_DOMAIN_INDEX, 0x01 << 1);
- }
-
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x11000, NULL)) {
- ++l2_count;
-
- mali_pmu_set_domain_mask(MALI_L22_DOMAIN_INDEX, 0x01 << 3);
- }
-
- MALI_DEBUG_PRINT(2, ("Using default PMU domain config: (%d) gr1_pp_cores, (%d) gr2_pp_cores, (%d) l2_count. \n", pp_count_gr1, pp_count_gr2, l2_count));
-}
-
-static void mali_set_pmu_global_domain_config(void)
-{
- _mali_osk_device_data data = { 0, };
- int i = 0;
-
- if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
- /* Check whether has customized pmu domain configure */
- for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
- if (0 != data.pmu_domain_config[i]) break;
- }
-
- if (MALI_MAX_NUMBER_OF_DOMAINS == i) {
- mali_use_default_pm_domain_config();
- } else {
- /* Copy the customer config to global config */
- mali_pmu_copy_domain_mask(data.pmu_domain_config, sizeof(data.pmu_domain_config));
- }
- }
-}
-
static _mali_osk_errcode_t mali_parse_config_pmu(void)
{
_mali_osk_resource_t resource_pmu;
MALI_DEBUG_ASSERT(0 != global_gpu_base_address);
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x02000, &resource_pmu)) {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_PMU, &resource_pmu)) {
struct mali_pmu_core *pmu;
- mali_set_pmu_global_domain_config();
-
pmu = mali_pmu_create(&resource_pmu);
if (NULL == pmu) {
MALI_PRINT_ERROR(("Failed to create PMU\n"));
return _MALI_OSK_ERR_OK;
}
-static _mali_osk_errcode_t mali_parse_config_dma(void)
-{
- _mali_osk_resource_t resource_dma;
-
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x12000, &resource_dma)) {
- if (NULL == mali_dma_create(&resource_dma)) {
- return _MALI_OSK_ERR_FAULT;
- }
- return _MALI_OSK_ERR_OK;
- } else {
- return _MALI_OSK_ERR_ITEM_NOT_FOUND;
- }
-}
-
static _mali_osk_errcode_t mali_parse_config_memory(void)
{
+ _mali_osk_device_data data = { 0, };
_mali_osk_errcode_t ret;
- if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size && 0 == mali_shared_mem_size) {
+ /* The priority of setting the value of mali_shared_mem_size,
+ * mali_dedicated_mem_start and mali_dedicated_mem_size:
+ * 1. module parameter;
+ * 2. platform data;
+ * 3. default value;
+ **/
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
/* Memory settings are not overridden by module parameters, so use device settings */
- _mali_osk_device_data data = { 0, };
-
- if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size) {
/* Use device specific settings (if defined) */
mali_dedicated_mem_start = data.dedicated_mem_start;
mali_dedicated_mem_size = data.dedicated_mem_size;
- mali_shared_mem_size = data.shared_mem_size;
}
- if (0 == mali_dedicated_mem_start && 0 == mali_dedicated_mem_size && 0 == mali_shared_mem_size) {
- /* No GPU memory specified */
- return _MALI_OSK_ERR_INVALID_ARGS;
+ if (MALI_SHARED_MEMORY_DEFAULT_SIZE == mali_shared_mem_size &&
+ 0 != data.shared_mem_size) {
+ mali_shared_mem_size = data.shared_mem_size;
}
-
- MALI_DEBUG_PRINT(2, ("Using device defined memory settings (dedicated: 0x%08X@0x%08X, shared: 0x%08X)\n",
- mali_dedicated_mem_size, mali_dedicated_mem_start, mali_shared_mem_size));
- } else {
- MALI_DEBUG_PRINT(2, ("Using module defined memory settings (dedicated: 0x%08X@0x%08X, shared: 0x%08X)\n",
- mali_dedicated_mem_size, mali_dedicated_mem_start, mali_shared_mem_size));
}
if (0 < mali_dedicated_mem_size && 0 != mali_dedicated_mem_start) {
+ MALI_DEBUG_PRINT(2, ("Mali memory settings (dedicated: 0x%08X@0x%08X)\n",
+ mali_dedicated_mem_size, mali_dedicated_mem_start));
+
/* Dedicated memory */
ret = mali_memory_core_resource_dedicated_memory(mali_dedicated_mem_start, mali_dedicated_mem_size);
if (_MALI_OSK_ERR_OK != ret) {
}
if (0 < mali_shared_mem_size) {
+ MALI_DEBUG_PRINT(2, ("Mali memory settings (shared: 0x%08X)\n", mali_shared_mem_size));
+
/* Shared OS memory */
ret = mali_memory_core_resource_os_memory(mali_shared_mem_size);
if (_MALI_OSK_ERR_OK != ret) {
static void mali_detect_gpu_class(void)
{
- u32 number_of_pp_cores = 0;
- u32 number_of_l2_caches = 0;
-
- mali_resource_count(&number_of_pp_cores, &number_of_l2_caches);
- if (number_of_l2_caches > 1) {
+ if (_mali_osk_l2_resource_count() > 1) {
mali_gpu_class_is_mali450 = MALI_TRUE;
}
}
/* Ensure broadcast unit is in a good state before we start creating
* groups and cores.
*/
- if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(global_gpu_base_address + 0x13000, &resource_bcast)) {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_BCAST, &resource_bcast)) {
struct mali_bcast_unit *bcast_core;
bcast_core = mali_bcast_unit_create(&resource_bcast);
_mali_osk_errcode_t mali_initialize_subsystems(void)
{
_mali_osk_errcode_t err;
- struct mali_pmu_core *pmu;
+
+#ifdef CONFIG_MALI_DT
+ err = _mali_osk_resource_initialize();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+#endif
mali_pp_job_initialize();
+ mali_timeline_initialize();
+
err = mali_session_initialize();
- if (_MALI_OSK_ERR_OK != err) goto session_init_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
#if defined(CONFIG_MALI400_PROFILING)
err = _mali_osk_profiling_init(mali_boot_profiling ? MALI_TRUE : MALI_FALSE);
#endif
err = mali_memory_initialize();
- if (_MALI_OSK_ERR_OK != err) goto memory_init_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ err = mali_executor_initialize();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
+
+ err = mali_scheduler_initialize();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
- /* Configure memory early. Memory allocation needed for mali_mmu_initialize. */
+ /* Configure memory early, needed by mali_mmu_initialize. */
err = mali_parse_config_memory();
- if (_MALI_OSK_ERR_OK != err) goto parse_memory_config_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
err = mali_set_global_gpu_base_address();
- if (_MALI_OSK_ERR_OK != err) goto set_global_gpu_base_address_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
- /* Detect gpu class according to l2 cache number */
+ /* Detect GPU class (uses L2 cache count) */
mali_detect_gpu_class();
err = mali_check_shared_interrupts();
- if (_MALI_OSK_ERR_OK != err) goto check_shared_interrupts_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
- err = mali_pp_scheduler_initialize();
- if (_MALI_OSK_ERR_OK != err) goto pp_scheduler_init_failed;
+ /* Initialize the MALI PMU (will not touch HW!) */
+ err = mali_parse_config_pmu();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
/* Initialize the power management module */
err = mali_pm_initialize();
- if (_MALI_OSK_ERR_OK != err) goto pm_init_failed;
-
- /* Initialize the MALI PMU */
- err = mali_parse_config_pmu();
- if (_MALI_OSK_ERR_OK != err) goto parse_pmu_config_failed;
-
- /* Make sure the power stays on for the rest of this function */
- err = _mali_osk_pm_dev_ref_add();
- if (_MALI_OSK_ERR_OK != err) goto pm_always_on_failed;
-
- /*
- * If run-time PM is used, then the mali_pm module has now already been
- * notified that the power now is on (through the resume callback functions).
- * However, if run-time PM is not used, then there will probably not be any
- * calls to the resume callback functions, so we need to explicitly tell it
- * that the power is on.
- */
- mali_pm_set_power_is_on();
-
- /* Reset PMU HW and ensure all Mali power domains are on */
- pmu = mali_pmu_get_global_pmu_core();
- if (NULL != pmu) {
- err = mali_pmu_reset(pmu);
- if (_MALI_OSK_ERR_OK != err) goto pmu_reset_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
}
+ /* Make sure the entire GPU stays on for the rest of this function */
+ mali_pm_init_begin();
+
/* Ensure HW is in a good state before starting to access cores. */
err = mali_init_hw_reset();
- if (_MALI_OSK_ERR_OK != err) goto init_hw_reset_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_terminate_subsystems();
+ return err;
+ }
/* Detect which Mali GPU we are dealing with */
err = mali_parse_product_info();
- if (_MALI_OSK_ERR_OK != err) goto product_info_parsing_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
/* The global_product_id is now populated with the correct Mali GPU */
- /* Create PM domains only if PMU exists */
- if (NULL != pmu) {
- err = mali_create_pm_domains();
- if (_MALI_OSK_ERR_OK != err) goto pm_domain_failed;
- }
+ /* Start configuring the actual Mali hardware. */
- /* Initialize MMU module */
err = mali_mmu_initialize();
- if (_MALI_OSK_ERR_OK != err) goto mmu_init_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
if (mali_is_mali450()) {
err = mali_dlbu_initialize();
- if (_MALI_OSK_ERR_OK != err) goto dlbu_init_failed;
-
- err = mali_parse_config_dma();
- if (_MALI_OSK_ERR_OK != err) goto dma_parsing_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
}
- /* Start configuring the actual Mali hardware. */
err = mali_parse_config_l2_cache();
- if (_MALI_OSK_ERR_OK != err) goto config_parsing_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
+
err = mali_parse_config_groups();
- if (_MALI_OSK_ERR_OK != err) goto config_parsing_failed;
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
- /* Initialize the schedulers */
- err = mali_scheduler_initialize();
- if (_MALI_OSK_ERR_OK != err) goto scheduler_init_failed;
- err = mali_gp_scheduler_initialize();
- if (_MALI_OSK_ERR_OK != err) goto gp_scheduler_init_failed;
+ /* Move groups into executor */
+ mali_executor_populate();
+
+ /* Need call after all group has assigned a domain */
+ mali_pm_power_cost_setup();
- /* PP scheduler population can't fail */
- mali_pp_scheduler_populate();
+ /* Initialize the GPU timer */
+ err = mali_control_timer_init();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
+ }
/* Initialize the GPU utilization tracking */
err = mali_utilization_init();
- if (_MALI_OSK_ERR_OK != err) goto utilization_init_failed;
-
- /* Allowing the system to be turned off */
- _mali_osk_pm_dev_ref_dec();
-
- MALI_SUCCESS; /* all ok */
-
- /* Error handling */
-
-utilization_init_failed:
- mali_pp_scheduler_depopulate();
- mali_gp_scheduler_terminate();
-gp_scheduler_init_failed:
- mali_scheduler_terminate();
-scheduler_init_failed:
-config_parsing_failed:
- mali_delete_groups(); /* Delete any groups not (yet) owned by a scheduler */
- mali_delete_l2_cache_cores(); /* Delete L2 cache cores even if config parsing failed. */
- {
- struct mali_dma_core *dma = mali_dma_get_global_dma_core();
- if (NULL != dma) mali_dma_delete(dma);
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
}
-dma_parsing_failed:
- mali_dlbu_terminate();
-dlbu_init_failed:
- mali_mmu_terminate();
-mmu_init_failed:
- mali_pm_domain_terminate();
-pm_domain_failed:
- /* Nothing to roll back */
-product_info_parsing_failed:
- /* Nothing to roll back */
-init_hw_reset_failed:
- /* Nothing to roll back */
-pmu_reset_failed:
- /* Allowing the system to be turned off */
- _mali_osk_pm_dev_ref_dec();
-pm_always_on_failed:
- pmu = mali_pmu_get_global_pmu_core();
- if (NULL != pmu) {
- mali_pmu_delete(pmu);
+
+#if defined(CONFIG_MALI_DVFS)
+ err = mali_dvfs_policy_init();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_init_end();
+ mali_terminate_subsystems();
+ return err;
}
-parse_pmu_config_failed:
- mali_pm_terminate();
-pm_init_failed:
- mali_pp_scheduler_terminate();
-pp_scheduler_init_failed:
-check_shared_interrupts_failed:
- global_gpu_base_address = 0;
-set_global_gpu_base_address_failed:
- /* undoing mali_parse_config_memory() is done by mali_memory_terminate() */
-parse_memory_config_failed:
- mali_memory_terminate();
-memory_init_failed:
-#if defined(CONFIG_MALI400_PROFILING)
- _mali_osk_profiling_term();
#endif
- mali_session_terminate();
-session_init_failed:
- mali_pp_job_terminate();
- return err;
+
+ /* Allowing the system to be turned off */
+ mali_pm_init_end();
+
+ return _MALI_OSK_ERR_OK; /* all ok */
}
void mali_terminate_subsystems(void)
{
struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
- struct mali_dma_core *dma = mali_dma_get_global_dma_core();
MALI_DEBUG_PRINT(2, ("terminate_subsystems() called\n"));
- /* shut down subsystems in reverse order from startup */
+ mali_utilization_term();
+ mali_control_timer_term();
- /* We need the GPU to be powered up for the terminate sequence */
- _mali_osk_pm_dev_ref_add();
+ mali_executor_depopulate();
+ mali_delete_groups(); /* Delete groups not added to executor */
+ mali_executor_terminate();
- mali_utilization_term();
- mali_pp_scheduler_depopulate();
- mali_gp_scheduler_terminate();
mali_scheduler_terminate();
+ mali_pp_job_terminate();
mali_delete_l2_cache_cores();
+ mali_mmu_terminate();
+
if (mali_is_mali450()) {
mali_dlbu_terminate();
}
- mali_mmu_terminate();
+
+ mali_pm_terminate();
+
if (NULL != pmu) {
mali_pmu_delete(pmu);
}
- if (NULL != dma) {
- mali_dma_delete(dma);
- }
- mali_pm_terminate();
- mali_memory_terminate();
+
#if defined(CONFIG_MALI400_PROFILING)
_mali_osk_profiling_term();
#endif
- /* Allowing the system to be turned off */
- _mali_osk_pm_dev_ref_dec();
+ mali_memory_terminate();
- mali_pp_scheduler_terminate();
mali_session_terminate();
- mali_pp_job_terminate();
+ mali_timeline_terminate();
+
+ global_gpu_base_address = 0;
}
_mali_product_id_t mali_kernel_core_get_product_id(void)
args->version = _MALI_UK_API_VERSION; /* report our version */
/* success regardless of being compatible or not */
- MALI_SUCCESS;
+ return _MALI_OSK_ERR_OK;;
}
_mali_osk_errcode_t _mali_ukk_wait_for_notification(_mali_uk_wait_for_notification_s *args)
if (NULL == queue) {
MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
args->type = _MALI_NOTIFICATION_CORE_SHUTDOWN_IN_PROGRESS;
- MALI_SUCCESS;
+ return _MALI_OSK_ERR_OK;;
}
/* receive a notification, might sleep */
/* finished with the notification */
_mali_osk_notification_delete(notification);
- MALI_SUCCESS; /* all ok */
+ return _MALI_OSK_ERR_OK;; /* all ok */
}
_mali_osk_errcode_t _mali_ukk_post_notification(_mali_uk_post_notification_s *args)
/* if the queue does not exist we're currently shutting down */
if (NULL == queue) {
MALI_DEBUG_PRINT(1, ("No notification queue registered with the session. Asking userspace to stop querying\n"));
- MALI_SUCCESS;
+ return _MALI_OSK_ERR_OK;;
}
notification = _mali_osk_notification_create(args->type, 0);
_mali_osk_notification_queue_send(queue, notification);
- MALI_SUCCESS; /* all ok */
+ return _MALI_OSK_ERR_OK;; /* all ok */
}
_mali_osk_errcode_t _mali_ukk_request_high_priority(_mali_uk_request_high_priority_s *args)
MALI_DEBUG_PRINT(2, ("Session 0x%08X with pid %d was granted higher priority.\n", session, _mali_osk_get_pid()));
}
- MALI_SUCCESS;
+ return _MALI_OSK_ERR_OK;;
}
_mali_osk_errcode_t _mali_ukk_open(void **context)
MALI_ERROR(_MALI_OSK_ERR_NOMEM);
}
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
- if (_MALI_OSK_ERR_OK != _mali_osk_atomic_init(&session->number_of_window_jobs, 0)) {
- MALI_DEBUG_PRINT_ERROR(("Initialization of atomic number_of_window_jobs failed.\n"));
- mali_timeline_system_destroy(session->timeline_system);
- mali_soft_job_system_destroy(session->soft_job_system);
- mali_memory_session_end(session);
- mali_mmu_pagedir_free(session->page_directory);
- _mali_osk_notification_queue_term(session->ioctl_queue);
- _mali_osk_free(session);
- return _MALI_OSK_ERR_FAULT;
- }
+#if defined(CONFIG_MALI_DVFS)
+ _mali_osk_atomic_init(&session->number_of_window_jobs, 0);
#endif
session->use_high_priority_job_queue = MALI_FALSE;
_MALI_OSK_INIT_LIST_HEAD(&session->pp_job_fb_lookup_list[i]);
}
+ session->pid = _mali_osk_get_pid();
+ session->comm = _mali_osk_get_comm();
+ session->max_mali_mem_allocated = 0;
+ _mali_osk_memset(session->mali_mem_array, 0, sizeof(size_t) * MALI_MEM_TYPE_MAX);
*context = (void *)session;
/* Add session to the list of all sessions. */
mali_session_add(session);
- MALI_DEBUG_PRINT(2, ("Session started\n"));
- MALI_SUCCESS;
+ MALI_DEBUG_PRINT(3, ("Session started\n"));
+ return _MALI_OSK_ERR_OK;;
}
+#if defined(DEBUG)
+/* parameter used for debug */
+extern u32 num_pm_runtime_resume;
+extern u32 num_pm_updates;
+extern u32 num_pm_updates_up;
+extern u32 num_pm_updates_down;
+#endif
+
_mali_osk_errcode_t _mali_ukk_close(void **context)
{
struct mali_session_data *session;
/* Stop the soft job timer. */
mali_timeline_system_stop_timer(session->timeline_system);
- /* Abort queued and running GP and PP jobs. */
- mali_gp_scheduler_abort_session(session);
- mali_pp_scheduler_abort_session(session);
+ /* Abort queued jobs */
+ mali_scheduler_abort_session(session);
+
+ /* Abort executing jobs */
+ mali_executor_abort_session(session);
/* Abort the soft job system. */
mali_soft_job_system_abort(session->soft_job_system);
/* Free remaining memory allocated to this session */
mali_memory_session_end(session);
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+#if defined(CONFIG_MALI_DVFS)
_mali_osk_atomic_term(&session->number_of_window_jobs);
#endif
*context = NULL;
- MALI_DEBUG_PRINT(2, ("Session has ended\n"));
+ MALI_DEBUG_PRINT(3, ("Session has ended\n"));
- MALI_SUCCESS;
+#if defined(DEBUG)
+ MALI_DEBUG_PRINT(3, ("Stats: # runtime resumes: %u\n", num_pm_runtime_resume));
+ MALI_DEBUG_PRINT(3, (" # PM updates: .... %u (up %u, down %u)\n", num_pm_updates, num_pm_updates_up, num_pm_updates_down));
+
+ num_pm_runtime_resume = 0;
+ num_pm_updates = 0;
+ num_pm_updates_up = 0;
+ num_pm_updates_down = 0;
+#endif
+
+ return _MALI_OSK_ERR_OK;;
}
#if MALI_STATE_TRACKING
{
int n = 0; /* Number of bytes written to buf */
- n += mali_gp_scheduler_dump_state(buf + n, size - n);
- n += mali_pp_scheduler_dump_state(buf + n, size - n);
+ n += mali_scheduler_dump_state(buf + n, size - n);
+ n += mali_executor_dump_state(buf + n, size - n);
return n;
}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2007-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_KERNEL_CORE_H__
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010, 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_kernel_common.h"
#include "mali_kernel_descriptor_mapping.h"
#include "mali_osk.h"
#include "mali_osk_bitops.h"
+#include "mali_memory_types.h"
+#include "mali_session.h"
#define MALI_PAD_INT(x) (((x) + (BITS_PER_LONG - 1)) & ~(BITS_PER_LONG - 1))
{
_mali_osk_errcode_t err = _MALI_OSK_ERR_FAULT;
int new_descriptor;
+ mali_mem_allocation *descriptor;
+ struct mali_session_data *session;
MALI_DEBUG_ASSERT_POINTER(map);
MALI_DEBUG_ASSERT_POINTER(odescriptor);
+ MALI_DEBUG_ASSERT_POINTER(target);
_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
new_descriptor = _mali_osk_find_first_zero_bit(map->table->usage, map->current_nr_mappings);
_mali_osk_set_nonatomic_bit(new_descriptor, map->table->usage);
map->table->mappings[new_descriptor] = target;
*odescriptor = new_descriptor;
+
+ /* To calculate the mali mem usage for the session */
+ descriptor = (mali_mem_allocation *)target;
+ session = descriptor->session;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ session->mali_mem_array[descriptor->type] += descriptor->size;
+ if ((MALI_MEM_OS == descriptor->type || MALI_MEM_BLOCK == descriptor->type) &&
+ (session->mali_mem_array[MALI_MEM_OS] + session->mali_mem_array[MALI_MEM_BLOCK] > session->max_mali_mem_allocated)) {
+ session->max_mali_mem_allocated = session->mali_mem_array[MALI_MEM_OS] + session->mali_mem_array[MALI_MEM_BLOCK];
+ }
err = _MALI_OSK_ERR_OK;
unlock_and_exit:
void *mali_descriptor_mapping_free(mali_descriptor_mapping *map, int descriptor)
{
void *old_value = NULL;
+ mali_mem_allocation *tmp_descriptor;
+ struct mali_session_data *session;
_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
if ((descriptor >= 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage)) {
map->table->mappings[descriptor] = NULL;
_mali_osk_clear_nonatomic_bit(descriptor, map->table->usage);
}
+ if (NULL != old_value) {
+ tmp_descriptor = (mali_mem_allocation *)old_value;
+ session = tmp_descriptor->session;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ MALI_DEBUG_ASSERT(session->mali_mem_array[tmp_descriptor->type] >= tmp_descriptor->size);
+
+ session->mali_mem_array[tmp_descriptor->type] -= tmp_descriptor->size;
+ }
_mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
return old_value;
static void descriptor_table_free(mali_descriptor_table *table)
{
_mali_osk_free(table);
-}
+}
\ No newline at end of file
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010, 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
#include "mali_osk.h"
+struct mali_session_data;
+
/**
* The actual descriptor mapping table, never directly accessed by clients
*/
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_kernel_utilization.h"
#include "mali_session.h"
#include "mali_scheduler.h"
+#include "mali_executor.h"
+#include "mali_dvfs_policy.h"
+#include "mali_control_timer.h"
+
/* Thresholds for GP bound detection. */
#define MALI_GP_BOUND_GP_UTILIZATION_THRESHOLD 240
#define MALI_GP_BOUND_PP_UTILIZATION_THRESHOLD 250
-/* Define how often to calculate and report GPU utilization, in milliseconds */
-static _mali_osk_spinlock_irq_t *time_data_lock;
+static _mali_osk_spinlock_irq_t *utilization_data_lock;
-static u32 num_running_gp_cores;
-static u32 num_running_pp_cores;
+static u32 num_running_gp_cores = 0;
+static u32 num_running_pp_cores = 0;
static u64 work_start_time_gpu = 0;
static u64 work_start_time_gp = 0;
static u64 accumulated_work_time_gp = 0;
static u64 accumulated_work_time_pp = 0;
-static u64 period_start_time = 0;
-static _mali_osk_timer_t *utilization_timer = NULL;
-static mali_bool timer_running = MALI_FALSE;
-
static u32 last_utilization_gpu = 0 ;
static u32 last_utilization_gp = 0 ;
static u32 last_utilization_pp = 0 ;
-extern u32 mali_utilization_timeout;
void (*mali_utilization_callback)(struct mali_gpu_utilization_data *data) = NULL;
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
-extern void mali_power_performance_policy_callback(struct mali_gpu_utilization_data *data);
-#define NUMBER_OF_NANOSECONDS_PER_SECOND 1000000000ULL
-static u32 calculate_window_render_fps(u64 time_period)
-{
- u32 max_window_number;
- u64 tmp;
- u64 max = time_period;
- u32 leading_zeroes;
- u32 shift_val;
- u32 time_period_shift;
- u32 max_window_number_shift;
- u32 ret_val;
-
- max_window_number = mali_session_max_window_num();
- /* To avoid float division, extend the dividend to ns unit */
- tmp = (u64)max_window_number * NUMBER_OF_NANOSECONDS_PER_SECOND;
- if (tmp > time_period) {
- max = tmp;
- }
+/* Define the first timer control timer timeout in milliseconds */
+static u32 mali_control_first_timeout = 100;
+static struct mali_gpu_utilization_data mali_util_data = {0, };
- /*
- * We may have 64-bit values, a dividend or a divisor or both
- * To avoid dependencies to a 64-bit divider, we shift down the two values
- * equally first.
- */
- leading_zeroes = _mali_osk_clz((u32)(max >> 32));
- shift_val = 32 - leading_zeroes;
-
- time_period_shift = (u32)(time_period >> shift_val);
- max_window_number_shift = (u32)(tmp >> shift_val);
-
- ret_val = max_window_number_shift / time_period_shift;
-
- return ret_val;
-}
-#endif /* defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY) */
-
-static void calculate_gpu_utilization(void *arg)
+struct mali_gpu_utilization_data *mali_utilization_calculate(u64 *start_time, u64 *time_period)
{
u64 time_now;
- u64 time_period;
u32 leading_zeroes;
u32 shift_val;
u32 work_normalized_gpu;
u32 utilization_gpu;
u32 utilization_gp;
u32 utilization_pp;
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
- u32 window_render_fps;
-#endif
- _mali_osk_spinlock_irq_lock(time_data_lock);
+ mali_utilization_data_lock();
+
+ time_now = _mali_osk_time_get_ns();
+
+ *time_period = time_now - *start_time;
if (accumulated_work_time_gpu == 0 && work_start_time_gpu == 0) {
/*
* - No need to reschedule timer
* - Report zero usage
*/
- timer_running = MALI_FALSE;
-
last_utilization_gpu = 0;
last_utilization_gp = 0;
last_utilization_pp = 0;
- _mali_osk_spinlock_irq_unlock(time_data_lock);
+ mali_util_data.utilization_gpu = last_utilization_gpu;
+ mali_util_data.utilization_gp = last_utilization_gp;
+ mali_util_data.utilization_pp = last_utilization_pp;
- if (NULL != mali_utilization_callback) {
- struct mali_gpu_utilization_data data = { 0, };
- mali_utilization_callback(&data);
- }
+ mali_utilization_data_unlock();
- mali_scheduler_hint_disable(MALI_SCHEDULER_HINT_GP_BOUND);
+ /* Stop adding the timer until the next job is submitted */
+ mali_control_timer_suspend(MALI_FALSE);
- return;
- }
+ mali_executor_hint_disable(MALI_EXECUTOR_HINT_GP_BOUND);
- time_now = _mali_osk_time_get_ns();
+ MALI_DEBUG_PRINT(4, ("last_utilization_gpu = %d \n", last_utilization_gpu));
+ MALI_DEBUG_PRINT(4, ("last_utilization_gp = %d \n", last_utilization_gp));
+ MALI_DEBUG_PRINT(4, ("last_utilization_pp = %d \n", last_utilization_pp));
- time_period = time_now - period_start_time;
+ return &mali_util_data;
+ }
/* If we are currently busy, update working period up to now */
if (work_start_time_gpu != 0) {
*/
/* Shift the 64-bit values down so they fit inside a 32-bit integer */
- leading_zeroes = _mali_osk_clz((u32)(time_period >> 32));
+ leading_zeroes = _mali_osk_clz((u32)(*time_period >> 32));
shift_val = 32 - leading_zeroes;
work_normalized_gpu = (u32)(accumulated_work_time_gpu >> shift_val);
work_normalized_gp = (u32)(accumulated_work_time_gp >> shift_val);
work_normalized_pp = (u32)(accumulated_work_time_pp >> shift_val);
- period_normalized = (u32)(time_period >> shift_val);
+ period_normalized = (u32)(*time_period >> shift_val);
/*
* Now, we should report the usage in parts of 256
utilization_gp = work_normalized_gp / period_normalized;
utilization_pp = work_normalized_pp / period_normalized;
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
- window_render_fps = calculate_window_render_fps(time_period);
-#endif
-
last_utilization_gpu = utilization_gpu;
last_utilization_gp = utilization_gp;
last_utilization_pp = utilization_pp;
if ((MALI_GP_BOUND_GP_UTILIZATION_THRESHOLD < last_utilization_gp) &&
(MALI_GP_BOUND_PP_UTILIZATION_THRESHOLD > last_utilization_pp)) {
- mali_scheduler_hint_enable(MALI_SCHEDULER_HINT_GP_BOUND);
+ mali_executor_hint_enable(MALI_EXECUTOR_HINT_GP_BOUND);
} else {
- mali_scheduler_hint_disable(MALI_SCHEDULER_HINT_GP_BOUND);
+ mali_executor_hint_disable(MALI_EXECUTOR_HINT_GP_BOUND);
}
/* starting a new period */
accumulated_work_time_gpu = 0;
accumulated_work_time_gp = 0;
accumulated_work_time_pp = 0;
- period_start_time = time_now;
- _mali_osk_spinlock_irq_unlock(time_data_lock);
+ *start_time = time_now;
- _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(mali_utilization_timeout));
+ mali_util_data.utilization_gp = last_utilization_gp;
+ mali_util_data.utilization_gpu = last_utilization_gpu;
+ mali_util_data.utilization_pp = last_utilization_pp;
- if (NULL != mali_utilization_callback) {
- struct mali_gpu_utilization_data data = {
- utilization_gpu, utilization_gp, utilization_pp,
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
- window_render_fps, window_render_fps
-#endif
- };
- mali_utilization_callback(&data);
- }
+ mali_utilization_data_unlock();
+
+ MALI_DEBUG_PRINT(4, ("last_utilization_gpu = %d \n", last_utilization_gpu));
+ MALI_DEBUG_PRINT(4, ("last_utilization_gp = %d \n", last_utilization_gp));
+ MALI_DEBUG_PRINT(4, ("last_utilization_pp = %d \n", last_utilization_pp));
+
+ return &mali_util_data;
}
_mali_osk_errcode_t mali_utilization_init(void)
_mali_osk_device_data data;
if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
- /* Use device specific settings (if defined) */
- if (0 != data.utilization_interval) {
- mali_utilization_timeout = data.utilization_interval;
- }
if (NULL != data.utilization_callback) {
mali_utilization_callback = data.utilization_callback;
- MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: Platform has it's own policy \n"));
- MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: Utilization handler installed with interval %u\n", mali_utilization_timeout));
+ MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: Utilization handler installed \n"));
}
}
-#endif
-
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
- if (mali_utilization_callback == NULL) {
- MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: MALI Power Performance Policy Algorithm \n"));
- mali_utilization_callback = mali_power_performance_policy_callback;
- }
-#endif
+#endif /* defined(USING_GPU_UTILIZATION) */
if (NULL == mali_utilization_callback) {
- MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: No utilization handler installed\n"));
+ MALI_DEBUG_PRINT(2, ("Mali GPU Utilization: No platform utilization handler installed\n"));
}
- time_data_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_UTILIZATION);
-
- if (NULL == time_data_lock) {
+ utilization_data_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_UTILIZATION);
+ if (NULL == utilization_data_lock) {
return _MALI_OSK_ERR_FAULT;
}
num_running_gp_cores = 0;
num_running_pp_cores = 0;
- utilization_timer = _mali_osk_timer_init();
- if (NULL == utilization_timer) {
- _mali_osk_spinlock_irq_term(time_data_lock);
- return _MALI_OSK_ERR_FAULT;
- }
- _mali_osk_timer_setcallback(utilization_timer, calculate_gpu_utilization, NULL);
-
return _MALI_OSK_ERR_OK;
}
-void mali_utilization_suspend(void)
-{
- _mali_osk_spinlock_irq_lock(time_data_lock);
-
- if (timer_running == MALI_TRUE) {
- timer_running = MALI_FALSE;
- _mali_osk_spinlock_irq_unlock(time_data_lock);
- _mali_osk_timer_del(utilization_timer);
- return;
- }
-
- _mali_osk_spinlock_irq_unlock(time_data_lock);
-}
-
void mali_utilization_term(void)
{
- if (NULL != utilization_timer) {
- _mali_osk_timer_del(utilization_timer);
- timer_running = MALI_FALSE;
- _mali_osk_timer_term(utilization_timer);
- utilization_timer = NULL;
+ if (NULL != utilization_data_lock) {
+ _mali_osk_spinlock_irq_term(utilization_data_lock);
}
-
- _mali_osk_spinlock_irq_term(time_data_lock);
}
void mali_utilization_gp_start(void)
{
- _mali_osk_spinlock_irq_lock(time_data_lock);
+ mali_utilization_data_lock();
++num_running_gp_cores;
if (1 == num_running_gp_cores) {
work_start_time_gp = time_now;
if (0 == num_running_pp_cores) {
+ mali_bool is_resume = MALI_FALSE;
/*
* There are no PP cores running, so this is also the point
* at which we consider the GPU to be busy as well.
*/
work_start_time_gpu = time_now;
- }
-
- /* Start a new period (and timer) if needed */
- if (timer_running != MALI_TRUE) {
- timer_running = MALI_TRUE;
- period_start_time = time_now;
- /* Clear session->number_of_window_jobs */
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
- mali_session_max_window_num();
+ is_resume = mali_control_timer_resume(time_now);
+
+ mali_utilization_data_unlock();
+
+ if (is_resume) {
+ /* Do some policy in new period for performance consideration */
+#if defined(CONFIG_MALI_DVFS)
+ /* Clear session->number_of_window_jobs, prepare parameter for dvfs */
+ mali_session_max_window_num();
+ if (0 == last_utilization_gpu) {
+ /*
+ * Because mali_dev_pause is called when setting the clock,
+ * every clock change would jump to the highest step even
+ * when clocking down, which is unnecessary. So we only
+ * reset the clock when the last measured utilization was 0,
+ * i.e. the timer had stopped and the GPU is being
+ * started again.
+ */
+ mali_dvfs_policy_new_period();
+ }
#endif
- _mali_osk_spinlock_irq_unlock(time_data_lock);
-
- _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(mali_utilization_timeout));
+ /*
+ * Use a short interval for the first timeout to save power:
+ * we give full power in the new period, but if the job load
+ * is light and finishes in 10ms, staying at high frequency
+ * for the rest of the period would waste energy.
+ */
+ mali_control_timer_add(mali_control_first_timeout);
+ }
} else {
- _mali_osk_spinlock_irq_unlock(time_data_lock);
+ mali_utilization_data_unlock();
}
+
} else {
/* Nothing to do */
- _mali_osk_spinlock_irq_unlock(time_data_lock);
+ mali_utilization_data_unlock();
}
}
void mali_utilization_pp_start(void)
{
- _mali_osk_spinlock_irq_lock(time_data_lock);
+ mali_utilization_data_lock();
++num_running_pp_cores;
if (1 == num_running_pp_cores) {
work_start_time_pp = time_now;
if (0 == num_running_gp_cores) {
+ mali_bool is_resume = MALI_FALSE;
/*
* There are no GP cores running, so this is also the point
* at which we consider the GPU to be busy as well.
*/
work_start_time_gpu = time_now;
- }
-
- /* Start a new period (and timer) if needed */
- if (timer_running != MALI_TRUE) {
- timer_running = MALI_TRUE;
- period_start_time = time_now;
- /* Clear session->number_of_window_jobs */
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
- mali_session_max_window_num();
+ /* Start a new period if stopped */
+ is_resume = mali_control_timer_resume(time_now);
+
+ mali_utilization_data_unlock();
+
+ if (is_resume) {
+#if defined(CONFIG_MALI_DVFS)
+ /* Clear session->number_of_window_jobs, prepare parameter for dvfs */
+ mali_session_max_window_num();
+ if (0 == last_utilization_gpu) {
+ /*
+ * Because mali_dev_pause is called when setting the clock,
+ * every clock change would jump to the highest step even
+ * when clocking down, which is unnecessary. So we only
+ * reset the clock when the last measured utilization was 0,
+ * i.e. the timer had stopped and the GPU is being
+ * started again.
+ */
+ mali_dvfs_policy_new_period();
+ }
#endif
- _mali_osk_spinlock_irq_unlock(time_data_lock);
- _mali_osk_timer_add(utilization_timer, _mali_osk_time_mstoticks(mali_utilization_timeout));
+ /*
+ * Use a short interval for the first timeout to save power:
+ * we give full power in the new period, but if the job load
+ * is light and finishes in 10ms, staying at high frequency
+ * for the rest of the period would waste energy.
+ */
+ mali_control_timer_add(mali_control_first_timeout);
+ }
} else {
- _mali_osk_spinlock_irq_unlock(time_data_lock);
+ mali_utilization_data_unlock();
}
} else {
/* Nothing to do */
- _mali_osk_spinlock_irq_unlock(time_data_lock);
+ mali_utilization_data_unlock();
}
}
void mali_utilization_gp_end(void)
{
- _mali_osk_spinlock_irq_lock(time_data_lock);
+ mali_utilization_data_lock();
--num_running_gp_cores;
if (0 == num_running_gp_cores) {
}
}
- _mali_osk_spinlock_irq_unlock(time_data_lock);
+ mali_utilization_data_unlock();
}
void mali_utilization_pp_end(void)
{
- _mali_osk_spinlock_irq_lock(time_data_lock);
+ mali_utilization_data_lock();
--num_running_pp_cores;
if (0 == num_running_pp_cores) {
}
}
- _mali_osk_spinlock_irq_unlock(time_data_lock);
+ mali_utilization_data_unlock();
+}
+
+mali_bool mali_utilization_enabled(void)
+{
+#if defined(CONFIG_MALI_DVFS)
+ return mali_dvfs_policy_enabled();
+#else
+ return (NULL != mali_utilization_callback);
+#endif /* defined(CONFIG_MALI_DVFS) */
+}
+
+void mali_utilization_platform_realize(struct mali_gpu_utilization_data *util_data)
+{
+ MALI_DEBUG_ASSERT_POINTER(mali_utilization_callback);
+
+ mali_utilization_callback(util_data);
+}
+
+void mali_utilization_reset(void)
+{
+ accumulated_work_time_gpu = 0;
+ accumulated_work_time_gp = 0;
+ accumulated_work_time_pp = 0;
+
+ last_utilization_gpu = 0;
+ last_utilization_gp = 0;
+ last_utilization_pp = 0;
+}
+
+void mali_utilization_data_lock(void)
+{
+ _mali_osk_spinlock_irq_lock(utilization_data_lock);
+}
+
+void mali_utilization_data_unlock(void)
+{
+ _mali_osk_spinlock_irq_unlock(utilization_data_lock);
}
u32 _mali_ukk_utilization_gp_pp(void)
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_KERNEL_UTILIZATION_H__
#include <linux/mali/mali_utgard.h>
#include "mali_osk.h"
-extern void (*mali_utilization_callback)(struct mali_gpu_utilization_data *data);
-
/**
* Initialize/start the Mali GPU utilization metrics reporting.
*
/**
* Check if Mali utilization is enabled
*/
-MALI_STATIC_INLINE mali_bool mali_utilization_enabled(void)
-{
- return (NULL != mali_utilization_callback);
-}
+mali_bool mali_utilization_enabled(void);
/**
* Should be called when a job is about to execute a GP job
void mali_utilization_pp_end(void);
/**
- * Should be called to stop the utilization timer during system suspend
+ * Should be called to calculate the GPU utilization
*/
-void mali_utilization_suspend(void);
+struct mali_gpu_utilization_data *mali_utilization_calculate(u64 *start_time, u64 *time_period);
+
+_mali_osk_spinlock_irq_t *mali_utilization_get_lock(void);
+
+void mali_utilization_platform_realize(struct mali_gpu_utilization_data *util_data);
+
+void mali_utilization_data_lock(void);
+
+void mali_utilization_data_unlock(void);
+
+void mali_utilization_reset(void);
#endif /* __MALI_KERNEL_UTILIZATION_H__ */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_kernel_common.h"
#include "mali_osk.h"
#include "mali_ukk.h"
-#if defined(CONFIG_MALI400_PROFILING)
#include "mali_osk_profiling.h"
-#endif
_mali_osk_errcode_t _mali_ukk_vsync_event_report(_mali_uk_vsync_event_report_s *args)
{
_mali_uk_vsync_event event = (_mali_uk_vsync_event)args->event;
MALI_IGNORE(event); /* event is not used for release code, and that is OK */
-#if defined(CONFIG_MALI400_PROFILING)
/*
* Manually generate user space events in kernel space.
* This saves user space from calling kernel space twice in this case.
}
if (event == _MALI_UK_VSYNC_EVENT_END_WAIT) {
-
_mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_RESUME |
MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_VSYNC,
_mali_osk_get_pid(), _mali_osk_get_tid(), 0, 0, 0);
}
-#endif
+
MALI_DEBUG_PRINT(4, ("Received VSYNC event: %d\n", event));
MALI_SUCCESS;
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_kernel_common.h"
#include "mali_osk.h"
#include "mali_l2_cache.h"
#include "mali_hw_core.h"
#include "mali_scheduler.h"
+#include "mali_pm.h"
#include "mali_pm_domain.h"
/**
MALI400_L2_CACHE_REGISTER_SIZE = 0x0004,
MALI400_L2_CACHE_REGISTER_STATUS = 0x0008,
/*unused = 0x000C */
- MALI400_L2_CACHE_REGISTER_COMMAND = 0x0010, /**< Misc cache commands, e.g. clear */
+ MALI400_L2_CACHE_REGISTER_COMMAND = 0x0010,
MALI400_L2_CACHE_REGISTER_CLEAR_PAGE = 0x0014,
- MALI400_L2_CACHE_REGISTER_MAX_READS = 0x0018, /**< Limit of outstanding read requests */
- MALI400_L2_CACHE_REGISTER_ENABLE = 0x001C, /**< Enable misc cache features */
+ MALI400_L2_CACHE_REGISTER_MAX_READS = 0x0018,
+ MALI400_L2_CACHE_REGISTER_ENABLE = 0x001C,
MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0 = 0x0020,
MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0 = 0x0024,
MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1 = 0x0028,
* These are the commands that can be sent to the Mali L2 cache unit
*/
typedef enum mali_l2_cache_command {
- MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01, /**< Clear the entire cache */
- /* Read HW TRM carefully before adding/using other commands than the clear above */
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL = 0x01,
} mali_l2_cache_command;
/**
* These are the commands that can be sent to the Mali L2 cache unit
*/
typedef enum mali_l2_cache_enable {
- MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /**< Default state of enable register */
- MALI400_L2_CACHE_ENABLE_ACCESS = 0x01, /**< Permit cacheable accesses */
- MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02, /**< Permit cache read allocate */
+ MALI400_L2_CACHE_ENABLE_DEFAULT = 0x0, /* Default */
+ MALI400_L2_CACHE_ENABLE_ACCESS = 0x01,
+ MALI400_L2_CACHE_ENABLE_READ_ALLOCATE = 0x02,
} mali_l2_cache_enable;
/**
* Mali L2 cache status bits
*/
typedef enum mali_l2_cache_status {
- MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01, /**< Command handler of L2 cache is busy */
- MALI400_L2_CACHE_STATUS_DATA_BUSY = 0x02, /**< L2 cache is busy handling data requests */
+ MALI400_L2_CACHE_STATUS_COMMAND_BUSY = 0x01,
+ MALI400_L2_CACHE_STATUS_DATA_BUSY = 0x02,
} mali_l2_cache_status;
-#define MALI400_L2_MAX_READS_DEFAULT 0x1C
+#define MALI400_L2_MAX_READS_NOT_SET -1
-static struct mali_l2_cache_core *mali_global_l2_cache_cores[MALI_MAX_NUMBER_OF_L2_CACHE_CORES] = { NULL, };
-static u32 mali_global_num_l2_cache_cores = 0;
+static struct mali_l2_cache_core *
+ mali_global_l2s[MALI_MAX_NUMBER_OF_L2_CACHE_CORES] = { NULL, };
+static u32 mali_global_num_l2s = 0;
-int mali_l2_max_reads = MALI400_L2_MAX_READS_DEFAULT;
+int mali_l2_max_reads = MALI400_L2_MAX_READS_NOT_SET;
/* Local helper functions */
-static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val);
+static void mali_l2_cache_reset(struct mali_l2_cache_core *cache);
-static void mali_l2_cache_counter_lock(struct mali_l2_cache_core *cache)
-{
-#ifdef MALI_UPPER_HALF_SCHEDULING
- _mali_osk_spinlock_irq_lock(cache->counter_lock);
-#else
- _mali_osk_spinlock_lock(cache->counter_lock);
-#endif
-}
+static _mali_osk_errcode_t mali_l2_cache_send_command(
+ struct mali_l2_cache_core *cache, u32 reg, u32 val);
-static void mali_l2_cache_counter_unlock(struct mali_l2_cache_core *cache)
+static void mali_l2_cache_lock(struct mali_l2_cache_core *cache)
{
-#ifdef MALI_UPPER_HALF_SCHEDULING
- _mali_osk_spinlock_irq_unlock(cache->counter_lock);
-#else
- _mali_osk_spinlock_unlock(cache->counter_lock);
-#endif
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ _mali_osk_spinlock_irq_lock(cache->lock);
}
-static void mali_l2_cache_command_lock(struct mali_l2_cache_core *cache)
+static void mali_l2_cache_unlock(struct mali_l2_cache_core *cache)
{
-#ifdef MALI_UPPER_HALF_SCHEDULING
- _mali_osk_spinlock_irq_lock(cache->command_lock);
-#else
- _mali_osk_spinlock_lock(cache->command_lock);
-#endif
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ _mali_osk_spinlock_irq_unlock(cache->lock);
}
-static void mali_l2_cache_command_unlock(struct mali_l2_cache_core *cache)
-{
-#ifdef MALI_UPPER_HALF_SCHEDULING
- _mali_osk_spinlock_irq_unlock(cache->command_lock);
-#else
- _mali_osk_spinlock_unlock(cache->command_lock);
-#endif
-}
+/* Implementation of the L2 cache interface */
-struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t *resource)
+struct mali_l2_cache_core *mali_l2_cache_create(
+ _mali_osk_resource_t *resource, u32 domain_index)
{
struct mali_l2_cache_core *cache = NULL;
+#if defined(DEBUG)
+ u32 cache_size;
+#endif
- MALI_DEBUG_PRINT(4, ("Mali L2 cache: Creating Mali L2 cache: %s\n", resource->description));
+ MALI_DEBUG_PRINT(4, ("Mali L2 cache: Creating Mali L2 cache: %s\n",
+ resource->description));
- if (mali_global_num_l2_cache_cores >= MALI_MAX_NUMBER_OF_L2_CACHE_CORES) {
- MALI_PRINT_ERROR(("Mali L2 cache: Too many L2 cache core objects created\n"));
+ if (mali_global_num_l2s >= MALI_MAX_NUMBER_OF_L2_CACHE_CORES) {
+ MALI_PRINT_ERROR(("Mali L2 cache: Too many L2 caches\n"));
return NULL;
}
cache = _mali_osk_malloc(sizeof(struct mali_l2_cache_core));
- if (NULL != cache) {
- cache->core_id = mali_global_num_l2_cache_cores;
- cache->counter_src0 = MALI_HW_CORE_NO_COUNTER;
- cache->counter_src1 = MALI_HW_CORE_NO_COUNTER;
- cache->pm_domain = NULL;
- cache->mali_l2_status = MALI_L2_NORMAL;
- if (_MALI_OSK_ERR_OK == mali_hw_core_create(&cache->hw_core, resource, MALI400_L2_CACHE_REGISTERS_SIZE)) {
- MALI_DEBUG_CODE(u32 cache_size = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_SIZE));
- MALI_DEBUG_PRINT(2, ("Mali L2 cache: Created %s: % 3uK, %u-way, % 2ubyte cache line, % 3ubit external bus\n",
- resource->description,
- 1 << (((cache_size >> 16) & 0xff) - 10),
- 1 << ((cache_size >> 8) & 0xff),
- 1 << (cache_size & 0xff),
- 1 << ((cache_size >> 24) & 0xff)));
-
-#ifdef MALI_UPPER_HALF_SCHEDULING
- cache->command_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
-#else
- cache->command_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
-#endif
- if (NULL != cache->command_lock) {
-#ifdef MALI_UPPER_HALF_SCHEDULING
- cache->counter_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
-#else
- cache->counter_lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_L2_COMMAND);
-#endif
- if (NULL != cache->counter_lock) {
- mali_l2_cache_reset(cache);
-
- cache->last_invalidated_id = 0;
-
- mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = cache;
- mali_global_num_l2_cache_cores++;
-
- return cache;
- } else {
- MALI_PRINT_ERROR(("Mali L2 cache: Failed to create counter lock for L2 cache core %s\n", cache->hw_core.description));
- }
-#ifdef MALI_UPPER_HALF_SCHEDULING
- _mali_osk_spinlock_irq_term(cache->command_lock);
-#else
- _mali_osk_spinlock_term(cache->command_lock);
-#endif
- } else {
- MALI_PRINT_ERROR(("Mali L2 cache: Failed to create command lock for L2 cache core %s\n", cache->hw_core.description));
- }
+ if (NULL == cache) {
+ MALI_PRINT_ERROR(("Mali L2 cache: Failed to allocate memory for L2 cache core\n"));
+ return NULL;
+ }
- mali_hw_core_delete(&cache->hw_core);
- }
+ cache->core_id = mali_global_num_l2s;
+ cache->counter_src0 = MALI_HW_CORE_NO_COUNTER;
+ cache->counter_src1 = MALI_HW_CORE_NO_COUNTER;
+ cache->counter_value0_base = 0;
+ cache->counter_value1_base = 0;
+ cache->pm_domain = NULL;
+ cache->power_is_on = MALI_FALSE;
+ cache->last_invalidated_id = 0;
+
+ if (_MALI_OSK_ERR_OK != mali_hw_core_create(&cache->hw_core,
+ resource, MALI400_L2_CACHE_REGISTERS_SIZE)) {
+ _mali_osk_free(cache);
+ return NULL;
+ }
+
+#if defined(DEBUG)
+ cache_size = mali_hw_core_register_read(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_SIZE);
+ MALI_DEBUG_PRINT(2, ("Mali L2 cache: Created %s: % 3uK, %u-way, % 2ubyte cache line, % 3ubit external bus\n",
+ resource->description,
+ 1 << (((cache_size >> 16) & 0xff) - 10),
+ 1 << ((cache_size >> 8) & 0xff),
+ 1 << (cache_size & 0xff),
+ 1 << ((cache_size >> 24) & 0xff)));
+#endif
+ cache->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_L2);
+ if (NULL == cache->lock) {
+ MALI_PRINT_ERROR(("Mali L2 cache: Failed to create counter lock for L2 cache core %s\n",
+ cache->hw_core.description));
+ mali_hw_core_delete(&cache->hw_core);
_mali_osk_free(cache);
- } else {
- MALI_PRINT_ERROR(("Mali L2 cache: Failed to allocate memory for L2 cache core\n"));
+ return NULL;
}
- return NULL;
+ /* register with correct power domain */
+ cache->pm_domain = mali_pm_register_l2_cache(
+ domain_index, cache);
+
+ mali_global_l2s[mali_global_num_l2s] = cache;
+ mali_global_num_l2s++;
+
+ return cache;
}
void mali_l2_cache_delete(struct mali_l2_cache_core *cache)
{
u32 i;
+ for (i = 0; i < mali_global_num_l2s; i++) {
+ if (mali_global_l2s[i] != cache) {
+ continue;
+ }
- /* reset to defaults */
- mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)MALI400_L2_MAX_READS_DEFAULT);
- mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_DEFAULT);
-
-#ifdef MALI_UPPER_HALF_SCHEDULING
- _mali_osk_spinlock_irq_term(cache->counter_lock);
- _mali_osk_spinlock_irq_term(cache->command_lock);
-#else
- _mali_osk_spinlock_term(cache->command_lock);
- _mali_osk_spinlock_term(cache->counter_lock);
-#endif
-
- mali_hw_core_delete(&cache->hw_core);
-
- for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
- if (mali_global_l2_cache_cores[i] == cache) {
- mali_global_l2_cache_cores[i] = NULL;
- mali_global_num_l2_cache_cores--;
-
- if (i != mali_global_num_l2_cache_cores) {
- /* We removed a l2 cache from the middle of the array -- move the last
- * l2 cache to the current position to close the gap */
- mali_global_l2_cache_cores[i] = mali_global_l2_cache_cores[mali_global_num_l2_cache_cores];
- mali_global_l2_cache_cores[mali_global_num_l2_cache_cores] = NULL;
- }
+ mali_global_l2s[i] = NULL;
+ mali_global_num_l2s--;
+ if (i == mali_global_num_l2s) {
+ /* Removed last element, nothing more to do */
break;
}
+
+		/*
+		 * We removed an L2 cache from the middle of the array,
+		 * so move the last L2 cache to the current position
+		 */
+ mali_global_l2s[i] = mali_global_l2s[mali_global_num_l2s];
+ mali_global_l2s[mali_global_num_l2s] = NULL;
+
+ /* All good */
+ break;
}
+ _mali_osk_spinlock_irq_term(cache->lock);
+ mali_hw_core_delete(&cache->hw_core);
_mali_osk_free(cache);
}
-u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache)
+void mali_l2_cache_power_up(struct mali_l2_cache_core *cache)
{
- return cache->core_id;
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ mali_l2_cache_lock(cache);
+
+ mali_l2_cache_reset(cache);
+
+ MALI_DEBUG_ASSERT(MALI_FALSE == cache->power_is_on);
+ cache->power_is_on = MALI_TRUE;
+
+ mali_l2_cache_unlock(cache);
}
-static void mali_l2_cache_core_set_counter_internal(struct mali_l2_cache_core *cache, u32 source_id, u32 counter)
+void mali_l2_cache_power_down(struct mali_l2_cache_core *cache)
{
- u32 value = 0; /* disabled src */
- u32 reg_offset = 0;
- mali_bool core_is_on;
-
MALI_DEBUG_ASSERT_POINTER(cache);
- core_is_on = mali_l2_cache_lock_power_state(cache);
-
- mali_l2_cache_counter_lock(cache);
+ mali_l2_cache_lock(cache);
- switch (source_id) {
- case 0:
- cache->counter_src0 = counter;
- reg_offset = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0;
- break;
+ MALI_DEBUG_ASSERT(MALI_TRUE == cache->power_is_on);
- case 1:
- cache->counter_src1 = counter;
- reg_offset = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1;
- break;
+ /*
+ * The HW counters will start from zero again when we resume,
+ * but we should report counters as always increasing.
+ * Take a copy of the HW values now in order to add this to
+ * the values we report after being powered up.
+ *
+ * The physical power off of the L2 cache might be outside our
+ * own control (e.g. runtime PM). That is why we must manually
+	 * set the counter value to zero as well.
+ */
- default:
- MALI_DEBUG_ASSERT(0);
- break;
+ if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
+ cache->counter_value0_base += mali_hw_core_register_read(
+ &cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0, 0);
}
- if (MALI_L2_PAUSE == cache->mali_l2_status) {
- mali_l2_cache_counter_unlock(cache);
- mali_l2_cache_unlock_power_state(cache);
- return;
+ if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+ cache->counter_value1_base += mali_hw_core_register_read(
+ &cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1, 0);
}
- if (MALI_HW_CORE_NO_COUNTER != counter) {
- value = counter;
- }
- if (MALI_TRUE == core_is_on) {
- mali_hw_core_register_write(&cache->hw_core, reg_offset, value);
- }
+ cache->power_is_on = MALI_FALSE;
- mali_l2_cache_counter_unlock(cache);
- mali_l2_cache_unlock_power_state(cache);
+ mali_l2_cache_unlock(cache);
}
-void mali_l2_cache_core_set_counter_src0(struct mali_l2_cache_core *cache, u32 counter)
+void mali_l2_cache_core_set_counter_src(
+ struct mali_l2_cache_core *cache, u32 source_id, u32 counter)
{
- mali_l2_cache_core_set_counter_internal(cache, 0, counter);
-}
+ u32 reg_offset_src;
+ u32 reg_offset_val;
-void mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter)
-{
- mali_l2_cache_core_set_counter_internal(cache, 1, counter);
-}
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ MALI_DEBUG_ASSERT(source_id >= 0 && source_id <= 1);
-u32 mali_l2_cache_core_get_counter_src0(struct mali_l2_cache_core *cache)
-{
- return cache->counter_src0;
-}
+ mali_l2_cache_lock(cache);
-u32 mali_l2_cache_core_get_counter_src1(struct mali_l2_cache_core *cache)
-{
- return cache->counter_src1;
+ if (0 == source_id) {
+ /* start counting from 0 */
+ cache->counter_value0_base = 0;
+ cache->counter_src0 = counter;
+ reg_offset_src = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0;
+ reg_offset_val = MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0;
+ } else {
+ /* start counting from 0 */
+ cache->counter_value1_base = 0;
+ cache->counter_src1 = counter;
+ reg_offset_src = MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1;
+ reg_offset_val = MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1;
+ }
+
+ if (cache->power_is_on) {
+ u32 hw_src;
+
+ if (MALI_HW_CORE_NO_COUNTER != counter) {
+ hw_src = counter;
+ } else {
+ hw_src = 0; /* disable value for HW */
+ }
+
+ /* Set counter src */
+ mali_hw_core_register_write(&cache->hw_core,
+ reg_offset_src, hw_src);
+
+ /* Make sure the HW starts counting from 0 again */
+ mali_hw_core_register_write(&cache->hw_core,
+ reg_offset_val, 0);
+ }
+
+ mali_l2_cache_unlock(cache);
}
-void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1)
+void mali_l2_cache_core_get_counter_values(
+ struct mali_l2_cache_core *cache,
+ u32 *src0, u32 *value0, u32 *src1, u32 *value1)
{
+ MALI_DEBUG_ASSERT_POINTER(cache);
MALI_DEBUG_ASSERT(NULL != src0);
MALI_DEBUG_ASSERT(NULL != value0);
MALI_DEBUG_ASSERT(NULL != src1);
MALI_DEBUG_ASSERT(NULL != value1);
- /* Caller must hold the PM lock and know that we are powered on */
-
- mali_l2_cache_counter_lock(cache);
-
- if (MALI_L2_PAUSE == cache->mali_l2_status) {
- mali_l2_cache_counter_unlock(cache);
-
- return;
- }
+ mali_l2_cache_lock(cache);
*src0 = cache->counter_src0;
*src1 = cache->counter_src1;
if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
- *value0 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
- }
+ if (MALI_TRUE == cache->power_is_on) {
+ *value0 = mali_hw_core_register_read(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL0);
+ } else {
+ *value0 = 0;
+ }
- if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
- *value1 = mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+		/* Add base offset value (in case we have been powered off) */
+ *value0 += cache->counter_value0_base;
}
- mali_l2_cache_counter_unlock(cache);
-}
-
-static void mali_l2_cache_reset_counters_all(void)
-{
- int i;
- u32 value;
- struct mali_l2_cache_core *cache;
- u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
-
- for (i = 0; i < num_cores; i++) {
- cache = mali_l2_cache_core_get_glob_l2_core(i);
- if (!cache)
- continue;
-
- if (mali_l2_cache_lock_power_state(cache)) {
- mali_l2_cache_counter_lock(cache);
-
- if (MALI_L2_PAUSE == cache->mali_l2_status) {
- mali_l2_cache_counter_unlock(cache);
- mali_l2_cache_unlock_power_state(cache);
- return;
- }
-
- /* Reset performance counters */
- if (MALI_HW_CORE_NO_COUNTER == cache->counter_src0) {
- value = 0;
- } else {
- value = cache->counter_src0;
- }
- mali_hw_core_register_write(&cache->hw_core,
- MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, value);
-
- if (MALI_HW_CORE_NO_COUNTER == cache->counter_src1) {
- value = 0;
- } else {
- value = cache->counter_src1;
- }
- mali_hw_core_register_write(&cache->hw_core,
- MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, value);
-
- mali_l2_cache_counter_unlock(cache);
+ if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+ if (MALI_TRUE == cache->power_is_on) {
+ *value1 = mali_hw_core_register_read(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_VAL1);
+ } else {
+ *value1 = 0;
}
- mali_l2_cache_unlock_power_state(cache);
+		/* Add base offset value (in case we have been powered off) */
+ *value1 += cache->counter_value1_base;
}
-}
+ mali_l2_cache_unlock(cache);
+}
struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index)
{
- if (mali_global_num_l2_cache_cores > index) {
- return mali_global_l2_cache_cores[index];
+ if (mali_global_num_l2s > index) {
+ return mali_global_l2s[index];
}
return NULL;
u32 mali_l2_cache_core_get_glob_num_l2_cores(void)
{
- return mali_global_num_l2_cache_cores;
+ return mali_global_num_l2s;
}
-void mali_l2_cache_reset(struct mali_l2_cache_core *cache)
+void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache)
{
- /* Invalidate cache (just to keep it in a known state at startup) */
- mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
-
- mali_l2_cache_counter_lock(cache);
-
- if (MALI_L2_PAUSE == cache->mali_l2_status) {
- mali_l2_cache_counter_unlock(cache);
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ if (NULL == cache) {
return;
}
- /* Enable cache */
- mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_ENABLE, (u32)MALI400_L2_CACHE_ENABLE_ACCESS | (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
- mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_MAX_READS, (u32)mali_l2_max_reads);
-
- /* Restart any performance counters (if enabled) */
- if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
- mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0, cache->counter_src0);
- }
-
- if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
- mali_hw_core_register_write(&cache->hw_core, MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1, cache->counter_src1);
- }
-
- mali_l2_cache_counter_unlock(cache);
-}
+ mali_l2_cache_lock(cache);
-void mali_l2_cache_reset_all(void)
-{
- int i;
- u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
+ cache->last_invalidated_id = mali_scheduler_get_new_cache_order();
+ mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND,
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
- for (i = 0; i < num_cores; i++) {
- mali_l2_cache_reset(mali_l2_cache_core_get_glob_l2_core(i));
- }
+ mali_l2_cache_unlock(cache);
}
-void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache)
+void mali_l2_cache_invalidate_conditional(
+ struct mali_l2_cache_core *cache, u32 id)
{
MALI_DEBUG_ASSERT_POINTER(cache);
- if (NULL != cache) {
- cache->last_invalidated_id = mali_scheduler_get_new_cache_order();
- mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+ if (NULL == cache) {
+ return;
}
-}
-mali_bool mali_l2_cache_invalidate_conditional(struct mali_l2_cache_core *cache, u32 id)
-{
- MALI_DEBUG_ASSERT_POINTER(cache);
+ /*
+ * If the last cache invalidation was done by a job with a higher id we
+ * don't have to flush. Since user space will store jobs w/ their
+ * corresponding memory in sequence (first job #0, then job #1, ...),
+ * we don't have to flush for job n-1 if job n has already invalidated
+ * the cache since we know for sure that job n-1's memory was already
+ * written when job n was started.
+ */
- if (NULL != cache) {
- /* If the last cache invalidation was done by a job with a higher id we
- * don't have to flush. Since user space will store jobs w/ their
- * corresponding memory in sequence (first job #0, then job #1, ...),
- * we don't have to flush for job n-1 if job n has already invalidated
- * the cache since we know for sure that job n-1's memory was already
- * written when job n was started. */
- if (((s32)id) <= ((s32)cache->last_invalidated_id)) {
- return MALI_FALSE;
- } else {
- cache->last_invalidated_id = mali_scheduler_get_new_cache_order();
- }
+ mali_l2_cache_lock(cache);
- mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+ if (((s32)id) > ((s32)cache->last_invalidated_id)) {
+ /* Set latest invalidated id to current "point in time" */
+ cache->last_invalidated_id =
+ mali_scheduler_get_new_cache_order();
+ mali_l2_cache_send_command(cache,
+ MALI400_L2_CACHE_REGISTER_COMMAND,
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
}
- return MALI_TRUE;
+
+ mali_l2_cache_unlock(cache);
}
void mali_l2_cache_invalidate_all(void)
{
u32 i;
- for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
- /*additional check*/
- if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i])) {
- _mali_osk_errcode_t ret;
- mali_global_l2_cache_cores[i]->last_invalidated_id = mali_scheduler_get_new_cache_order();
- ret = mali_l2_cache_send_command(mali_global_l2_cache_cores[i], MALI400_L2_CACHE_REGISTER_COMMAND, MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
- if (_MALI_OSK_ERR_OK != ret) {
- MALI_PRINT_ERROR(("Failed to invalidate cache\n"));
- }
+ for (i = 0; i < mali_global_num_l2s; i++) {
+ struct mali_l2_cache_core *cache = mali_global_l2s[i];
+ _mali_osk_errcode_t ret;
+
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ mali_l2_cache_lock(cache);
+
+ if (MALI_TRUE != cache->power_is_on) {
+ mali_l2_cache_unlock(cache);
+ continue;
+ }
+
+ cache->last_invalidated_id =
+ mali_scheduler_get_new_cache_order();
+
+ ret = mali_l2_cache_send_command(cache,
+ MALI400_L2_CACHE_REGISTER_COMMAND,
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_PRINT_ERROR(("Failed to invalidate cache\n"));
}
- mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]);
+
+ mali_l2_cache_unlock(cache);
}
}
void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages)
{
u32 i;
- for (i = 0; i < mali_global_num_l2_cache_cores; i++) {
- /*additional check*/
- if (MALI_TRUE == mali_l2_cache_lock_power_state(mali_global_l2_cache_cores[i])) {
- u32 j;
- for (j = 0; j < num_pages; j++) {
- _mali_osk_errcode_t ret;
- ret = mali_l2_cache_send_command(mali_global_l2_cache_cores[i], MALI400_L2_CACHE_REGISTER_CLEAR_PAGE, pages[j]);
- if (_MALI_OSK_ERR_OK != ret) {
- MALI_PRINT_ERROR(("Failed to invalidate page cache\n"));
- }
+ for (i = 0; i < mali_global_num_l2s; i++) {
+ struct mali_l2_cache_core *cache = mali_global_l2s[i];
+ u32 j;
+
+ MALI_DEBUG_ASSERT_POINTER(cache);
+
+ mali_l2_cache_lock(cache);
+
+ if (MALI_TRUE != cache->power_is_on) {
+ mali_l2_cache_unlock(cache);
+ continue;
+ }
+
+ for (j = 0; j < num_pages; j++) {
+ _mali_osk_errcode_t ret;
+
+ ret = mali_l2_cache_send_command(cache,
+ MALI400_L2_CACHE_REGISTER_CLEAR_PAGE,
+ pages[j]);
+ if (_MALI_OSK_ERR_OK != ret) {
+ MALI_PRINT_ERROR(("Failed to invalidate cache (page)\n"));
}
}
- mali_l2_cache_unlock_power_state(mali_global_l2_cache_cores[i]);
+
+ mali_l2_cache_unlock(cache);
}
}
-mali_bool mali_l2_cache_lock_power_state(struct mali_l2_cache_core *cache)
-{
- return mali_pm_domain_lock_state(cache->pm_domain);
-}
+/* -------- local helper functions below -------- */
-void mali_l2_cache_unlock_power_state(struct mali_l2_cache_core *cache)
+static void mali_l2_cache_reset(struct mali_l2_cache_core *cache)
{
- return mali_pm_domain_unlock_state(cache->pm_domain);
-}
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ MALI_DEBUG_ASSERT_LOCK_HELD(cache->lock);
-/* -------- local helper functions below -------- */
+ /* Invalidate cache (just to keep it in a known state at startup) */
+ mali_l2_cache_send_command(cache, MALI400_L2_CACHE_REGISTER_COMMAND,
+ MALI400_L2_CACHE_COMMAND_CLEAR_ALL);
+
+ /* Enable cache */
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_ENABLE,
+ (u32)MALI400_L2_CACHE_ENABLE_ACCESS |
+ (u32)MALI400_L2_CACHE_ENABLE_READ_ALLOCATE);
+
+ if (MALI400_L2_MAX_READS_NOT_SET != mali_l2_max_reads) {
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_MAX_READS,
+ (u32)mali_l2_max_reads);
+ }
+
+ /* Restart any performance counters (if enabled) */
+ if (cache->counter_src0 != MALI_HW_CORE_NO_COUNTER) {
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC0,
+ cache->counter_src0);
+ }
+
+ if (cache->counter_src1 != MALI_HW_CORE_NO_COUNTER) {
+ mali_hw_core_register_write(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_PERFCNT_SRC1,
+ cache->counter_src1);
+ }
+}
-static _mali_osk_errcode_t mali_l2_cache_send_command(struct mali_l2_cache_core *cache, u32 reg, u32 val)
+static _mali_osk_errcode_t mali_l2_cache_send_command(
+ struct mali_l2_cache_core *cache, u32 reg, u32 val)
{
int i = 0;
const int loop_count = 100000;
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ MALI_DEBUG_ASSERT_LOCK_HELD(cache->lock);
+
/*
- * Grab lock in order to send commands to the L2 cache in a serialized fashion.
- * The L2 cache will ignore commands if it is busy.
+ * First, wait for L2 cache command handler to go idle.
+ * (Commands received while processing another command will be ignored)
*/
- mali_l2_cache_command_lock(cache);
-
- if (MALI_L2_PAUSE == cache->mali_l2_status) {
- mali_l2_cache_command_unlock(cache);
- MALI_DEBUG_PRINT(1, ("Mali L2 cache: aborting wait for L2 come back\n"));
-
- MALI_ERROR(_MALI_OSK_ERR_BUSY);
- }
-
- /* First, wait for L2 cache command handler to go idle */
-
for (i = 0; i < loop_count; i++) {
- if (!(mali_hw_core_register_read(&cache->hw_core, MALI400_L2_CACHE_REGISTER_STATUS) & (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY)) {
+ if (!(mali_hw_core_register_read(&cache->hw_core,
+ MALI400_L2_CACHE_REGISTER_STATUS) &
+ (u32)MALI400_L2_CACHE_STATUS_COMMAND_BUSY)) {
break;
}
}
if (i == loop_count) {
- mali_l2_cache_command_unlock(cache);
MALI_DEBUG_PRINT(1, ("Mali L2 cache: aborting wait for command interface to go idle\n"));
- MALI_ERROR(_MALI_OSK_ERR_FAULT);
+ return _MALI_OSK_ERR_FAULT;
}
/* then issue the command */
mali_hw_core_register_write(&cache->hw_core, reg, val);
- mali_l2_cache_command_unlock(cache);
-
- MALI_SUCCESS;
-}
-
-void mali_l2_cache_pause_all(mali_bool pause)
-{
- int i;
- struct mali_l2_cache_core *cache;
- u32 num_cores = mali_l2_cache_core_get_glob_num_l2_cores();
- mali_l2_power_status status = MALI_L2_NORMAL;
-
- if (pause) {
- status = MALI_L2_PAUSE;
- }
-
- for (i = 0; i < num_cores; i++) {
- cache = mali_l2_cache_core_get_glob_l2_core(i);
- if (NULL != cache) {
- cache->mali_l2_status = status;
-
- /* Take and release the counter and command locks to
- * ensure there are no active threads that didn't get
- * the status flag update.
- *
- * The locks will also ensure the necessary memory
- * barriers are done on SMP systems.
- */
- mali_l2_cache_counter_lock(cache);
- mali_l2_cache_counter_unlock(cache);
-
- mali_l2_cache_command_lock(cache);
- mali_l2_cache_command_unlock(cache);
- }
- }
-
- /* Resume from pause: do the cache invalidation here to prevent any
- * loss of cache operation during the pause period to make sure the SW
- * status is consistent with L2 cache status.
- */
- if (!pause) {
- mali_l2_cache_invalidate_all();
- mali_l2_cache_reset_counters_all();
- }
+ return _MALI_OSK_ERR_OK;
}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_KERNEL_L2_CACHE_H__
#include "mali_hw_core.h"
#define MALI_MAX_NUMBER_OF_L2_CACHE_CORES 3
-/* Maximum 1 GP and 4 PP for an L2 cache core (Mali-400 Quad-core) */
+/* Maximum 1 GP and 4 PP for an L2 cache core (Mali-400 MP4) */
#define MALI_MAX_NUMBER_OF_GROUPS_PER_L2_CACHE 5
-struct mali_group;
-struct mali_pm_domain;
-
-/* Flags describing state of the L2 */
-typedef enum mali_l2_power_status {
- MALI_L2_NORMAL, /**< L2 is in normal state and operational */
- MALI_L2_PAUSE, /**< L2 may not be accessed and may be powered off */
-} mali_l2_power_status;
-
/**
* Definition of the L2 cache core struct
* Used to track a L2 cache unit in the system.
* Contains information about the mapping of the registers
*/
struct mali_l2_cache_core {
- struct mali_hw_core hw_core; /**< Common for all HW cores */
- u32 core_id; /**< Unique core ID */
-#ifdef MALI_UPPER_HALF_SCHEDULING
- _mali_osk_spinlock_irq_t *command_lock; /**< Serialize all L2 cache commands */
- _mali_osk_spinlock_irq_t *counter_lock; /**< Synchronize L2 cache counter access */
-#else
- _mali_osk_spinlock_t *command_lock;
- _mali_osk_spinlock_t *counter_lock;
-#endif
- u32 counter_src0; /**< Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
- u32 counter_src1; /**< Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
- u32 last_invalidated_id;
+ /* Common HW core functionality */
+ struct mali_hw_core hw_core;
+
+ /* Synchronize L2 cache access */
+ _mali_osk_spinlock_irq_t *lock;
+
+ /* Unique core ID */
+ u32 core_id;
+
+ /* The power domain this L2 cache belongs to */
struct mali_pm_domain *pm_domain;
- mali_l2_power_status mali_l2_status; /**< Indicate whether the L2 is paused or not */
+
+ /* MALI_TRUE if power is on for this L2 cache */
+ mali_bool power_is_on;
+
+ /* A "timestamp" to avoid unnecessary flushes */
+ u32 last_invalidated_id;
+
+ /* Performance counter 0, MALI_HW_CORE_NO_COUNTER for disabled */
+ u32 counter_src0;
+
+ /* Performance counter 1, MALI_HW_CORE_NO_COUNTER for disabled */
+ u32 counter_src1;
+
+ /*
+ * Performance counter 0 value base/offset
+ * (allows accumulative reporting even after power off)
+ */
+ u32 counter_value0_base;
+
+ /*
+	 * Performance counter 1 value base/offset
+ * (allows accumulative reporting even after power off)
+ */
+ u32 counter_value1_base;
+
+ /* Used by PM domains to link L2 caches of same domain */
+ _mali_osk_list_t pm_domain_list;
};
_mali_osk_errcode_t mali_l2_cache_initialize(void);
void mali_l2_cache_terminate(void);
-/**
- * L2 pause is just a status that the L2 can't be accessed temporarily.
-*/
-void mali_l2_cache_pause_all(mali_bool pause);
-struct mali_l2_cache_core *mali_l2_cache_create(_mali_osk_resource_t *resource);
+
+struct mali_l2_cache_core *mali_l2_cache_create(
+ _mali_osk_resource_t *resource, u32 domain_index);
void mali_l2_cache_delete(struct mali_l2_cache_core *cache);
-MALI_STATIC_INLINE void mali_l2_cache_set_pm_domain(struct mali_l2_cache_core *cache, struct mali_pm_domain *domain)
+MALI_STATIC_INLINE u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache)
{
- cache->pm_domain = domain;
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ return cache->core_id;
}
-u32 mali_l2_cache_get_id(struct mali_l2_cache_core *cache);
+MALI_STATIC_INLINE struct mali_pm_domain *mali_l2_cache_get_pm_domain(
+ struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ return cache->pm_domain;
+}
+
+void mali_l2_cache_power_up(struct mali_l2_cache_core *cache);
+void mali_l2_cache_power_down(struct mali_l2_cache_core *cache);
+
+void mali_l2_cache_core_set_counter_src(
+ struct mali_l2_cache_core *cache, u32 source_id, u32 counter);
+
+MALI_STATIC_INLINE u32 mali_l2_cache_core_get_counter_src0(
+ struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ return cache->counter_src0;
+}
+
+MALI_STATIC_INLINE u32 mali_l2_cache_core_get_counter_src1(
+ struct mali_l2_cache_core *cache)
+{
+ MALI_DEBUG_ASSERT_POINTER(cache);
+ return cache->counter_src1;
+}
+
+void mali_l2_cache_core_get_counter_values(
+ struct mali_l2_cache_core *cache,
+ u32 *src0, u32 *value0, u32 *src1, u32 *value1);
-void mali_l2_cache_core_set_counter_src0(struct mali_l2_cache_core *cache, u32 counter);
-void mali_l2_cache_core_set_counter_src1(struct mali_l2_cache_core *cache, u32 counter);
-u32 mali_l2_cache_core_get_counter_src0(struct mali_l2_cache_core *cache);
-u32 mali_l2_cache_core_get_counter_src1(struct mali_l2_cache_core *cache);
-void mali_l2_cache_core_get_counter_values(struct mali_l2_cache_core *cache, u32 *src0, u32 *value0, u32 *src1, u32 *value1);
struct mali_l2_cache_core *mali_l2_cache_core_get_glob_l2_core(u32 index);
u32 mali_l2_cache_core_get_glob_num_l2_cores(void);
-void mali_l2_cache_reset(struct mali_l2_cache_core *cache);
-void mali_l2_cache_reset_all(void);
-
-struct mali_group *mali_l2_cache_get_group(struct mali_l2_cache_core *cache, u32 index);
+struct mali_group *mali_l2_cache_get_group(
+ struct mali_l2_cache_core *cache, u32 index);
void mali_l2_cache_invalidate(struct mali_l2_cache_core *cache);
-mali_bool mali_l2_cache_invalidate_conditional(struct mali_l2_cache_core *cache, u32 id);
+void mali_l2_cache_invalidate_conditional(
+ struct mali_l2_cache_core *cache, u32 id);
+
void mali_l2_cache_invalidate_all(void);
void mali_l2_cache_invalidate_all_pages(u32 *pages, u32 num_pages);
-mali_bool mali_l2_cache_lock_power_state(struct mali_l2_cache_core *cache);
-void mali_l2_cache_unlock_power_state(struct mali_l2_cache_core *cache);
-
#endif /* __MALI_KERNEL_L2_CACHE_H__ */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_mem_validation.h"
static _mali_mem_validation_t mali_mem_validator = { MALI_INVALID_MEM_ADDR, MALI_INVALID_MEM_ADDR };
-_mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size_para)
+_mali_osk_errcode_t mali_mem_validation_add_range(u32 start, u32 size)
{
/* Check that no other MEM_VALIDATION resources exist */
if (MALI_INVALID_MEM_ADDR != mali_mem_validator.phys_base) {
/* Check restrictions on page alignment */
if ((0 != (start & (~_MALI_OSK_CPU_PAGE_MASK))) ||
- (0 != (size_para & (~_MALI_OSK_CPU_PAGE_MASK)))) {
+ (0 != (size & (~_MALI_OSK_CPU_PAGE_MASK)))) {
MALI_PRINT_ERROR(("Failed to add frame buffer memory; incorrect alignment\n"));
return _MALI_OSK_ERR_FAULT;
}
mali_mem_validator.phys_base = start;
- mali_mem_validator.size = size_para;
+ mali_mem_validator.size = size;
MALI_DEBUG_PRINT(2, ("Memory Validator installed for Mali physical address base=0x%08X, size=0x%08X\n",
- mali_mem_validator.phys_base, mali_mem_validator.size));
+ mali_mem_validator.phys_base, mali_mem_validator.size));
return _MALI_OSK_ERR_OK;
}
{
#if 0
if (phys_addr < (phys_addr + size)) { /* Don't allow overflow (or zero size) */
- if ((0 == ( phys_addr & (~_MALI_OSK_CPU_PAGE_MASK))) &&
- (0 == ( size & (~_MALI_OSK_CPU_PAGE_MASK)))) {
+ if ((0 == (phys_addr & (~_MALI_OSK_CPU_PAGE_MASK))) &&
+ (0 == (size & (~_MALI_OSK_CPU_PAGE_MASK)))) {
if ((phys_addr >= mali_mem_validator.phys_base) &&
((phys_addr + (size - 1)) >= mali_mem_validator.phys_base) &&
(phys_addr <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1))) &&
- ((phys_addr + (size - 1)) <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1))) ) {
+ ((phys_addr + (size - 1)) <= (mali_mem_validator.phys_base + (mali_mem_validator.size - 1)))) {
MALI_DEBUG_PRINT(3, ("Accepted range 0x%08X + size 0x%08X (= 0x%08X)\n", phys_addr, size, (phys_addr + size - 1)));
return _MALI_OSK_ERR_OK;
}
return _MALI_OSK_ERR_FAULT;
#endif
-/*
- MALI_PRINT(("abort mali mem validation check needed by little Q \r\n"));
-*/
return _MALI_OSK_ERR_OK;
}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_MEM_VALIDATION_H__
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2007-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_kernel_common.h"
}
if (mmu_status & MALI_MMU_STATUS_BIT_PAGE_FAULT_ACTIVE) {
- MALI_DEBUG_PRINT(2, ("Aborting MMU stall request since it is in pagefault state.\n"));
+ MALI_DEBUG_PRINT(3, ("Aborting MMU stall request since it is in pagefault state.\n"));
return MALI_FALSE;
}
err = _MALI_OSK_ERR_BUSY;
}
- MALI_DEBUG_PRINT(2, ("Mali MMU: mali_kernel_mmu_reset: %s\n", mmu->hw_core.description));
+ MALI_DEBUG_PRINT(3, ("Mali MMU: mali_kernel_mmu_reset: %s\n", mmu->hw_core.description));
if (_MALI_OSK_ERR_OK == mali_mmu_raw_reset(mmu)) {
mali_hw_core_register_write(&mmu->hw_core, MALI_MMU_REGISTER_INT_MASK, MALI_MMU_INTERRUPT_PAGE_FAULT | MALI_MMU_INTERRUPT_READ_BUS_ERROR);
mali_bool stall_success;
MALI_DEBUG_ASSERT_POINTER(mmu);
- MALI_DEBUG_PRINT(2, ("Activating the page fault flush page directory on MMU %s\n", mmu->hw_core.description));
+ MALI_DEBUG_PRINT(3, ("Activating the page fault flush page directory on MMU %s\n", mmu->hw_core.description));
stall_success = mali_mmu_enable_stall(mmu);
/* This function is expect to fail the stalling, since it might be in PageFault mode when it is called */
mali_mmu_activate_address_space(mmu, mali_page_fault_flush_page_directory);
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2007-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_MMU_H__
void mali_mmu_page_fault_done(struct mali_mmu_core *mmu);
-/*** Register reading/writing functions ***/
+MALI_STATIC_INLINE enum mali_interrupt_result mali_mmu_get_interrupt_result(struct mali_mmu_core *mmu)
+{
+ u32 rawstat_used = mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_RAWSTAT);
+ if (0 == rawstat_used) {
+ return MALI_INTERRUPT_RESULT_NONE;
+ }
+ return MALI_INTERRUPT_RESULT_ERROR;
+}
+
+
MALI_STATIC_INLINE u32 mali_mmu_get_int_status(struct mali_mmu_core *mmu)
{
return mali_hw_core_register_read(&mmu->hw_core, MALI_MMU_REGISTER_INT_STATUS);
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_kernel_common.h"
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_MMU_PAGE_DIRECTORY_H__
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
*
* @param atom pointer to an atomic counter
* @param val the value to initialize the atomic counter.
- * @return _MALI_OSK_ERR_OK on success, otherwise, a suitable
- * _mali_osk_errcode_t on failure.
*/
-_mali_osk_errcode_t _mali_osk_atomic_init(_mali_osk_atomic_t *atom, u32 val);
+void _mali_osk_atomic_init(_mali_osk_atomic_t *atom, u32 val);
/** @brief Read a value from an atomic counter
*
* @return On success, a Mali IO address through which the mapped-in
* memory/registers can be accessed. NULL on failure.
*/
-mali_io_address _mali_osk_mem_mapioregion(u32 phys, u32 size, const char *description);
+mali_io_address _mali_osk_mem_mapioregion(uintptr_t phys, u32 size, const char *description);
/** @brief Unmap a physically contiguous address range from kernel space.
*
* @param mapping The Mali IO address through which the mapping is
* accessed.
*/
-void _mali_osk_mem_unmapioregion(u32 phys, u32 size, mali_io_address mapping);
+void _mali_osk_mem_unmapioregion(uintptr_t phys, u32 size, mali_io_address mapping);
/** @brief Allocate and Map a physically contiguous region into kernel space
*
* @return _MALI_OSK_ERR_OK on success. Otherwise, a suitable
* _mali_osk_errcode_t on failure.
*/
-_mali_osk_errcode_t _mali_osk_mem_reqregion(u32 phys, u32 size, const char *description);
+_mali_osk_errcode_t _mali_osk_mem_reqregion(uintptr_t phys, u32 size, const char *description);
/** @brief Un-request a region of physically contiguous memory
*
* @param size the number of bytes of physically contiguous address space to
* un-request.
*/
-void _mali_osk_mem_unreqregion(u32 phys, u32 size);
+void _mali_osk_mem_unreqregion(uintptr_t phys, u32 size);
/** @brief Read from a location currently mapped in through
* _mali_osk_mem_mapioregion
* @param ticks_to_expire the amount of time in ticks for the timer to run
* before triggering.
*/
-void _mali_osk_timer_add(_mali_osk_timer_t *tim, u32 ticks_to_expire);
+void _mali_osk_timer_add(_mali_osk_timer_t *tim, unsigned long ticks_to_expire);
/** @brief Modify a timer
*
* should trigger.
*
*/
-void _mali_osk_timer_mod(_mali_osk_timer_t *tim, u32 ticks_to_expire);
+void _mali_osk_timer_mod(_mali_osk_timer_t *tim, unsigned long ticks_to_expire);
/** @brief Stop a timer, and block on its completion.
*
*
* @{ */
-/** @brief Return whether ticka occurs after tickb
+/** @brief Return whether ticka occurs after or at the same time as tickb
*
- * Some OSs handle tick 'rollover' specially, and so can be more robust against
- * tick counters rolling-over. This function must therefore be called to
- * determine if a time (in ticks) really occurs after another time (in ticks).
+ * Systems where ticks can wrap must handle that.
*
* @param ticka ticka
* @param tickb tickb
- * @return non-zero if ticka represents a time that occurs after tickb.
- * Zero otherwise.
+ * @return MALI_TRUE if ticka represents a time that occurs at or after tickb.
*/
-int _mali_osk_time_after(u32 ticka, u32 tickb);
+mali_bool _mali_osk_time_after_eq(unsigned long ticka, unsigned long tickb);
/** @brief Convert milliseconds to OS 'ticks'
*
* @param ms time interval in milliseconds
* @return the corresponding time interval in OS ticks.
*/
-u32 _mali_osk_time_mstoticks(u32 ms);
+unsigned long _mali_osk_time_mstoticks(u32 ms);
/** @brief Convert OS 'ticks' to milliseconds
*
* @param ticks time interval in OS ticks.
* @return the corresponding time interval in milliseconds
*/
-u32 _mali_osk_time_tickstoms(u32 ticks);
+u32 _mali_osk_time_tickstoms(unsigned long ticks);
/** @brief Get the current time in OS 'ticks'.
* @return the current time in OS 'ticks'.
*/
-u32 _mali_osk_time_tickcount(void);
+unsigned long _mali_osk_time_tickcount(void);
/** @brief Cause a microsecond delay
*
*/
u64 _mali_osk_time_get_ns(void);
+/** @brief Return time in nano seconds, since boot time.
+ *
+ * @return Time in nano seconds
+ */
+u64 _mali_osk_boot_time_get_ns(void);
/** @} */ /* end group _mali_osk_time */
*/
u32 _mali_osk_get_pid(void);
+/** @brief Return a name for the calling process.
+ *
+ * @return name of the calling process.
+ */
+char *_mali_osk_get_comm(void);
+
/** @brief Return an identificator for calling thread.
*
* @return Identificator for calling thread.
*/
u32 _mali_osk_get_tid(void);
-/** @brief Enable OS controlled runtime power management
- */
-void _mali_osk_pm_dev_enable(void);
-
-/** @brief Disable OS controlled runtime power management
- */
-void _mali_osk_pm_dev_disable(void);
-
-/** @brief Take a reference to the power manager system for the Mali device.
+/** @brief Take a reference to the power manager system for the Mali device (synchronously).
*
* When function returns successfully, Mali is ON.
*
- * @note Call \a _mali_osk_pm_dev_ref_dec() to release this reference.
- */
-_mali_osk_errcode_t _mali_osk_pm_dev_ref_add(void);
-
-
-/** @brief Release the reference to the power manger system for the Mali device.
- *
- * When reference count reach zero, the cores can be off.
- *
- * @note This must be used to release references taken with \a _mali_osk_pm_dev_ref_add().
+ * @note Call \a _mali_osk_pm_dev_ref_put() to release this reference.
*/
-void _mali_osk_pm_dev_ref_dec(void);
-
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_sync(void);
-/** @brief Take a reference to the power manager system for the Mali device.
+/** @brief Take a reference to the external power manager system for the Mali device (asynchronously).
*
- * Will leave the cores powered off if they are already powered off.
+ * Mali might not yet be on after this function has returned.
+ * Please use \a _mali_osk_pm_dev_barrier() or \a _mali_osk_pm_dev_ref_get_sync()
+ * to wait for Mali to be powered on.
*
* @note Call \a _mali_osk_pm_dev_ref_dec() to release this reference.
- *
- * @return MALI_TRUE if the Mali GPU is powered on, otherwise MALI_FALSE.
*/
-mali_bool _mali_osk_pm_dev_ref_add_no_power_on(void);
-
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_async(void);
-/** @brief Releasing the reference to the power manger system for the Mali device.
+/** @brief Release the reference to the external power manger system for the Mali device.
*
* When reference count reach zero, the cores can be off.
*
- * @note This must be used to release references taken with \a _mali_osk_pm_dev_ref_add_no_power_on().
+ * @note This must be used to release references taken with
+ * \a _mali_osk_pm_dev_ref_get_sync() or \a _mali_osk_pm_dev_ref_get_async().
*/
-void _mali_osk_pm_dev_ref_dec_no_power_on(void);
+void _mali_osk_pm_dev_ref_put(void);
-/** @brief Block untill pending PM operations are done
+/** @brief Block until pending PM operations are done
*/
void _mali_osk_pm_dev_barrier(void);
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010, 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
*/
typedef struct mali_gpu_device_data _mali_osk_device_data;
+#ifdef CONFIG_MALI_DT
+/** @brief Initialize those device resources when we use device tree
+ *
+ * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ */
+_mali_osk_errcode_t _mali_osk_resource_initialize(void);
+#endif
+
/** @brief Find Mali GPU HW resource
*
* @param addr Address of Mali GPU resource to find
*
* @return 0 if resources are found, otherwise the Mali GPU component with lowest address.
*/
-u32 _mali_osk_resource_base_address(void);
+uintptr_t _mali_osk_resource_base_address(void);
+
+/** @brief Find the number of L2 cache cores.
+ *
+ * @return the number of L2 cache cores found in the device resources.
+ */
+u32 _mali_osk_l2_resource_count(void);
/** @brief Retrieve the Mali GPU specific data
*
*/
_mali_osk_errcode_t _mali_osk_device_data_get(_mali_osk_device_data *data);
+/** @brief Find the pmu domain config from device data.
+ *
+ * @param domain_config_array used to store pmu domain config found in device data.
+ * @param array_size is the size of array domain_config_array.
+ */
+void _mali_osk_device_data_pmu_config_get(u16 *domain_config_array, int array_size);
+
+/** @brief Get Mali PMU switch delay
+ *
+ * @return pmu switch delay if it is configured
+ */
+u32 _mali_osk_get_pmu_switch_delay(void);
+
/** @brief Determines if Mali GPU has been configured with shared interrupts.
*
* @return MALI_TRUE if shared interrupts, MALI_FALSE if not.
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_OSK_PROFILING_H__
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
_MALI_OSK_LOCK_ORDER_MEM_INFO,
_MALI_OSK_LOCK_ORDER_MEM_PT_CACHE,
_MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP,
- _MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL,
- _MALI_OSK_LOCK_ORDER_GROUP,
+ _MALI_OSK_LOCK_ORDER_PM_EXECUTION,
+ _MALI_OSK_LOCK_ORDER_EXECUTOR,
_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM,
_MALI_OSK_LOCK_ORDER_SCHEDULER,
_MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED,
- _MALI_OSK_LOCK_ORDER_PM_CORE_STATE,
- _MALI_OSK_LOCK_ORDER_L2_COMMAND,
- _MALI_OSK_LOCK_ORDER_DMA_COMMAND,
_MALI_OSK_LOCK_ORDER_PROFILING,
- _MALI_OSK_LOCK_ORDER_L2_COUNTER,
+ _MALI_OSK_LOCK_ORDER_L2,
+ _MALI_OSK_LOCK_ORDER_L2_COMMAND,
_MALI_OSK_LOCK_ORDER_UTILIZATION,
- _MALI_OSK_LOCK_ORDER_PM_EXECUTE,
_MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS,
- _MALI_OSK_LOCK_ORDER_PM_DOMAIN,
- _MALI_OSK_LOCK_ORDER_PMU,
+ _MALI_OSK_LOCK_ORDER_PM_STATE,
_MALI_OSK_LOCK_ORDER_LAST,
} _mali_osk_lock_order_t;
*/
typedef struct _mali_osk_resource {
const char *description; /**< short description of the resource */
- u32 base; /**< Physical base address of the resource, as seen by Mali resources. */
+ uintptr_t base; /**< Physical base address of the resource, as seen by Mali resources. */
+ const char *irq_name; /**< Name of irq belong to this resource */
u32 irq; /**< IRQ number delivered to the CPU, or -1 to tell the driver to probe for it (if possible) */
} _mali_osk_resource_t;
/** @} */ /* end group _mali_osk_miscellaneous */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_pm.h"
#include "mali_kernel_common.h"
#include "mali_osk.h"
-#include "mali_gp_scheduler.h"
-#include "mali_pp_scheduler.h"
+#include "mali_osk_mali.h"
#include "mali_scheduler.h"
-#include "mali_kernel_utilization.h"
#include "mali_group.h"
#include "mali_pm_domain.h"
#include "mali_pmu.h"
-static mali_bool mali_power_on = MALI_FALSE;
+#include "mali_executor.h"
+#include "mali_control_timer.h"
+
+#if defined(DEBUG)
+u32 num_pm_runtime_resume = 0;
+u32 num_pm_updates = 0;
+u32 num_pm_updates_up = 0;
+u32 num_pm_updates_down = 0;
+#endif
+
+#define MALI_PM_DOMAIN_DUMMY_MASK (1 << MALI_DOMAIN_INDEX_DUMMY)
+
+/* lock protecting power state (including pm_domains) */
+static _mali_osk_spinlock_irq_t *pm_lock_state = NULL;
+
+/* the wanted domain mask (protected by pm_lock_state) */
+static u32 pd_mask_wanted = 0;
+
+/* used to deferring the actual power changes */
+static _mali_osk_wq_work_t *pm_work = NULL;
+
+/* lock protecting power change execution */
+static _mali_osk_mutex_t *pm_lock_exec = NULL;
+
+/* PMU domains which are actually powered on (protected by pm_lock_exec) */
+static u32 pmu_mask_current = 0;
+
+/*
+ * domains which marked as powered on (protected by pm_lock_exec)
+ * This can be different from pmu_mask_current right after GPU power on
+ * if the PMU domains default to powered up.
+ */
+static u32 pd_mask_current = 0;
+
+static u16 domain_config[MALI_MAX_NUMBER_OF_DOMAINS] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 1 << MALI_DOMAIN_INDEX_DUMMY
+};
+
+/* The relative core power cost */
+#define MALI_GP_COST 3
+#define MALI_PP_COST 6
+#define MALI_L2_COST 1
+
+/*
+ * We have MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1 rows in this matrix
+ * because we must store the mask of different pp cores: 0, 1, 2, 3, 4, 5, 6, 7, 8.
+ */
+static int mali_pm_domain_power_cost_result[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1][MALI_MAX_NUMBER_OF_DOMAINS];
+/*
+ * Keep track of runtime PM state, so that we know
+ * how to resume during OS resume.
+ */
+#ifdef CONFIG_PM_RUNTIME
+static mali_bool mali_pm_runtime_active = MALI_FALSE;
+#else
+/* When the kernel doesn't enable PM_RUNTIME, keep the flag always true,
+ * because the GPU will then never be powered off by runtime PM. */
+static mali_bool mali_pm_runtime_active = MALI_TRUE;
+#endif
+
+static void mali_pm_state_lock(void);
+static void mali_pm_state_unlock(void);
+static _mali_osk_errcode_t mali_pm_create_pm_domains(void);
+static void mali_pm_set_pmu_domain_config(void);
+static u32 mali_pm_get_registered_cores_mask(void);
+static void mali_pm_update_sync_internal(void);
+static mali_bool mali_pm_common_suspend(void);
+static void mali_pm_update_work(void *data);
+#if defined(DEBUG)
+const char *mali_pm_mask_to_string(u32 mask);
+const char *mali_pm_group_stats_to_string(void);
+#endif
_mali_osk_errcode_t mali_pm_initialize(void)
{
- _mali_osk_pm_dev_enable();
+ _mali_osk_errcode_t err;
+ struct mali_pmu_core *pmu;
+
+ pm_lock_state = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_PM_STATE);
+ if (NULL == pm_lock_state) {
+ mali_pm_terminate();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ pm_lock_exec = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_PM_STATE);
+ if (NULL == pm_lock_exec) {
+ mali_pm_terminate();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ pm_work = _mali_osk_wq_create_work(mali_pm_update_work, NULL);
+ if (NULL == pm_work) {
+ mali_pm_terminate();
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ pmu = mali_pmu_get_global_pmu_core();
+ if (NULL != pmu) {
+ /*
+ * We have a Mali PMU, set the correct domain
+ * configuration (default or custom)
+ */
+
+ u32 registered_cores_mask;
+
+ mali_pm_set_pmu_domain_config();
+
+ registered_cores_mask = mali_pm_get_registered_cores_mask();
+ mali_pmu_set_registered_cores_mask(pmu, registered_cores_mask);
+
+ MALI_DEBUG_ASSERT(0 == pd_mask_wanted);
+ }
+
+ /* Create all power domains needed (at least one dummy domain) */
+ err = mali_pm_create_pm_domains();
+ if (_MALI_OSK_ERR_OK != err) {
+ mali_pm_terminate();
+ return err;
+ }
+
return _MALI_OSK_ERR_OK;
}
void mali_pm_terminate(void)
{
+ if (NULL != pm_work) {
+ _mali_osk_wq_delete_work(pm_work);
+ pm_work = NULL;
+ }
+
mali_pm_domain_terminate();
- _mali_osk_pm_dev_disable();
+
+ if (NULL != pm_lock_exec) {
+ _mali_osk_mutex_term(pm_lock_exec);
+ pm_lock_exec = NULL;
+ }
+
+ if (NULL != pm_lock_state) {
+ _mali_osk_spinlock_irq_term(pm_lock_state);
+ pm_lock_state = NULL;
+ }
+}
+
+struct mali_pm_domain *mali_pm_register_l2_cache(u32 domain_index,
+ struct mali_l2_cache_core *l2_cache)
+{
+ struct mali_pm_domain *domain;
+
+ domain = mali_pm_domain_get_from_mask(domain_config[domain_index]);
+ if (NULL == domain) {
+ MALI_DEBUG_ASSERT(0 == domain_config[domain_index]);
+ domain = mali_pm_domain_get_from_index(
+ MALI_DOMAIN_INDEX_DUMMY);
+ domain_config[domain_index] = MALI_PM_DOMAIN_DUMMY_MASK;
+ } else {
+ MALI_DEBUG_ASSERT(0 != domain_config[domain_index]);
+ }
+
+ MALI_DEBUG_ASSERT(NULL != domain);
+
+ mali_pm_domain_add_l2_cache(domain, l2_cache);
+
+ return domain; /* return the actual domain this was registered in */
}
-/* Reset GPU after power up */
-static void mali_pm_reset_gpu(void)
+struct mali_pm_domain *mali_pm_register_group(u32 domain_index,
+ struct mali_group *group)
{
- /* Reset all L2 caches */
- mali_l2_cache_reset_all();
+ struct mali_pm_domain *domain;
- /* Reset all groups */
- mali_scheduler_reset_all_groups();
+ domain = mali_pm_domain_get_from_mask(domain_config[domain_index]);
+ if (NULL == domain) {
+ MALI_DEBUG_ASSERT(0 == domain_config[domain_index]);
+ domain = mali_pm_domain_get_from_index(
+ MALI_DOMAIN_INDEX_DUMMY);
+ domain_config[domain_index] = MALI_PM_DOMAIN_DUMMY_MASK;
+ } else {
+ MALI_DEBUG_ASSERT(0 != domain_config[domain_index]);
+ }
+
+ MALI_DEBUG_ASSERT(NULL != domain);
+
+ mali_pm_domain_add_group(domain, group);
+
+ return domain; /* return the actual domain this was registered in */
}
-void mali_pm_os_suspend(void)
+mali_bool mali_pm_get_domain_refs(struct mali_pm_domain **domains,
+ struct mali_group **groups,
+ u32 num_domains)
{
- MALI_DEBUG_PRINT(3, ("Mali PM: OS suspend\n"));
- mali_gp_scheduler_suspend();
- mali_pp_scheduler_suspend();
- mali_utilization_suspend();
- mali_group_power_off(MALI_TRUE);
- mali_power_on = MALI_FALSE;
+ mali_bool ret = MALI_TRUE; /* Assume all is powered on instantly */
+ u32 i;
+
+ mali_pm_state_lock();
+
+ for (i = 0; i < num_domains; i++) {
+ MALI_DEBUG_ASSERT_POINTER(domains[i]);
+ pd_mask_wanted |= mali_pm_domain_ref_get(domains[i]);
+ if (MALI_FALSE == mali_pm_domain_power_is_on(domains[i])) {
+ /*
+ * Tell caller that the corresponding group
+ * was not already powered on.
+ */
+ ret = MALI_FALSE;
+ } else {
+ /*
+ * There is a time gap between we power on the domain and
+ * set the power state of the corresponding groups to be on.
+ */
+ if (NULL != groups[i] &&
+ MALI_FALSE == mali_group_power_is_on(groups[i])) {
+ ret = MALI_FALSE;
+ }
+ }
+ }
+
+ MALI_DEBUG_PRINT(3, ("PM: wanted domain mask = 0x%08X (get refs)\n", pd_mask_wanted));
+
+ mali_pm_state_unlock();
+
+ return ret;
}
-void mali_pm_os_resume(void)
+mali_bool mali_pm_put_domain_refs(struct mali_pm_domain **domains,
+ u32 num_domains)
+{
+ u32 mask = 0;
+ mali_bool ret;
+ u32 i;
+
+ mali_pm_state_lock();
+
+ for (i = 0; i < num_domains; i++) {
+ MALI_DEBUG_ASSERT_POINTER(domains[i]);
+ mask |= mali_pm_domain_ref_put(domains[i]);
+ }
+
+ if (0 == mask) {
+ /* return false, all domains should still stay on */
+ ret = MALI_FALSE;
+ } else {
+ /* Assert that we are dealing with a change */
+ MALI_DEBUG_ASSERT((pd_mask_wanted & mask) == mask);
+
+ /* Update our desired domain mask */
+ pd_mask_wanted &= ~mask;
+
+ /* return true; one or more domains can now be powered down */
+ ret = MALI_TRUE;
+ }
+
+ MALI_DEBUG_PRINT(3, ("PM: wanted domain mask = 0x%08X (put refs)\n", pd_mask_wanted));
+
+ mali_pm_state_unlock();
+
+ return ret;
+}
+
+void mali_pm_init_begin(void)
{
struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
- mali_bool do_reset = MALI_FALSE;
- MALI_DEBUG_PRINT(3, ("Mali PM: OS resume\n"));
+ _mali_osk_pm_dev_ref_get_sync();
- if (MALI_TRUE != mali_power_on) {
- do_reset = MALI_TRUE;
+ /* Ensure all PMU domains are on */
+ if (NULL != pmu) {
+ mali_pmu_power_up_all(pmu);
}
+}
+void mali_pm_init_end(void)
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ /* Ensure all PMU domains are off */
if (NULL != pmu) {
- mali_pmu_reset(pmu);
+ mali_pmu_power_down_all(pmu);
}
- mali_power_on = MALI_TRUE;
- _mali_osk_write_mem_barrier();
+ _mali_osk_pm_dev_ref_put();
+}
+
+void mali_pm_update_sync(void)
+{
+ mali_pm_exec_lock();
- if (do_reset) {
- mali_pm_reset_gpu();
- mali_group_power_on();
+ if (MALI_TRUE == mali_pm_runtime_active) {
+ /*
+ * Only update if GPU is powered on.
+ * Deactivation of the last group will result in both a
+ * deferred runtime PM suspend operation and
+ * deferred execution of this function.
+ * mali_pm_runtime_active will be false if runtime PM
+ * executed first and thus the GPU is now fully powered off.
+ */
+ mali_pm_update_sync_internal();
}
- mali_gp_scheduler_resume();
- mali_pp_scheduler_resume();
+ mali_pm_exec_unlock();
+}
+
+void mali_pm_update_async(void)
+{
+ _mali_osk_wq_schedule_work(pm_work);
}
-void mali_pm_runtime_suspend(void)
+void mali_pm_os_suspend(mali_bool os_suspend)
{
+ int ret;
+
+ MALI_DEBUG_PRINT(3, ("Mali PM: OS suspend\n"));
+
+ /* Suspend execution of all jobs, and go to inactive state */
+ mali_executor_suspend();
+
+ if (os_suspend) {
+ mali_control_timer_suspend(MALI_TRUE);
+ }
+
+ mali_pm_exec_lock();
+
+ ret = mali_pm_common_suspend();
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == ret);
+ MALI_IGNORE(ret);
+
+ mali_pm_exec_unlock();
+}
+
+void mali_pm_os_resume(void)
+{
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ MALI_DEBUG_PRINT(3, ("Mali PM: OS resume\n"));
+
+ mali_pm_exec_lock();
+
+#if defined(DEBUG)
+ mali_pm_state_lock();
+
+ /* Assert that things are as we left them in os_suspend(). */
+ MALI_DEBUG_ASSERT(0 == pd_mask_wanted);
+ MALI_DEBUG_ASSERT(0 == pd_mask_current);
+ MALI_DEBUG_ASSERT(0 == pmu_mask_current);
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());
+
+ mali_pm_state_unlock();
+#endif
+
+ if (MALI_TRUE == mali_pm_runtime_active) {
+ /* Runtime PM was active, so reset PMU */
+ if (NULL != pmu) {
+ mali_pmu_reset(pmu);
+ pmu_mask_current = mali_pmu_get_mask(pmu);
+
+ MALI_DEBUG_PRINT(3, ("Mali PM: OS resume 0x%x \n", pmu_mask_current));
+ }
+
+ mali_pm_update_sync_internal();
+ }
+
+ mali_pm_exec_unlock();
+
+ /* Start executing jobs again */
+ mali_executor_resume();
+}
+
+mali_bool mali_pm_runtime_suspend(void)
+{
+ mali_bool ret;
+
MALI_DEBUG_PRINT(3, ("Mali PM: Runtime suspend\n"));
- mali_group_power_off(MALI_TRUE);
- mali_power_on = MALI_FALSE;
+
+ mali_pm_exec_lock();
+
+ /*
+ * Put SW state directly into "off" state, and do not bother to power
+ * down each power domain, because entire GPU will be powered off
+ * when we return.
+ * For runtime PM suspend, in contrast to OS suspend, there is a race
+ * between this function and the mali_pm_update_sync_internal(), which
+ * is fine...
+ */
+ ret = mali_pm_common_suspend();
+ if (MALI_TRUE == ret) {
+ mali_pm_runtime_active = MALI_FALSE;
+ } else {
+ /*
+ * Process the "power up" instead,
+ * which could have been "lost"
+ */
+ mali_pm_update_sync_internal();
+ }
+
+ mali_pm_exec_unlock();
+
+ return ret;
}
void mali_pm_runtime_resume(void)
{
struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
- mali_bool do_reset = MALI_FALSE;
- MALI_DEBUG_PRINT(3, ("Mali PM: Runtime resume\n"));
+ mali_pm_exec_lock();
- if (MALI_TRUE != mali_power_on) {
- do_reset = MALI_TRUE;
- }
+ mali_pm_runtime_active = MALI_TRUE;
+
+#if defined(DEBUG)
+ ++num_pm_runtime_resume;
+
+ mali_pm_state_lock();
+
+ /*
+ * Assert that things are as we left them in runtime_suspend(),
+ * except for pd_mask_wanted which normally will be the reason we
+ * got here (job queued => domains wanted)
+ */
+ MALI_DEBUG_ASSERT(0 == pd_mask_current);
+ MALI_DEBUG_ASSERT(0 == pmu_mask_current);
+
+ mali_pm_state_unlock();
+#endif
if (NULL != pmu) {
mali_pmu_reset(pmu);
+ pmu_mask_current = mali_pmu_get_mask(pmu);
+ MALI_DEBUG_PRINT(3, ("Mali PM: Runtime resume 0x%x \n", pmu_mask_current));
}
- mali_power_on = MALI_TRUE;
- _mali_osk_write_mem_barrier();
+ /*
+ * Normally we are resumed because a job has just been queued.
+ * pd_mask_wanted should thus be != 0.
+ * It is however possible for others to take a Mali Runtime PM ref
+ * without having a job queued.
+ * We should however always call mali_pm_update_sync_internal(),
+ * because this will take care of any potential mismatch between
+ * pmu_mask_current and pd_mask_current.
+ */
+ mali_pm_update_sync_internal();
+
+ mali_pm_exec_unlock();
+}
- if (do_reset) {
- mali_pm_reset_gpu();
- mali_group_power_on();
+#if MALI_STATE_TRACKING
+u32 mali_pm_dump_state_domain(struct mali_pm_domain *domain,
+ char *buf, u32 size)
+{
+ int n = 0;
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tPower domain: id %u\n",
+ mali_pm_domain_get_id(domain));
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\t\tMask: 0x%04x\n",
+ mali_pm_domain_get_mask(domain));
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\t\tUse count: %u\n",
+ mali_pm_domain_get_use_count(domain));
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\t\tCurrent power state: %s\n",
+ (mali_pm_domain_get_mask(domain) & pd_mask_current) ?
+ "On" : "Off");
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\t\tWanted power state: %s\n",
+ (mali_pm_domain_get_mask(domain) & pd_mask_wanted) ?
+ "On" : "Off");
+
+ return n;
+}
+#endif
+
+static void mali_pm_state_lock(void)
+{
+ _mali_osk_spinlock_irq_lock(pm_lock_state);
+}
+
+static void mali_pm_state_unlock(void)
+{
+ _mali_osk_spinlock_irq_unlock(pm_lock_state);
+}
+
+void mali_pm_exec_lock(void)
+{
+ _mali_osk_mutex_wait(pm_lock_exec);
+}
+
+void mali_pm_exec_unlock(void)
+{
+ _mali_osk_mutex_signal(pm_lock_exec);
+}
+
+static void mali_pm_domain_power_up(u32 power_up_mask,
+ struct mali_group *groups_up[MALI_MAX_NUMBER_OF_GROUPS],
+ u32 *num_groups_up,
+ struct mali_l2_cache_core *l2_up[MALI_MAX_NUMBER_OF_L2_CACHE_CORES],
+ u32 *num_l2_up)
+{
+ u32 domain_bit;
+ u32 notify_mask = power_up_mask;
+
+ MALI_DEBUG_ASSERT(0 != power_up_mask);
+ MALI_DEBUG_ASSERT_POINTER(groups_up);
+ MALI_DEBUG_ASSERT_POINTER(num_groups_up);
+ MALI_DEBUG_ASSERT(0 == *num_groups_up);
+ MALI_DEBUG_ASSERT_POINTER(l2_up);
+ MALI_DEBUG_ASSERT_POINTER(num_l2_up);
+ MALI_DEBUG_ASSERT(0 == *num_l2_up);
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_state);
+
+ MALI_DEBUG_PRINT(5,
+ ("PM update: Powering up domains: . [%s]\n",
+ mali_pm_mask_to_string(power_up_mask)));
+
+ pd_mask_current |= power_up_mask;
+
+ domain_bit = _mali_osk_fls(notify_mask);
+ while (0 != domain_bit) {
+ u32 domain_id = domain_bit - 1;
+ struct mali_pm_domain *domain =
+ mali_pm_domain_get_from_index(
+ domain_id);
+ struct mali_l2_cache_core *l2_cache;
+ struct mali_l2_cache_core *l2_cache_tmp;
+ struct mali_group *group;
+ struct mali_group *group_tmp;
+
+ /* Mark domain as powered up */
+ mali_pm_domain_set_power_on(domain, MALI_TRUE);
+
+ /*
+ * Make a note of the L2 and/or group(s) to notify
+ * (need to release the PM state lock before doing so)
+ */
+
+ _MALI_OSK_LIST_FOREACHENTRY(l2_cache,
+ l2_cache_tmp,
+ mali_pm_domain_get_l2_cache_list(
+ domain),
+ struct mali_l2_cache_core,
+ pm_domain_list) {
+ MALI_DEBUG_ASSERT(*num_l2_up <
+ MALI_MAX_NUMBER_OF_L2_CACHE_CORES);
+ l2_up[*num_l2_up] = l2_cache;
+ (*num_l2_up)++;
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(group,
+ group_tmp,
+ mali_pm_domain_get_group_list(domain),
+ struct mali_group,
+ pm_domain_list) {
+ MALI_DEBUG_ASSERT(*num_groups_up <
+ MALI_MAX_NUMBER_OF_GROUPS);
+ groups_up[*num_groups_up] = group;
+
+ (*num_groups_up)++;
+ }
+
+ /* Remove current bit and find next */
+ notify_mask &= ~(1 << (domain_id));
+ domain_bit = _mali_osk_fls(notify_mask);
}
}
+static void mali_pm_domain_power_down(u32 power_down_mask,
+ struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS],
+ u32 *num_groups_down,
+ struct mali_l2_cache_core *l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES],
+ u32 *num_l2_down)
+{
+ u32 domain_bit;
+ u32 notify_mask = power_down_mask;
+
+ MALI_DEBUG_ASSERT(0 != power_down_mask);
+ MALI_DEBUG_ASSERT_POINTER(groups_down);
+ MALI_DEBUG_ASSERT_POINTER(num_groups_down);
+ MALI_DEBUG_ASSERT(0 == *num_groups_down);
+ MALI_DEBUG_ASSERT_POINTER(l2_down);
+ MALI_DEBUG_ASSERT_POINTER(num_l2_down);
+ MALI_DEBUG_ASSERT(0 == *num_l2_down);
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_state);
+
+ MALI_DEBUG_PRINT(5,
+ ("PM update: Powering down domains: [%s]\n",
+ mali_pm_mask_to_string(power_down_mask)));
+
+ pd_mask_current &= ~power_down_mask;
+
+ domain_bit = _mali_osk_fls(notify_mask);
+ while (0 != domain_bit) {
+ u32 domain_id = domain_bit - 1;
+ struct mali_pm_domain *domain =
+ mali_pm_domain_get_from_index(domain_id);
+ struct mali_l2_cache_core *l2_cache;
+ struct mali_l2_cache_core *l2_cache_tmp;
+ struct mali_group *group;
+ struct mali_group *group_tmp;
+
+ /* Mark domain as powered down */
+ mali_pm_domain_set_power_on(domain, MALI_FALSE);
+
+ /*
+ * Make a note of the L2s and/or groups to notify
+ * (need to release the PM state lock before doing so)
+ */
+
+ _MALI_OSK_LIST_FOREACHENTRY(l2_cache,
+ l2_cache_tmp,
+ mali_pm_domain_get_l2_cache_list(domain),
+ struct mali_l2_cache_core,
+ pm_domain_list) {
+ MALI_DEBUG_ASSERT(*num_l2_down <
+ MALI_MAX_NUMBER_OF_L2_CACHE_CORES);
+ l2_down[*num_l2_down] = l2_cache;
+ (*num_l2_down)++;
+ }
-void mali_pm_set_power_is_on(void)
+ _MALI_OSK_LIST_FOREACHENTRY(group,
+ group_tmp,
+ mali_pm_domain_get_group_list(domain),
+ struct mali_group,
+ pm_domain_list) {
+ MALI_DEBUG_ASSERT(*num_groups_down <
+ MALI_MAX_NUMBER_OF_GROUPS);
+ groups_down[*num_groups_down] = group;
+ (*num_groups_down)++;
+ }
+
+ /* Remove current bit and find next */
+ notify_mask &= ~(1 << (domain_id));
+ domain_bit = _mali_osk_fls(notify_mask);
+ }
+}
+
+/*
+ * Execute pending power domain changes
+ * pm_lock_exec lock must be taken by caller.
+ */
+static void mali_pm_update_sync_internal(void)
+{
+ /*
+ * This should only be called in non-atomic context
+ * (normally as deferred work)
+ *
+ * Look at the pending power domain changes, and execute these.
+ * Make sure group and schedulers are notified about changes.
+ */
+
+ struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+
+ u32 power_down_mask;
+ u32 power_up_mask;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+
+#if defined(DEBUG)
+ ++num_pm_updates;
+#endif
+
+ /* Hold PM state lock while we look at (and obey) the wanted state */
+ mali_pm_state_lock();
+
+ MALI_DEBUG_PRINT(5, ("PM update pre: Wanted domain mask: .. [%s]\n",
+ mali_pm_mask_to_string(pd_mask_wanted)));
+ MALI_DEBUG_PRINT(5, ("PM update pre: Current domain mask: . [%s]\n",
+ mali_pm_mask_to_string(pd_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM update pre: Current PMU mask: .... [%s]\n",
+ mali_pm_mask_to_string(pmu_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM update pre: Group power stats: ... <%s>\n",
+ mali_pm_group_stats_to_string()));
+
+ /* Figure out which cores we need to power on */
+ power_up_mask = pd_mask_wanted &
+ (pd_mask_wanted ^ pd_mask_current);
+
+ if (0 != power_up_mask) {
+ u32 power_up_mask_pmu;
+ struct mali_group *groups_up[MALI_MAX_NUMBER_OF_GROUPS];
+ u32 num_groups_up = 0;
+ struct mali_l2_cache_core *
+ l2_up[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
+ u32 num_l2_up = 0;
+ u32 i;
+
+#if defined(DEBUG)
+ ++num_pm_updates_up;
+#endif
+
+ /*
+ * Make sure dummy/global domain is always included when
+ * powering up, since this is controlled by runtime PM,
+ * and device power is on at this stage.
+ */
+ power_up_mask |= MALI_PM_DOMAIN_DUMMY_MASK;
+
+ /* Power up only real PMU domains */
+ power_up_mask_pmu = power_up_mask & ~MALI_PM_DOMAIN_DUMMY_MASK;
+
+ /* But not those that happen to be powered on already */
+ power_up_mask_pmu &= (power_up_mask ^ pmu_mask_current) &
+ power_up_mask;
+
+ if (0 != power_up_mask_pmu) {
+ MALI_DEBUG_ASSERT(NULL != pmu);
+ pmu_mask_current |= power_up_mask_pmu;
+ mali_pmu_power_up(pmu, power_up_mask_pmu);
+ }
+
+ /*
+ * Put the domains themselves in power up state.
+ * We get the groups and L2s to notify in return.
+ */
+ mali_pm_domain_power_up(power_up_mask,
+ groups_up, &num_groups_up,
+ l2_up, &num_l2_up);
+
+ /* Need to unlock PM state lock before notifying L2 + groups */
+ mali_pm_state_unlock();
+
+		/* Notify each L2 cache that we have been powered up */
+ for (i = 0; i < num_l2_up; i++) {
+ mali_l2_cache_power_up(l2_up[i]);
+ }
+
+ /*
+ * Tell execution module about all the groups we have
+ * powered up. Groups will be notified as a result of this.
+ */
+ mali_executor_group_power_up(groups_up, num_groups_up);
+
+ /* Lock state again before checking for power down */
+ mali_pm_state_lock();
+ }
+
+ /* Figure out which cores we need to power off */
+ power_down_mask = pd_mask_current &
+ (pd_mask_wanted ^ pd_mask_current);
+
+ /*
+ * Never power down the dummy/global domain here. This is to be done
+	 * from a suspend request (since this domain is only physically powered
+ * down at that point)
+ */
+ power_down_mask &= ~MALI_PM_DOMAIN_DUMMY_MASK;
+
+ if (0 != power_down_mask) {
+ u32 power_down_mask_pmu;
+ struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS];
+ u32 num_groups_down = 0;
+ struct mali_l2_cache_core *
+ l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
+ u32 num_l2_down = 0;
+ u32 i;
+
+#if defined(DEBUG)
+ ++num_pm_updates_down;
+#endif
+
+ /*
+ * Put the domains themselves in power down state.
+ * We get the groups and L2s to notify in return.
+ */
+ mali_pm_domain_power_down(power_down_mask,
+ groups_down, &num_groups_down,
+ l2_down, &num_l2_down);
+
+ /* Need to unlock PM state lock before notifying L2 + groups */
+ mali_pm_state_unlock();
+
+ /*
+ * Tell execution module about all the groups we will be
+ * powering down. Groups will be notified as a result of this.
+ */
+ if (0 < num_groups_down) {
+ mali_executor_group_power_down(groups_down, num_groups_down);
+ }
+
+ /* Notify each L2 cache that we will be powering down */
+ for (i = 0; i < num_l2_down; i++) {
+ mali_l2_cache_power_down(l2_down[i]);
+ }
+
+ /*
+ * Power down only PMU domains which should not stay on
+ * Some domains might for instance currently be incorrectly
+ * powered up if default domain power state is all on.
+ */
+ power_down_mask_pmu = pmu_mask_current & (~pd_mask_current);
+
+ if (0 != power_down_mask_pmu) {
+ MALI_DEBUG_ASSERT(NULL != pmu);
+ pmu_mask_current &= ~power_down_mask_pmu;
+ mali_pmu_power_down(pmu, power_down_mask_pmu);
+
+ }
+ } else {
+ /*
+ * Power down only PMU domains which should not stay on
+ * Some domains might for instance currently be incorrectly
+ * powered up if default domain power state is all on.
+ */
+ u32 power_down_mask_pmu;
+
+ /* No need for state lock since we'll only update PMU */
+ mali_pm_state_unlock();
+
+ power_down_mask_pmu = pmu_mask_current & (~pd_mask_current);
+
+ if (0 != power_down_mask_pmu) {
+ MALI_DEBUG_ASSERT(NULL != pmu);
+ pmu_mask_current &= ~power_down_mask_pmu;
+ mali_pmu_power_down(pmu, power_down_mask_pmu);
+ }
+ }
+
+ MALI_DEBUG_PRINT(5, ("PM update post: Current domain mask: . [%s]\n",
+ mali_pm_mask_to_string(pd_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM update post: Current PMU mask: .... [%s]\n",
+ mali_pm_mask_to_string(pmu_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM update post: Group power stats: ... <%s>\n",
+ mali_pm_group_stats_to_string()));
+}
+
+static mali_bool mali_pm_common_suspend(void)
+{
+ mali_pm_state_lock();
+
+ if (0 != pd_mask_wanted) {
+ MALI_DEBUG_PRINT(5, ("PM: Aborting suspend operation\n\n\n"));
+ mali_pm_state_unlock();
+ return MALI_FALSE;
+ }
+
+ MALI_DEBUG_PRINT(5, ("PM suspend pre: Wanted domain mask: .. [%s]\n",
+ mali_pm_mask_to_string(pd_mask_wanted)));
+ MALI_DEBUG_PRINT(5, ("PM suspend pre: Current domain mask: . [%s]\n",
+ mali_pm_mask_to_string(pd_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM suspend pre: Current PMU mask: .... [%s]\n",
+ mali_pm_mask_to_string(pmu_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM suspend pre: Group power stats: ... <%s>\n",
+ mali_pm_group_stats_to_string()));
+
+ if (0 != pd_mask_current) {
+ /*
+ * We have still some domains powered on.
+ * It is for instance very normal that at least the
+ * dummy/global domain is marked as powered on at this point.
+ * (because it is physically powered on until this function
+ * returns)
+ */
+
+ struct mali_group *groups_down[MALI_MAX_NUMBER_OF_GROUPS];
+ u32 num_groups_down = 0;
+ struct mali_l2_cache_core *
+ l2_down[MALI_MAX_NUMBER_OF_L2_CACHE_CORES];
+ u32 num_l2_down = 0;
+ u32 i;
+
+ /*
+ * Put the domains themselves in power down state.
+ * We get the groups and L2s to notify in return.
+ */
+ mali_pm_domain_power_down(pd_mask_current,
+ groups_down,
+ &num_groups_down,
+ l2_down,
+ &num_l2_down);
+
+ MALI_DEBUG_ASSERT(0 == pd_mask_current);
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());
+
+ /* Need to unlock PM state lock before notifying L2 + groups */
+ mali_pm_state_unlock();
+
+ /*
+ * Tell execution module about all the groups we will be
+ * powering down. Groups will be notified as a result of this.
+ */
+ if (0 < num_groups_down) {
+ mali_executor_group_power_down(groups_down, num_groups_down);
+ }
+
+ /* Notify each L2 cache that we will be powering down */
+ for (i = 0; i < num_l2_down; i++) {
+ mali_l2_cache_power_down(l2_down[i]);
+ }
+
+ pmu_mask_current = 0;
+ } else {
+ MALI_DEBUG_ASSERT(0 == pmu_mask_current);
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_pm_domain_all_unused());
+
+ mali_pm_state_unlock();
+ }
+
+ MALI_DEBUG_PRINT(5, ("PM suspend post: Current domain mask: [%s]\n",
+ mali_pm_mask_to_string(pd_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM suspend post: Current PMU mask: ... [%s]\n",
+ mali_pm_mask_to_string(pmu_mask_current)));
+ MALI_DEBUG_PRINT(5, ("PM suspend post: Group power stats: .. <%s>\n",
+ mali_pm_group_stats_to_string()));
+
+ return MALI_TRUE;
+}
+
+static void mali_pm_update_work(void *data)
+{
+ MALI_IGNORE(data);
+ mali_pm_update_sync();
+}
+
+static _mali_osk_errcode_t mali_pm_create_pm_domains(void)
+{
+ int i;
+
+ /* Create all domains (including dummy domain) */
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (0x0 == domain_config[i]) continue;
+
+ if (NULL == mali_pm_domain_create(domain_config[i])) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+static void mali_pm_set_default_pm_domain_config(void)
+{
+ MALI_DEBUG_ASSERT(0 != _mali_osk_resource_base_address());
+
+ /* GP core */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_GP, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_GP] = 0x01;
+ }
+
+ /* PP0 - PP3 core */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP0, NULL)) {
+ if (mali_is_mali400()) {
+ domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 2;
+ } else if (mali_is_mali450()) {
+ domain_config[MALI_DOMAIN_INDEX_PP0] = 0x01 << 1;
+ }
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP1, NULL)) {
+ if (mali_is_mali400()) {
+ domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 3;
+ } else if (mali_is_mali450()) {
+ domain_config[MALI_DOMAIN_INDEX_PP1] = 0x01 << 2;
+ }
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP2, NULL)) {
+ if (mali_is_mali400()) {
+ domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 4;
+ } else if (mali_is_mali450()) {
+ domain_config[MALI_DOMAIN_INDEX_PP2] = 0x01 << 2;
+ }
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP3, NULL)) {
+ if (mali_is_mali400()) {
+ domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 5;
+ } else if (mali_is_mali450()) {
+ domain_config[MALI_DOMAIN_INDEX_PP3] = 0x01 << 2;
+ }
+ }
+
+ /* PP4 - PP7 */
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP4, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_PP4] = 0x01 << 3;
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP5, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_PP5] = 0x01 << 3;
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP6, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_PP6] = 0x01 << 3;
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI_OFFSET_PP7, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_PP7] = 0x01 << 3;
+ }
+
+ /* L2gp/L2PP0/L2PP4 */
+ if (mali_is_mali400()) {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI400_OFFSET_L2_CACHE0, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_L20] = 0x01 << 1;
+ }
+ } else if (mali_is_mali450()) {
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI450_OFFSET_L2_CACHE0, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_L20] = 0x01 << 0;
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI450_OFFSET_L2_CACHE1, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_L21] = 0x01 << 1;
+ }
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(
+ MALI450_OFFSET_L2_CACHE2, NULL)) {
+ domain_config[MALI_DOMAIN_INDEX_L22] = 0x01 << 3;
+ }
+ }
+}
+
+static u32 mali_pm_get_registered_cores_mask(void)
{
- mali_power_on = MALI_TRUE;
+ int i = 0;
+ u32 mask = 0;
+
+ for (i = 0; i < MALI_DOMAIN_INDEX_DUMMY; i++) {
+ mask |= domain_config[i];
+ }
+
+ return mask;
}
-mali_bool mali_pm_is_power_on(void)
+static void mali_pm_set_pmu_domain_config(void)
{
- return mali_power_on;
+ int i = 0;
+
+ _mali_osk_device_data_pmu_config_get(domain_config, MALI_MAX_NUMBER_OF_DOMAINS - 1);
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS - 1; i++) {
+ if (0 != domain_config[i]) {
+ break;
+ }
+ }
+
+ if (MALI_MAX_NUMBER_OF_DOMAINS - 1 == i) {
+ mali_pm_set_default_pm_domain_config();
+ }
+
+ /* Can't override dummy domain mask */
+ domain_config[MALI_DOMAIN_INDEX_DUMMY] =
+ 1 << MALI_DOMAIN_INDEX_DUMMY;
+}
+
+#if defined(DEBUG)
+const char *mali_pm_mask_to_string(u32 mask)
+{
+ static char bit_str[MALI_MAX_NUMBER_OF_DOMAINS + 1];
+ int bit;
+ int str_pos = 0;
+
+ /* Must be protected by lock since we use shared string buffer */
+ if (NULL != pm_lock_exec) {
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+ }
+
+ for (bit = MALI_MAX_NUMBER_OF_DOMAINS - 1; bit >= 0; bit--) {
+ if (mask & (1 << bit)) {
+ bit_str[str_pos] = 'X';
+ } else {
+ bit_str[str_pos] = '-';
+ }
+ str_pos++;
+ }
+
+ bit_str[MALI_MAX_NUMBER_OF_DOMAINS] = '\0';
+
+ return bit_str;
+}
+
+const char *mali_pm_group_stats_to_string(void)
+{
+ static char bit_str[MALI_MAX_NUMBER_OF_GROUPS + 1];
+ u32 num_groups = mali_group_get_glob_num_groups();
+ u32 i;
+
+ /* Must be protected by lock since we use shared string buffer */
+ if (NULL != pm_lock_exec) {
+ MALI_DEBUG_ASSERT_LOCK_HELD(pm_lock_exec);
+ }
+
+ for (i = 0; i < num_groups && i < MALI_MAX_NUMBER_OF_GROUPS; i++) {
+ struct mali_group *group;
+
+ group = mali_group_get_glob_group(i);
+
+ if (MALI_TRUE == mali_group_power_is_on(group)) {
+ bit_str[i] = 'X';
+ } else {
+ bit_str[i] = '-';
+ }
+ }
+
+ bit_str[i] = '\0';
+
+ return bit_str;
+}
+#endif
+
+/*
+ * num_pp is the number of PP cores which will be powered on given this mask
+ * cost is the total power cost of cores which will be powered on given this mask
+ */
+static void mali_pm_stat_from_mask(u32 mask, u32 *num_pp, u32 *cost)
+{
+ u32 i;
+
+ /* loop through all cores */
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (!(domain_config[i] & mask)) {
+ continue;
+ }
+
+ switch (i) {
+ case MALI_DOMAIN_INDEX_GP:
+ *cost += MALI_GP_COST;
+
+ break;
+ case MALI_DOMAIN_INDEX_PP0: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP1: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP2: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP3:
+ if (mali_is_mali400()) {
+ if ((domain_config[MALI_DOMAIN_INDEX_L20] & mask)
+ || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
+ == domain_config[MALI_DOMAIN_INDEX_L20])) {
+ *num_pp += 1;
+ }
+ } else {
+ if ((domain_config[MALI_DOMAIN_INDEX_L21] & mask)
+ || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
+ == domain_config[MALI_DOMAIN_INDEX_L21])) {
+ *num_pp += 1;
+ }
+ }
+
+ *cost += MALI_PP_COST;
+ break;
+ case MALI_DOMAIN_INDEX_PP4: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP5: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP6: /* Fall through */
+ case MALI_DOMAIN_INDEX_PP7:
+ MALI_DEBUG_ASSERT(mali_is_mali450());
+
+ if ((domain_config[MALI_DOMAIN_INDEX_L22] & mask)
+ || (domain_config[MALI_DOMAIN_INDEX_DUMMY]
+ == domain_config[MALI_DOMAIN_INDEX_L22])) {
+ *num_pp += 1;
+ }
+
+ *cost += MALI_PP_COST;
+ break;
+ case MALI_DOMAIN_INDEX_L20: /* Fall through */
+ case MALI_DOMAIN_INDEX_L21: /* Fall through */
+ case MALI_DOMAIN_INDEX_L22:
+ *cost += MALI_L2_COST;
+
+ break;
+ }
+ }
+}
+
+void mali_pm_power_cost_setup(void)
+{
+ /*
+ * Two parallel arrays which store the best domain mask and its cost
+ * The index is the number of PP cores, E.g. Index 0 is for 1 PP option,
+ * might have mask 0x2 and with cost of 1, lower cost is better
+ */
+ u32 best_mask[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS] = { 0 };
+ u32 best_cost[MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS] = { 0 };
+ /* Array cores_in_domain is used to store the total pp cores in each pm domain. */
+ u32 cores_in_domain[MALI_MAX_NUMBER_OF_DOMAINS] = { 0 };
+ /* Domain_count is used to represent the max domain we have.*/
+ u32 max_domain_mask = 0;
+ u32 max_domain_id = 0;
+ u32 always_on_pp_cores = 0;
+
+ u32 num_pp, cost, mask;
+ u32 i, j , k;
+
+ /* Initialize statistics */
+ for (i = 0; i < MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS; i++) {
+ best_mask[i] = 0;
+ best_cost[i] = 0xFFFFFFFF; /* lower cost is better */
+ }
+
+ for (i = 0; i < MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS + 1; i++) {
+ for (j = 0; j < MALI_MAX_NUMBER_OF_DOMAINS; j++) {
+ mali_pm_domain_power_cost_result[i][j] = 0;
+ }
+ }
+
+	/* Calculate number of pp cores of a given domain config. */
+ for (i = MALI_DOMAIN_INDEX_PP0; i <= MALI_DOMAIN_INDEX_PP7; i++) {
+ if (0 < domain_config[i]) {
+			/* Get the max domain mask value used to calculate power cost
+ * and we don't count in always on pp cores. */
+ if (MALI_PM_DOMAIN_DUMMY_MASK != domain_config[i]
+ && max_domain_mask < domain_config[i]) {
+ max_domain_mask = domain_config[i];
+ }
+
+ if (MALI_PM_DOMAIN_DUMMY_MASK == domain_config[i]) {
+ always_on_pp_cores++;
+ }
+ }
+ }
+ max_domain_id = _mali_osk_fls(max_domain_mask);
+
+ /*
+ * Try all combinations of power domains and check how many PP cores
+ * they have and their power cost.
+ */
+ for (mask = 0; mask < (1 << max_domain_id); mask++) {
+ num_pp = 0;
+ cost = 0;
+
+ mali_pm_stat_from_mask(mask, &num_pp, &cost);
+
+ /* This mask is usable for all MP1 up to num_pp PP cores, check statistics for all */
+ for (i = 0; i < num_pp; i++) {
+ if (best_cost[i] >= cost) {
+ best_cost[i] = cost;
+ best_mask[i] = mask;
+ }
+ }
+ }
+
+ /*
+ * If we want to enable x pp cores, if x is less than number of always_on pp cores,
+ * all of pp cores we will enable must be always_on pp cores.
+ */
+ for (i = 0; i < mali_executor_get_num_cores_total(); i++) {
+ if (i < always_on_pp_cores) {
+ mali_pm_domain_power_cost_result[i + 1][MALI_MAX_NUMBER_OF_DOMAINS - 1]
+ = i + 1;
+ } else {
+ mali_pm_domain_power_cost_result[i + 1][MALI_MAX_NUMBER_OF_DOMAINS - 1]
+ = always_on_pp_cores;
+ }
+ }
+
+	/* In this loop, variable i represents the number of non-always-on pp cores we want to enable. */
+ for (i = 0; i < (mali_executor_get_num_cores_total() - always_on_pp_cores); i++) {
+ if (best_mask[i] == 0) {
+ /* This MP variant is not available */
+ continue;
+ }
+
+ for (j = 0; j < MALI_MAX_NUMBER_OF_DOMAINS; j++) {
+ cores_in_domain[j] = 0;
+ }
+
+ for (j = MALI_DOMAIN_INDEX_PP0; j <= MALI_DOMAIN_INDEX_PP7; j++) {
+ if (0 < domain_config[j]
+			    && (MALI_PM_DOMAIN_DUMMY_MASK != domain_config[j])) {
+ cores_in_domain[_mali_osk_fls(domain_config[j]) - 1]++;
+ }
+ }
+
+		/* In this loop, j represents the number of cores we have already enabled. */
+ for (j = 0; j <= i;) {
+			/* k is used to visit all domains to get the number of pp cores remaining in each. */
+ for (k = 0; k < max_domain_id; k++) {
+ /* If domain k in best_mask[i] is enabled and this domain has extra pp cores,
+ * we know we must pick at least one pp core from this domain.
+ * And then we move to next enabled pm domain. */
+ if ((best_mask[i] & (0x1 << k)) && (0 < cores_in_domain[k])) {
+ cores_in_domain[k]--;
+ mali_pm_domain_power_cost_result[always_on_pp_cores + i + 1][k]++;
+ j++;
+ if (j > i) {
+ break;
+ }
+ }
+ }
+ }
+ }
+}
+
+/*
+ * When we are doing core scaling,
+ * this function is called to return the best mask to
+ * achieve the best pp group power cost.
+ */
+void mali_pm_get_best_power_cost_mask(int num_requested, int *dst)
+{
+ MALI_DEBUG_ASSERT((mali_executor_get_num_cores_total() >= num_requested) && (0 <= num_requested));
+
+ _mali_osk_memcpy(dst, mali_pm_domain_power_cost_result[num_requested], MALI_MAX_NUMBER_OF_DOMAINS * sizeof(int));
}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_PM_H__
#define __MALI_PM_H__
#include "mali_osk.h"
+#include "mali_pm_domain.h"
+#define MALI_DOMAIN_INDEX_GP 0
+#define MALI_DOMAIN_INDEX_PP0 1
+#define MALI_DOMAIN_INDEX_PP1 2
+#define MALI_DOMAIN_INDEX_PP2 3
+#define MALI_DOMAIN_INDEX_PP3 4
+#define MALI_DOMAIN_INDEX_PP4 5
+#define MALI_DOMAIN_INDEX_PP5 6
+#define MALI_DOMAIN_INDEX_PP6 7
+#define MALI_DOMAIN_INDEX_PP7 8
+#define MALI_DOMAIN_INDEX_L20 9
+#define MALI_DOMAIN_INDEX_L21 10
+#define MALI_DOMAIN_INDEX_L22 11
+/*
+ * The dummy domain is used when there is no physical power domain
+ * (e.g. no PMU or always on cores)
+ */
+#define MALI_DOMAIN_INDEX_DUMMY 12
+#define MALI_MAX_NUMBER_OF_DOMAINS 13
+
+/**
+ * Initialize the Mali PM module
+ *
+ * PM module covers Mali PM core, PM domains and Mali PMU
+ */
_mali_osk_errcode_t mali_pm_initialize(void);
+
+/**
+ * Terminate the Mali PM module
+ */
void mali_pm_terminate(void);
-/* Callback functions registered for the runtime PMM system */
-void mali_pm_os_suspend(void);
+void mali_pm_exec_lock(void);
+void mali_pm_exec_unlock(void);
+
+
+struct mali_pm_domain *mali_pm_register_l2_cache(u32 domain_index,
+ struct mali_l2_cache_core *l2_cache);
+struct mali_pm_domain *mali_pm_register_group(u32 domain_index,
+ struct mali_group *group);
+
+mali_bool mali_pm_get_domain_refs(struct mali_pm_domain **domains,
+ struct mali_group **groups,
+ u32 num_domains);
+mali_bool mali_pm_put_domain_refs(struct mali_pm_domain **domains,
+ u32 num_domains);
+
+void mali_pm_init_begin(void);
+void mali_pm_init_end(void);
+
+void mali_pm_update_sync(void);
+void mali_pm_update_async(void);
+
+/* Callback functions for system power management */
+void mali_pm_os_suspend(mali_bool os_suspend);
void mali_pm_os_resume(void);
-void mali_pm_runtime_suspend(void);
+
+mali_bool mali_pm_runtime_suspend(void);
void mali_pm_runtime_resume(void);
-void mali_pm_set_power_is_on(void);
-mali_bool mali_pm_is_power_on(void);
+#if MALI_STATE_TRACKING
+u32 mali_pm_dump_state_domain(struct mali_pm_domain *domain,
+ char *buf, u32 size);
+#endif
+
+void mali_pm_power_cost_setup(void);
+
+void mali_pm_get_best_power_cost_mask(int num_requested, int *dst);
+
+#if defined(DEBUG)
+const char *mali_pm_mask_to_string(u32 mask);
+#endif
#endif /* __MALI_PM_H__ */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_kernel_common.h"
#include "mali_pm_domain.h"
#include "mali_pmu.h"
#include "mali_group.h"
+#include "mali_pm.h"
-static struct mali_pm_domain *mali_pm_domains[MALI_MAX_NUMBER_OF_DOMAINS] = { NULL, };
+static struct mali_pm_domain *mali_pm_domains[MALI_MAX_NUMBER_OF_DOMAINS] =
+{ NULL, };
-static void mali_pm_domain_lock(struct mali_pm_domain *domain)
+void mali_pm_domain_initialize(void)
{
- _mali_osk_spinlock_irq_lock(domain->lock);
+ /* Domains will be initialized/created on demand */
}
-static void mali_pm_domain_unlock(struct mali_pm_domain *domain)
+void mali_pm_domain_terminate(void)
{
- _mali_osk_spinlock_irq_unlock(domain->lock);
-}
+ int i;
-MALI_STATIC_INLINE void mali_pm_domain_state_set(struct mali_pm_domain *domain, mali_pm_domain_state state)
-{
- domain->state = state;
+ /* Delete all domains that has been created */
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ mali_pm_domain_delete(mali_pm_domains[i]);
+ mali_pm_domains[i] = NULL;
+ }
}
struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask)
domain = mali_pm_domain_get_from_mask(pmu_mask);
if (NULL != domain) return domain;
- MALI_DEBUG_PRINT(2, ("Mali PM domain: Creating Mali PM domain (mask=0x%08X)\n", pmu_mask));
+ MALI_DEBUG_PRINT(2,
+ ("Mali PM domain: Creating Mali PM domain (mask=0x%08X)\n",
+ pmu_mask));
- domain = (struct mali_pm_domain *)_mali_osk_malloc(sizeof(struct mali_pm_domain));
+ domain = (struct mali_pm_domain *)_mali_osk_malloc(
+ sizeof(struct mali_pm_domain));
if (NULL != domain) {
- domain->lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_PM_DOMAIN);
- if (NULL == domain->lock) {
- _mali_osk_free(domain);
- return NULL;
- }
-
- domain->state = MALI_PM_DOMAIN_ON;
+ domain->power_is_on = MALI_FALSE;
domain->pmu_mask = pmu_mask;
domain->use_count = 0;
- domain->group_list = NULL;
- domain->group_count = 0;
- domain->l2 = NULL;
+ _mali_osk_list_init(&domain->group_list);
+ _mali_osk_list_init(&domain->l2_cache_list);
domain_id = _mali_osk_fls(pmu_mask) - 1;
/* Verify the domain_id */
if (NULL == domain) {
return;
}
- _mali_osk_spinlock_irq_term(domain->lock);
-
- _mali_osk_free(domain);
-}
-void mali_pm_domain_terminate(void)
-{
- int i;
+ _mali_osk_list_delinit(&domain->group_list);
+ _mali_osk_list_delinit(&domain->l2_cache_list);
- /* Delete all domains */
- for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
- mali_pm_domain_delete(mali_pm_domains[i]);
- }
+ _mali_osk_free(domain);
}
-void mali_pm_domain_add_group(u32 mask, struct mali_group *group)
+void mali_pm_domain_add_group(struct mali_pm_domain *domain,
+ struct mali_group *group)
{
- struct mali_pm_domain *domain = mali_pm_domain_get_from_mask(mask);
- struct mali_group *next;
-
- if (NULL == domain) return;
-
+ MALI_DEBUG_ASSERT_POINTER(domain);
MALI_DEBUG_ASSERT_POINTER(group);
- ++domain->group_count;
- next = domain->group_list;
-
- domain->group_list = group;
-
- group->pm_domain_list = next;
-
- mali_group_set_pm_domain(group, domain);
-
- /* Get pm domain ref after mali_group_set_pm_domain */
- mali_group_get_pm_domain_ref(group);
+ /*
+ * Use addtail because virtual group is created last and it needs
+ * to be at the end of the list (in order to be activated after
+ * all children.
+ */
+ _mali_osk_list_addtail(&group->pm_domain_list, &domain->group_list);
}
-void mali_pm_domain_add_l2(u32 mask, struct mali_l2_cache_core *l2)
+void mali_pm_domain_add_l2_cache(struct mali_pm_domain *domain,
+ struct mali_l2_cache_core *l2_cache)
{
- struct mali_pm_domain *domain = mali_pm_domain_get_from_mask(mask);
-
- if (NULL == domain) return;
-
- MALI_DEBUG_ASSERT(NULL == domain->l2);
- MALI_DEBUG_ASSERT(NULL != l2);
-
- domain->l2 = l2;
-
- mali_l2_cache_set_pm_domain(l2, domain);
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ MALI_DEBUG_ASSERT_POINTER(l2_cache);
+ _mali_osk_list_add(&l2_cache->pm_domain_list, &domain->l2_cache_list);
}
struct mali_pm_domain *mali_pm_domain_get_from_mask(u32 mask)
{
u32 id = 0;
- if (0 == mask) return NULL;
+ if (0 == mask) {
+ return NULL;
+ }
id = _mali_osk_fls(mask) - 1;
return mali_pm_domains[id];
}
-void mali_pm_domain_ref_get(struct mali_pm_domain *domain)
+u32 mali_pm_domain_ref_get(struct mali_pm_domain *domain)
{
- if (NULL == domain) return;
+ MALI_DEBUG_ASSERT_POINTER(domain);
- mali_pm_domain_lock(domain);
- ++domain->use_count;
+ if (0 == domain->use_count) {
+ _mali_osk_pm_dev_ref_get_async();
+ }
- if (MALI_PM_DOMAIN_ON != domain->state) {
- /* Power on */
- struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
+ ++domain->use_count;
+ MALI_DEBUG_PRINT(4, ("PM domain %p: ref_get, use_count => %u\n", domain, domain->use_count));
- MALI_DEBUG_PRINT(3, ("PM Domain: Powering on 0x%08x\n", domain->pmu_mask));
+ /* Return our mask so caller can check this against wanted mask */
+ return domain->pmu_mask;
+}
- if (NULL != pmu) {
- _mali_osk_errcode_t err;
+u32 mali_pm_domain_ref_put(struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
- err = mali_pmu_power_up(pmu, domain->pmu_mask);
+ --domain->use_count;
+ MALI_DEBUG_PRINT(4, ("PM domain %p: ref_put, use_count => %u\n", domain, domain->use_count));
- if (_MALI_OSK_ERR_OK != err && _MALI_OSK_ERR_BUSY != err) {
- MALI_PRINT_ERROR(("PM Domain: Failed to power up PM domain 0x%08x\n",
- domain->pmu_mask));
- }
- }
- mali_pm_domain_state_set(domain, MALI_PM_DOMAIN_ON);
- } else {
- MALI_DEBUG_ASSERT(MALI_PM_DOMAIN_ON == mali_pm_domain_state_get(domain));
+ if (0 == domain->use_count) {
+ _mali_osk_pm_dev_ref_put();
}
- mali_pm_domain_unlock(domain);
+ /*
+	 * Return the PMU mask which now could be powered down
+	 * (the bit for this domain).
+	 * This is the responsibility of the caller (mali_pm).
+ */
+ return (0 == domain->use_count ? domain->pmu_mask : 0);
}
-void mali_pm_domain_ref_put(struct mali_pm_domain *domain)
+#if MALI_STATE_TRACKING
+u32 mali_pm_domain_get_id(struct mali_pm_domain *domain)
{
- if (NULL == domain) return;
-
- mali_pm_domain_lock(domain);
- --domain->use_count;
-
- if (0 == domain->use_count && MALI_PM_DOMAIN_OFF != domain->state) {
- /* Power off */
- struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
-
- MALI_DEBUG_PRINT(3, ("PM Domain: Powering off 0x%08x\n", domain->pmu_mask));
+ u32 id = 0;
- mali_pm_domain_state_set(domain, MALI_PM_DOMAIN_OFF);
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ MALI_DEBUG_ASSERT(0 != domain->pmu_mask);
- if (NULL != pmu) {
- _mali_osk_errcode_t err;
+ id = _mali_osk_fls(domain->pmu_mask) - 1;
- err = mali_pmu_power_down(pmu, domain->pmu_mask);
+ MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > id);
+	/* Verify that pmu_mask has only one bit set */
+ MALI_DEBUG_ASSERT((1 << id) == domain->pmu_mask);
+ /* Verify that we have stored the domain at right id/index */
+ MALI_DEBUG_ASSERT(domain == mali_pm_domains[id]);
- if (_MALI_OSK_ERR_OK != err && _MALI_OSK_ERR_BUSY != err) {
- MALI_PRINT_ERROR(("PM Domain: Failed to power down PM domain 0x%08x\n",
- domain->pmu_mask));
- }
- }
- }
- mali_pm_domain_unlock(domain);
+ return id;
}
+#endif
-mali_bool mali_pm_domain_lock_state(struct mali_pm_domain *domain)
+#if defined(DEBUG)
+mali_bool mali_pm_domain_all_unused(void)
{
- mali_bool is_powered = MALI_TRUE;
+ int i;
- /* Take a reference without powering on */
- if (NULL != domain) {
- mali_pm_domain_lock(domain);
- ++domain->use_count;
+ for (i = 0; i < MALI_MAX_NUMBER_OF_DOMAINS; i++) {
+ if (NULL == mali_pm_domains[i]) {
+ /* Nothing to check */
+ continue;
+ }
- if (MALI_PM_DOMAIN_ON != domain->state) {
- is_powered = MALI_FALSE;
+ if (MALI_TRUE == mali_pm_domains[i]->power_is_on) {
+ /* Not ready for suspend! */
+ return MALI_FALSE;
}
- mali_pm_domain_unlock(domain);
- }
- if (!_mali_osk_pm_dev_ref_add_no_power_on()) {
- is_powered = MALI_FALSE;
+ if (0 != mali_pm_domains[i]->use_count) {
+ /* Not ready for suspend! */
+ return MALI_FALSE;
+ }
}
- return is_powered;
-}
-
-void mali_pm_domain_unlock_state(struct mali_pm_domain *domain)
-{
- _mali_osk_pm_dev_ref_dec_no_power_on();
-
- if (NULL != domain) {
- mali_pm_domain_ref_put(domain);
- }
+ return MALI_TRUE;
}
+#endif
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_PM_DOMAIN_H__
#include "mali_group.h"
#include "mali_pmu.h"
-typedef enum {
- MALI_PM_DOMAIN_ON,
- MALI_PM_DOMAIN_OFF,
-} mali_pm_domain_state;
-
+/* Instances are protected by PM state lock */
struct mali_pm_domain {
- mali_pm_domain_state state;
- _mali_osk_spinlock_irq_t *lock;
-
+ mali_bool power_is_on;
s32 use_count;
-
u32 pmu_mask;
- int group_count;
- struct mali_group *group_list;
+ /* Zero or more groups can belong to this domain */
+ _mali_osk_list_t group_list;
- struct mali_l2_cache_core *l2;
+ /* Zero or more L2 caches can belong to this domain */
+ _mali_osk_list_t l2_cache_list;
};
-struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask);
-void mali_pm_domain_add_group(u32 mask, struct mali_group *group);
+void mali_pm_domain_initialize(void);
+void mali_pm_domain_terminate(void);
-void mali_pm_domain_add_l2(u32 mask, struct mali_l2_cache_core *l2);
+struct mali_pm_domain *mali_pm_domain_create(u32 pmu_mask);
void mali_pm_domain_delete(struct mali_pm_domain *domain);
-void mali_pm_domain_terminate(void);
+void mali_pm_domain_add_l2_cache(
+ struct mali_pm_domain *domain,
+ struct mali_l2_cache_core *l2_cache);
+void mali_pm_domain_add_group(struct mali_pm_domain *domain,
+ struct mali_group *group);
-/** Get PM domain from domain ID
- */
struct mali_pm_domain *mali_pm_domain_get_from_mask(u32 mask);
struct mali_pm_domain *mali_pm_domain_get_from_index(u32 id);
/* Ref counting */
-void mali_pm_domain_ref_get(struct mali_pm_domain *domain);
-void mali_pm_domain_ref_put(struct mali_pm_domain *domain);
+u32 mali_pm_domain_ref_get(struct mali_pm_domain *domain);
+u32 mali_pm_domain_ref_put(struct mali_pm_domain *domain);
+
+MALI_STATIC_INLINE _mali_osk_list_t *mali_pm_domain_get_group_list(
+ struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ return &domain->group_list;
+}
+
+MALI_STATIC_INLINE _mali_osk_list_t *mali_pm_domain_get_l2_cache_list(
+ struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ return &domain->l2_cache_list;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pm_domain_power_is_on(
+ struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ return domain->power_is_on;
+}
-MALI_STATIC_INLINE struct mali_l2_cache_core *mali_pm_domain_l2_get(struct mali_pm_domain *domain)
+MALI_STATIC_INLINE void mali_pm_domain_set_power_on(
+ struct mali_pm_domain *domain,
+ mali_bool power_is_on)
{
- return domain->l2;
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ domain->power_is_on = power_is_on;
}
-MALI_STATIC_INLINE mali_pm_domain_state mali_pm_domain_state_get(struct mali_pm_domain *domain)
+MALI_STATIC_INLINE u32 mali_pm_domain_get_use_count(
+ struct mali_pm_domain *domain)
{
- return domain->state;
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ return domain->use_count;
}
-mali_bool mali_pm_domain_lock_state(struct mali_pm_domain *domain);
-void mali_pm_domain_unlock_state(struct mali_pm_domain *domain);
+#if MALI_STATE_TRACKING
+u32 mali_pm_domain_get_id(struct mali_pm_domain *domain);
+
+MALI_STATIC_INLINE u32 mali_pm_domain_get_mask(struct mali_pm_domain *domain)
+{
+ MALI_DEBUG_ASSERT_POINTER(domain);
+ return domain->pmu_mask;
+}
+#endif
-#define MALI_PM_DOMAIN_FOR_EACH_GROUP(group, domain) for ((group) = (domain)->group_list;\
- NULL != (group); (group) = (group)->pm_domain_list)
+#if defined(DEBUG)
+mali_bool mali_pm_domain_all_unused(void);
+#endif
#endif /* __MALI_PM_DOMAIN_H__ */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
#include "mali_pm.h"
#include "mali_osk_mali.h"
-u16 mali_pmu_global_domain_config[MALI_MAX_NUMBER_OF_DOMAINS] = {0};
+struct mali_pmu_core *mali_global_pmu_core = NULL;
-static u32 mali_pmu_detect_mask(void);
-
-/** @brief MALI inbuilt PMU hardware info and PMU hardware has knowledge of cores power mask
- */
-struct mali_pmu_core {
- struct mali_hw_core hw_core;
- _mali_osk_spinlock_t *lock;
- u32 registered_cores_mask;
- u32 active_cores_mask;
- u32 switch_delay;
-};
-
-static struct mali_pmu_core *mali_global_pmu_core = NULL;
-
-/** @brief Register layout for hardware PMU
- */
-typedef enum {
- PMU_REG_ADDR_MGMT_POWER_UP = 0x00, /*< Power up register */
- PMU_REG_ADDR_MGMT_POWER_DOWN = 0x04, /*< Power down register */
- PMU_REG_ADDR_MGMT_STATUS = 0x08, /*< Core sleep status register */
- PMU_REG_ADDR_MGMT_INT_MASK = 0x0C, /*< Interrupt mask register */
- PMU_REG_ADDR_MGMT_INT_RAWSTAT = 0x10, /*< Interrupt raw status register */
- PMU_REG_ADDR_MGMT_INT_CLEAR = 0x18, /*< Interrupt clear register */
- PMU_REG_ADDR_MGMT_SW_DELAY = 0x1C, /*< Switch delay register */
- PMU_REGISTER_ADDRESS_SPACE_SIZE = 0x28, /*< Size of register space */
-} pmu_reg_addr_mgmt_addr;
-
-#define PMU_REG_VAL_IRQ 1
+static _mali_osk_errcode_t mali_pmu_wait_for_command_finish(
+ struct mali_pmu_core *pmu);
struct mali_pmu_core *mali_pmu_create(_mali_osk_resource_t *resource)
{
MALI_DEBUG_ASSERT(NULL == mali_global_pmu_core);
MALI_DEBUG_PRINT(2, ("Mali PMU: Creating Mali PMU core\n"));
- pmu = (struct mali_pmu_core *)_mali_osk_malloc(sizeof(struct mali_pmu_core));
+ pmu = (struct mali_pmu_core *)_mali_osk_malloc(
+ sizeof(struct mali_pmu_core));
if (NULL != pmu) {
- pmu->lock = _mali_osk_spinlock_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_PMU);
- if (NULL != pmu->lock) {
- pmu->registered_cores_mask = mali_pmu_detect_mask();
- pmu->active_cores_mask = pmu->registered_cores_mask;
-
- if (_MALI_OSK_ERR_OK == mali_hw_core_create(&pmu->hw_core, resource, PMU_REGISTER_ADDRESS_SPACE_SIZE)) {
- _mali_osk_errcode_t err;
- _mali_osk_device_data data = { 0, };
-
- err = _mali_osk_device_data_get(&data);
- if (_MALI_OSK_ERR_OK == err) {
- pmu->switch_delay = data.pmu_switch_delay;
- mali_global_pmu_core = pmu;
- return pmu;
- }
- mali_hw_core_delete(&pmu->hw_core);
- }
- _mali_osk_spinlock_term(pmu->lock);
+ pmu->registered_cores_mask = 0; /* to be set later */
+
+ if (_MALI_OSK_ERR_OK == mali_hw_core_create(&pmu->hw_core,
+ resource, PMU_REGISTER_ADDRESS_SPACE_SIZE)) {
+
+ pmu->switch_delay = _mali_osk_get_pmu_switch_delay();
+
+ mali_global_pmu_core = pmu;
+
+ return pmu;
}
_mali_osk_free(pmu);
}
{
MALI_DEBUG_ASSERT_POINTER(pmu);
MALI_DEBUG_ASSERT(pmu == mali_global_pmu_core);
+
MALI_DEBUG_PRINT(2, ("Mali PMU: Deleting Mali PMU core\n"));
- _mali_osk_spinlock_term(pmu->lock);
+ mali_global_pmu_core = NULL;
+
mali_hw_core_delete(&pmu->hw_core);
_mali_osk_free(pmu);
- mali_global_pmu_core = NULL;
}
-static void mali_pmu_lock(struct mali_pmu_core *pmu)
+void mali_pmu_set_registered_cores_mask(struct mali_pmu_core *pmu, u32 mask)
{
- _mali_osk_spinlock_lock(pmu->lock);
+ pmu->registered_cores_mask = mask;
}
-static void mali_pmu_unlock(struct mali_pmu_core *pmu)
+
+void mali_pmu_reset(struct mali_pmu_core *pmu)
{
- _mali_osk_spinlock_unlock(pmu->lock);
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+
+ /* Setup the desired defaults */
+ mali_hw_core_register_write_relaxed(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_MASK, 0);
+ mali_hw_core_register_write_relaxed(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
}
-static _mali_osk_errcode_t mali_pmu_wait_for_command_finish(struct mali_pmu_core *pmu)
+void mali_pmu_power_up_all(struct mali_pmu_core *pmu)
{
- u32 rawstat;
- u32 timeout = MALI_REG_POLL_COUNT_SLOW;
+ u32 stat;
- MALI_DEBUG_ASSERT(pmu);
+ MALI_DEBUG_ASSERT_POINTER(pmu);
+ MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
- /* Wait for the command to complete */
- do {
- rawstat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_RAWSTAT);
- --timeout;
- } while (0 == (rawstat & PMU_REG_VAL_IRQ) && 0 < timeout);
+ mali_pm_exec_lock();
- MALI_DEBUG_ASSERT(0 < timeout);
- if (0 == timeout) {
- return _MALI_OSK_ERR_TIMEOUT;
- }
+ mali_pmu_reset(pmu);
- mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
+ /* Now simply power up the domains which are marked as powered down */
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
+ mali_pmu_power_up(pmu, stat);
- return _MALI_OSK_ERR_OK;
+ mali_pm_exec_unlock();
}
-static _mali_osk_errcode_t mali_pmu_power_up_internal(struct mali_pmu_core *pmu, const u32 mask)
+void mali_pmu_power_down_all(struct mali_pmu_core *pmu)
{
u32 stat;
- _mali_osk_errcode_t err;
-#if !defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
- u32 current_domain;
-#endif
MALI_DEBUG_ASSERT_POINTER(pmu);
- MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_RAWSTAT)
- & PMU_REG_VAL_IRQ));
-
- stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
- stat &= pmu->registered_cores_mask;
- if (0 == mask || 0 == (stat & mask)) return _MALI_OSK_ERR_OK;
-
-#if defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
- mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_UP, mask);
-
- err = mali_pmu_wait_for_command_finish(pmu);
- if (_MALI_OSK_ERR_OK != err) {
- return err;
- }
-#else
- for (current_domain = 1; current_domain <= pmu->registered_cores_mask; current_domain <<= 1) {
- if (current_domain & mask & stat) {
- mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_UP, current_domain);
-
- err = mali_pmu_wait_for_command_finish(pmu);
- if (_MALI_OSK_ERR_OK != err) {
- return err;
- }
- }
- }
-#endif
+ MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
-#if defined(DEBUG)
- /* Get power status of cores */
- stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
- stat &= pmu->registered_cores_mask;
+ mali_pm_exec_lock();
- MALI_DEBUG_ASSERT(0 == (stat & mask));
- MALI_DEBUG_ASSERT(0 == (stat & pmu->active_cores_mask));
-#endif /* defined(DEBUG) */
+ /* Now simply power down the domains which are marked as powered up */
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
+ mali_pmu_power_down(pmu, (~stat) & pmu->registered_cores_mask);
- return _MALI_OSK_ERR_OK;
+ mali_pm_exec_unlock();
}
-static _mali_osk_errcode_t mali_pmu_power_down_internal(struct mali_pmu_core *pmu, const u32 mask)
+_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask)
{
u32 stat;
_mali_osk_errcode_t err;
MALI_DEBUG_ASSERT_POINTER(pmu);
- MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_RAWSTAT)
- & PMU_REG_VAL_IRQ));
-
- stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
- stat &= pmu->registered_cores_mask;
+ MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+ MALI_DEBUG_ASSERT(mask <= pmu->registered_cores_mask);
+ MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_RAWSTAT) &
+ PMU_REG_VAL_IRQ));
+
+ MALI_DEBUG_PRINT(3,
+ ("PMU power down: ...................... [%s]\n",
+ mali_pm_mask_to_string(mask)));
+
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
+
+ /*
+ * Assert that we are not powering down domains which are already
+ * powered down.
+ */
+ MALI_DEBUG_ASSERT(0 == (stat & mask));
if (0 == mask || 0 == ((~stat) & mask)) return _MALI_OSK_ERR_OK;
- mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_POWER_DOWN, mask);
+ mali_hw_core_register_write(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_POWER_DOWN, mask);
- /* Do not wait for interrupt on Mali-300/400 if all domains are powered off
- * by our power down command, because the HW will simply not generate an
- * interrupt in this case.*/
+ /*
+ * Do not wait for interrupt on Mali-300/400 if all domains are
+ * powered off by our power down command, because the HW will simply
+ * not generate an interrupt in this case.
+ */
if (mali_is_mali450() || pmu->registered_cores_mask != (mask | stat)) {
err = mali_pmu_wait_for_command_finish(pmu);
if (_MALI_OSK_ERR_OK != err) {
return err;
}
} else {
- mali_hw_core_register_write(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
+ mali_hw_core_register_write(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
}
-#if defined(DEBUG)
- /* Get power status of cores */
- stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
- stat &= pmu->registered_cores_mask;
+#if defined(DEBUG)
+ /* Verify power status of domains after power down */
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
MALI_DEBUG_ASSERT(mask == (stat & mask));
#endif
return _MALI_OSK_ERR_OK;
}
-_mali_osk_errcode_t mali_pmu_reset(struct mali_pmu_core *pmu)
-{
- _mali_osk_errcode_t err;
- u32 cores_off_mask, cores_on_mask, stat;
-
- mali_pmu_lock(pmu);
-
- /* Setup the desired defaults */
- mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
- mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
-
- /* Get power status of cores */
- stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
-
- cores_off_mask = pmu->registered_cores_mask & ~(stat | pmu->active_cores_mask);
- cores_on_mask = pmu->registered_cores_mask & (stat & pmu->active_cores_mask);
-
- if (0 != cores_off_mask) {
- err = mali_pmu_power_down_internal(pmu, cores_off_mask);
- if (_MALI_OSK_ERR_OK != err) return err;
- }
-
- if (0 != cores_on_mask) {
- err = mali_pmu_power_up_internal(pmu, cores_on_mask);
- if (_MALI_OSK_ERR_OK != err) return err;
- }
-
-#if defined(DEBUG)
- {
- stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
- stat &= pmu->registered_cores_mask;
-
- MALI_DEBUG_ASSERT(stat == (pmu->registered_cores_mask & ~pmu->active_cores_mask));
- }
-#endif /* defined(DEBUG) */
-
- mali_pmu_unlock(pmu);
-
- return _MALI_OSK_ERR_OK;
-}
-
-_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask)
-{
- _mali_osk_errcode_t err;
-
- MALI_DEBUG_ASSERT_POINTER(pmu);
- MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
-
- /* Make sure we have a valid power domain mask */
- if (mask > pmu->registered_cores_mask) {
- return _MALI_OSK_ERR_INVALID_ARGS;
- }
-
- mali_pmu_lock(pmu);
-
- MALI_DEBUG_PRINT(4, ("Mali PMU: Power down (0x%08X)\n", mask));
-
- pmu->active_cores_mask &= ~mask;
-
- _mali_osk_pm_dev_ref_add_no_power_on();
- if (!mali_pm_is_power_on()) {
- /* Don't touch hardware if all of Mali is powered off. */
- _mali_osk_pm_dev_ref_dec_no_power_on();
- mali_pmu_unlock(pmu);
-
- MALI_DEBUG_PRINT(4, ("Mali PMU: Skipping power down (0x%08X) since Mali is off\n", mask));
-
- return _MALI_OSK_ERR_BUSY;
- }
-
- err = mali_pmu_power_down_internal(pmu, mask);
-
- _mali_osk_pm_dev_ref_dec_no_power_on();
- mali_pmu_unlock(pmu);
-
- return err;
-}
-
_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask)
{
+ u32 stat;
_mali_osk_errcode_t err;
+#if !defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
+ u32 current_domain;
+#endif
MALI_DEBUG_ASSERT_POINTER(pmu);
MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
+ MALI_DEBUG_ASSERT(mask <= pmu->registered_cores_mask);
+ MALI_DEBUG_ASSERT(0 == (mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_RAWSTAT) &
+ PMU_REG_VAL_IRQ));
- /* Make sure we have a valid power domain mask */
- if (mask & ~pmu->registered_cores_mask) {
- return _MALI_OSK_ERR_INVALID_ARGS;
- }
-
- mali_pmu_lock(pmu);
-
- MALI_DEBUG_PRINT(4, ("Mali PMU: Power up (0x%08X)\n", mask));
+ MALI_DEBUG_PRINT(3,
+ ("PMU power up: ........................ [%s]\n",
+ mali_pm_mask_to_string(mask)));
- pmu->active_cores_mask |= mask;
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
+ stat &= pmu->registered_cores_mask;
+ if (0 == mask || 0 == (stat & mask)) return _MALI_OSK_ERR_OK;
- _mali_osk_pm_dev_ref_add_no_power_on();
- if (!mali_pm_is_power_on()) {
- /* Don't touch hardware if all of Mali is powered off. */
- _mali_osk_pm_dev_ref_dec_no_power_on();
- mali_pmu_unlock(pmu);
+ /*
+ * Assert that we are only powering up domains which are currently
+ * powered down.
+ */
+ MALI_DEBUG_ASSERT(mask == (stat & mask));
- MALI_DEBUG_PRINT(4, ("Mali PMU: Skipping power up (0x%08X) since Mali is off\n", mask));
+#if defined(CONFIG_MALI_PMU_PARALLEL_POWER_UP)
+ mali_hw_core_register_write(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_POWER_UP, mask);
- return _MALI_OSK_ERR_BUSY;
+ err = mali_pmu_wait_for_command_finish(pmu);
+ if (_MALI_OSK_ERR_OK != err) {
+ return err;
}
+#else
+ for (current_domain = 1;
+ current_domain <= pmu->registered_cores_mask;
+ current_domain <<= 1) {
+ if (current_domain & mask & stat) {
+ mali_hw_core_register_write(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_POWER_UP,
+ current_domain);
- err = mali_pmu_power_up_internal(pmu, mask);
-
- _mali_osk_pm_dev_ref_dec_no_power_on();
- mali_pmu_unlock(pmu);
-
- return err;
-}
-
-_mali_osk_errcode_t mali_pmu_power_down_all(struct mali_pmu_core *pmu)
-{
- _mali_osk_errcode_t err;
-
- MALI_DEBUG_ASSERT_POINTER(pmu);
- MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
-
- mali_pmu_lock(pmu);
-
- /* Setup the desired defaults in case we were called before mali_pmu_reset() */
- mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
- mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
-
- err = mali_pmu_power_down_internal(pmu, pmu->registered_cores_mask);
-
- mali_pmu_unlock(pmu);
-
- return err;
-}
-
-_mali_osk_errcode_t mali_pmu_power_up_all(struct mali_pmu_core *pmu)
-{
- _mali_osk_errcode_t err;
-
- MALI_DEBUG_ASSERT_POINTER(pmu);
- MALI_DEBUG_ASSERT(pmu->registered_cores_mask != 0);
-
- mali_pmu_lock(pmu);
-
- /* Setup the desired defaults in case we were called before mali_pmu_reset() */
- mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_INT_MASK, 0);
- mali_hw_core_register_write_relaxed(&pmu->hw_core, PMU_REG_ADDR_MGMT_SW_DELAY, pmu->switch_delay);
-
- err = mali_pmu_power_up_internal(pmu, pmu->active_cores_mask);
+ err = mali_pmu_wait_for_command_finish(pmu);
+ if (_MALI_OSK_ERR_OK != err) {
+ return err;
+ }
+ }
+ }
+#endif
- mali_pmu_unlock(pmu);
- return err;
-}
+#if defined(DEBUG)
+ /* Verify power status of domains after power up */
+ stat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_STATUS);
+ MALI_DEBUG_ASSERT(0 == (stat & mask));
+#endif /* defined(DEBUG) */
-struct mali_pmu_core *mali_pmu_get_global_pmu_core(void)
-{
- return mali_global_pmu_core;
+ return _MALI_OSK_ERR_OK;
}
-static u32 mali_pmu_detect_mask(void)
+static _mali_osk_errcode_t mali_pmu_wait_for_command_finish(
+ struct mali_pmu_core *pmu)
{
- int dynamic_config_pp = 0;
- int dynamic_config_l2 = 0;
- int i = 0;
- u32 mask = 0;
-
- /* Check if PM domain compatible with actually pp core and l2 cache and collection info about domain */
- mask = mali_pmu_get_domain_mask(MALI_GP_DOMAIN_INDEX);
+ u32 rawstat;
+ u32 timeout = MALI_REG_POLL_COUNT_SLOW;
- for (i = MALI_PP0_DOMAIN_INDEX; i <= MALI_PP7_DOMAIN_INDEX; i++) {
- mask |= mali_pmu_get_domain_mask(i);
+ MALI_DEBUG_ASSERT(pmu);
- if (0x0 != mali_pmu_get_domain_mask(i)) {
- dynamic_config_pp++;
- }
- }
+ /* Wait for the command to complete */
+ do {
+ rawstat = mali_hw_core_register_read(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_RAWSTAT);
+ --timeout;
+ } while (0 == (rawstat & PMU_REG_VAL_IRQ) && 0 < timeout);
- for (i = MALI_L20_DOMAIN_INDEX; i <= MALI_L22_DOMAIN_INDEX; i++) {
- mask |= mali_pmu_get_domain_mask(i);
+ MALI_DEBUG_ASSERT(0 < timeout);
- if (0x0 != mali_pmu_get_domain_mask(i)) {
- dynamic_config_l2++;
- }
+ if (0 == timeout) {
+ return _MALI_OSK_ERR_TIMEOUT;
}
- MALI_DEBUG_PRINT(2, ("Mali PMU: mask 0x%x, pp_core %d, l2_core %d \n", mask, dynamic_config_pp, dynamic_config_l2));
+ mali_hw_core_register_write(&pmu->hw_core,
+ PMU_REG_ADDR_MGMT_INT_CLEAR, PMU_REG_VAL_IRQ);
- return mask;
+ return _MALI_OSK_ERR_OK;
}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
#define __MALI_PMU_H__
#include "mali_osk.h"
+#include "mali_kernel_common.h"
+#include "mali_hw_core.h"
-#define MALI_GP_DOMAIN_INDEX 0
-#define MALI_PP0_DOMAIN_INDEX 1
-#define MALI_PP1_DOMAIN_INDEX 2
-#define MALI_PP2_DOMAIN_INDEX 3
-#define MALI_PP3_DOMAIN_INDEX 4
-#define MALI_PP4_DOMAIN_INDEX 5
-#define MALI_PP5_DOMAIN_INDEX 6
-#define MALI_PP6_DOMAIN_INDEX 7
-#define MALI_PP7_DOMAIN_INDEX 8
-#define MALI_L20_DOMAIN_INDEX 9
-#define MALI_L21_DOMAIN_INDEX 10
-#define MALI_L22_DOMAIN_INDEX 11
-
-#define MALI_MAX_NUMBER_OF_DOMAINS 12
-
-/* Record the domain config from the customer or default config */
-extern u16 mali_pmu_global_domain_config[];
-
-static inline u16 mali_pmu_get_domain_mask(u32 index)
-{
- MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > index);
-
- return mali_pmu_global_domain_config[index];
-}
-
-static inline void mali_pmu_set_domain_mask(u32 index, u16 value)
-{
- MALI_DEBUG_ASSERT(MALI_MAX_NUMBER_OF_DOMAINS > index);
+/** @brief MALI built-in PMU hardware info; the PMU hardware knows the power mask of the cores
+ */
+struct mali_pmu_core {
+ struct mali_hw_core hw_core;
+ u32 registered_cores_mask;
+ u32 switch_delay;
+};
- mali_pmu_global_domain_config[index] = value;
-}
+/** @brief Register layout for hardware PMU
+ */
+typedef enum {
+ PMU_REG_ADDR_MGMT_POWER_UP = 0x00, /*< Power up register */
+ PMU_REG_ADDR_MGMT_POWER_DOWN = 0x04, /*< Power down register */
+ PMU_REG_ADDR_MGMT_STATUS = 0x08, /*< Core sleep status register */
+ PMU_REG_ADDR_MGMT_INT_MASK = 0x0C, /*< Interrupt mask register */
+ PMU_REG_ADDR_MGMT_INT_RAWSTAT = 0x10, /*< Interrupt raw status register */
+ PMU_REG_ADDR_MGMT_INT_CLEAR = 0x18, /*< Interrupt clear register */
+ PMU_REG_ADDR_MGMT_SW_DELAY = 0x1C, /*< Switch delay register */
+ PMU_REGISTER_ADDRESS_SPACE_SIZE = 0x28, /*< Size of register space */
+} pmu_reg_addr_mgmt_addr;
-static inline void mali_pmu_copy_domain_mask(void *src, u32 len)
-{
- _mali_osk_memcpy(mali_pmu_global_domain_config, src, len);
-}
+#define PMU_REG_VAL_IRQ 1
-struct mali_pmu_core;
+extern struct mali_pmu_core *mali_global_pmu_core;
/** @brief Initialisation of MALI PMU
*
*/
void mali_pmu_delete(struct mali_pmu_core *pmu);
-/** @brief Reset PMU core
+/** @brief Set registered cores mask
*
- * @param pmu Pointer to PMU core object to reset
- * @return _MALI_OSK_ERR_OK on success, otherwise failure.
+ * @param pmu Pointer to PMU core object
+ * @param mask All available/valid domain bits
*/
-_mali_osk_errcode_t mali_pmu_reset(struct mali_pmu_core *pmu);
+void mali_pmu_set_registered_cores_mask(struct mali_pmu_core *pmu, u32 mask);
-/** @brief MALI GPU power down using MALI in-built PMU
- *
- * Called to power down the specified cores. The mask will be saved so that \a
- * mali_pmu_power_up_all will bring the PMU back to the previous state set with
- * this function or \a mali_pmu_power_up.
+/** @brief Retrieves the Mali PMU core object (if any)
*
- * @param pmu Pointer to PMU core object to power down
- * @param mask Mask specifying which power domains to power down
- * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ * @return The Mali PMU object, or NULL if no PMU exists.
*/
-_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask);
+MALI_STATIC_INLINE struct mali_pmu_core *mali_pmu_get_global_pmu_core(void)
+{
+ return mali_global_pmu_core;
+}
-/** @brief MALI GPU power up using MALI in-built PMU
+/** @brief Reset PMU core
*
- * Called to power up the specified cores. The mask will be saved so that \a
- * mali_pmu_power_up_all will bring the PMU back to the previous state set with
- * this function or \a mali_pmu_power_down.
+ * @param pmu Pointer to PMU core object to reset
+ */
+void mali_pmu_reset(struct mali_pmu_core *pmu);
+
+void mali_pmu_power_up_all(struct mali_pmu_core *pmu);
+
+void mali_pmu_power_down_all(struct mali_pmu_core *pmu);
+
+/** @brief Returns a mask of the currently powered up domains
*
- * @param pmu Pointer to PMU core object to power up
- * @param mask Mask specifying which power domains to power up
- * @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
+ * @param pmu Pointer to PMU core object
*/
-_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask);
+MALI_STATIC_INLINE u32 mali_pmu_get_mask(struct mali_pmu_core *pmu)
+{
+ u32 stat = mali_hw_core_register_read(&pmu->hw_core, PMU_REG_ADDR_MGMT_STATUS);
+ return ((~stat) & pmu->registered_cores_mask);
+}
/** @brief MALI GPU power down using MALI in-built PMU
*
- * called to power down all cores
+ * Called to power down the specified cores.
*
* @param pmu Pointer to PMU core object to power down
+ * @param mask Mask specifying which power domains to power down
* @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
*/
-_mali_osk_errcode_t mali_pmu_power_down_all(struct mali_pmu_core *pmu);
+_mali_osk_errcode_t mali_pmu_power_down(struct mali_pmu_core *pmu, u32 mask);
/** @brief MALI GPU power up using MALI in-built PMU
*
- * called to power up all cores
+ * Called to power up the specified cores.
*
* @param pmu Pointer to PMU core object to power up
+ * @param mask Mask specifying which power domains to power up
* @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
*/
-_mali_osk_errcode_t mali_pmu_power_up_all(struct mali_pmu_core *pmu);
-
-/** @brief Retrieves the Mali PMU core object (if any)
- *
- * @return The Mali PMU object, or NULL if no PMU exists.
- */
-struct mali_pmu_core *mali_pmu_get_global_pmu_core(void);
+_mali_osk_errcode_t mali_pmu_power_up(struct mali_pmu_core *pmu, u32 mask);
#endif /* __MALI_PMU_H__ */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_pp_job.h"
#include "regs/mali_200_regs.h"
#include "mali_kernel_common.h"
#include "mali_kernel_core.h"
-#include "mali_dma.h"
#if defined(CONFIG_MALI400_PROFILING)
#include "mali_osk_profiling.h"
#endif
u32 rawstat = 0;
for (i = 0; i < MALI_REG_POLL_COUNT_FAST; i++) {
- if (!(mali_pp_read_status(core) & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE)) {
+ u32 status = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS);
+ if (!(status & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE)) {
rawstat = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT);
if (rawstat == MALI400PP_REG_VAL_IRQ_RESET_COMPLETED) {
break;
return mali_pp_reset_wait(core);
}
-void mali_pp_job_dma_cmd_prepare(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job,
- mali_dma_cmd_buf *buf)
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job, mali_bool restart_virtual)
{
u32 relative_address;
u32 start_index;
MALI_DEBUG_ASSERT_POINTER(core);
- /* Write registers between MALI200_REG_ADDR_FRAME and MALI200_REG_ADDR_STACK */
- relative_address = MALI200_REG_ADDR_RSW;
- start_index = MALI200_REG_ADDR_RSW / sizeof(u32);
- nr_of_regs = (MALI200_REG_ADDR_STACK - MALI200_REG_ADDR_RSW) / sizeof(u32);
-
- mali_dma_write_array_conditional(buf, &core->hw_core,
- relative_address, &frame_registers[start_index],
- nr_of_regs, &mali_frame_registers_reset_values[start_index]);
-
- /* MALI200_REG_ADDR_STACK_SIZE */
- relative_address = MALI200_REG_ADDR_STACK_SIZE;
- start_index = MALI200_REG_ADDR_STACK_SIZE / sizeof(u32);
-
- mali_dma_write_conditional(buf, &core->hw_core,
- relative_address, frame_registers[start_index],
- mali_frame_registers_reset_values[start_index]);
-
- /* Skip 2 reserved registers */
-
- /* Write remaining registers */
- relative_address = MALI200_REG_ADDR_ORIGIN_OFFSET_X;
- start_index = MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
- nr_of_regs = MALI_PP_MALI400_NUM_FRAME_REGISTERS - MALI200_REG_ADDR_ORIGIN_OFFSET_X / sizeof(u32);
-
- mali_dma_write_array_conditional(buf, &core->hw_core,
- relative_address, &frame_registers[start_index],
- nr_of_regs, &mali_frame_registers_reset_values[start_index]);
-
- /* Write WBx registers */
- if (wb0_registers[0]) { /* M200_WB0_REG_SOURCE_SELECT register */
- mali_dma_write_array_conditional(buf, &core->hw_core, MALI200_REG_ADDR_WB0, wb0_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
- }
-
- if (wb1_registers[0]) { /* M200_WB1_REG_SOURCE_SELECT register */
- mali_dma_write_array_conditional(buf, &core->hw_core, MALI200_REG_ADDR_WB1, wb1_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
- }
-
- if (wb2_registers[0]) { /* M200_WB2_REG_SOURCE_SELECT register */
- mali_dma_write_array_conditional(buf, &core->hw_core, MALI200_REG_ADDR_WB2, wb2_registers, _MALI_PP_MAX_WB_REGISTERS, mali_wb_registers_reset_values);
- }
+ /* Write frame registers */
- if (MALI_HW_CORE_NO_COUNTER != counter_src0) {
- mali_dma_write(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_SRC, counter_src0);
- mali_dma_write_conditional(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_0_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
- }
- if (MALI_HW_CORE_NO_COUNTER != counter_src1) {
- mali_dma_write(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_SRC, counter_src1);
- mali_dma_write_conditional(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_PERF_CNT_1_ENABLE, MALI200_REG_VAL_PERF_CNT_ENABLE, mali_perf_cnt_enable_reset_value);
- }
-
- /* This is the command that starts the core.
- *
- * Don't actually run the job if PROFILING_SKIP_PP_JOBS are set, just
- * force core to assert the completion interrupt.
+ /*
+ * There are two frame registers which are different for each sub job:
+ * 1. The Renderer List Address Register (MALI200_REG_ADDR_FRAME)
+ * 2. The FS Stack Address Register (MALI200_REG_ADDR_STACK)
*/
-#if !defined(PROFILING_SKIP_PP_JOBS)
- mali_dma_write(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_CTRL_MGMT, MALI200_REG_VAL_CTRL_MGMT_START_RENDERING);
-#else
- mali_dma_write(buf, &core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT, MALI200_REG_VAL_IRQ_END_OF_FRAME);
-#endif
-}
+ mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_FRAME, mali_pp_job_get_addr_frame(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_FRAME / sizeof(u32)]);
-void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job)
-{
- u32 relative_address;
- u32 start_index;
- u32 nr_of_regs;
- u32 *frame_registers = mali_pp_job_get_frame_registers(job);
- u32 *wb0_registers = mali_pp_job_get_wb0_registers(job);
- u32 *wb1_registers = mali_pp_job_get_wb1_registers(job);
- u32 *wb2_registers = mali_pp_job_get_wb2_registers(job);
- u32 counter_src0 = mali_pp_job_get_perf_counter_src0(job, sub_job);
- u32 counter_src1 = mali_pp_job_get_perf_counter_src1(job, sub_job);
-
- MALI_DEBUG_ASSERT_POINTER(core);
+ /* For virtual jobs, the stack address shouldn't be broadcast but written individually */
+ if (!mali_pp_job_is_virtual(job) || restart_virtual) {
+ mali_hw_core_register_write_relaxed_conditional(&core->hw_core, MALI200_REG_ADDR_STACK, mali_pp_job_get_addr_stack(job, sub_job), mali_frame_registers_reset_values[MALI200_REG_ADDR_STACK / sizeof(u32)]);
+ }
/* Write registers between MALI200_REG_ADDR_FRAME and MALI200_REG_ADDR_STACK */
relative_address = MALI200_REG_ADDR_RSW;
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_PP_H__
#include "mali_osk.h"
#include "mali_pp_job.h"
#include "mali_hw_core.h"
-#include "mali_dma.h"
struct mali_group;
_mali_osk_errcode_t mali_pp_reset(struct mali_pp_core *core);
_mali_osk_errcode_t mali_pp_hard_reset(struct mali_pp_core *core);
-void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job);
-
-/**
- * @brief Add commands to DMA command buffer to start PP job on core.
- */
-void mali_pp_job_dma_cmd_prepare(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job,
- mali_dma_cmd_buf *buf);
+void mali_pp_job_start(struct mali_pp_core *core, struct mali_pp_job *job, u32 sub_job, mali_bool restart_virtual);
u32 mali_pp_core_get_version(struct mali_pp_core *core);
*/
void mali_pp_update_performance_counters(struct mali_pp_core *parent, struct mali_pp_core *child, struct mali_pp_job *job, u32 subjob);
-MALI_STATIC_INLINE const char *mali_pp_get_hw_core_desc(struct mali_pp_core *core)
+MALI_STATIC_INLINE const char *mali_pp_core_description(struct mali_pp_core *core)
{
return core->hw_core.description;
}
-/*** Register reading/writing functions ***/
-MALI_STATIC_INLINE u32 mali_pp_get_int_stat(struct mali_pp_core *core)
+MALI_STATIC_INLINE enum mali_interrupt_result mali_pp_get_interrupt_result(struct mali_pp_core *core)
{
- return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_STATUS);
+ u32 rawstat_used = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) &
+ MALI200_REG_VAL_IRQ_MASK_USED;
+ if (0 == rawstat_used) {
+ return MALI_INTERRUPT_RESULT_NONE;
+ } else if (MALI200_REG_VAL_IRQ_END_OF_FRAME == rawstat_used) {
+ return MALI_INTERRUPT_RESULT_SUCCESS;
+ }
+ return MALI_INTERRUPT_RESULT_ERROR;
}
-MALI_STATIC_INLINE u32 mali_pp_read_rawstat(struct mali_pp_core *core)
+MALI_STATIC_INLINE u32 mali_pp_get_rawstat(struct mali_pp_core *core)
{
- return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_RAWSTAT) & MALI200_REG_VAL_IRQ_MASK_USED;
+ MALI_DEBUG_ASSERT_POINTER(core);
+ return mali_hw_core_register_read(&core->hw_core,
+ MALI200_REG_ADDR_MGMT_INT_RAWSTAT);
}
-MALI_STATIC_INLINE u32 mali_pp_read_status(struct mali_pp_core *core)
+
+MALI_STATIC_INLINE u32 mali_pp_is_active(struct mali_pp_core *core)
{
- return mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS);
+ u32 status = mali_hw_core_register_read(&core->hw_core, MALI200_REG_ADDR_MGMT_STATUS);
+ return (status & MALI200_REG_VAL_STATUS_RENDERING_ACTIVE) ? MALI_TRUE : MALI_FALSE;
}
MALI_STATIC_INLINE void mali_pp_mask_all_interrupts(struct mali_pp_core *core)
mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_NONE);
}
-MALI_STATIC_INLINE void mali_pp_clear_hang_interrupt(struct mali_pp_core *core)
-{
- mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_CLEAR, MALI200_REG_VAL_IRQ_HANG);
-}
-
MALI_STATIC_INLINE void mali_pp_enable_interrupts(struct mali_pp_core *core)
{
mali_hw_core_register_write(&core->hw_core, MALI200_REG_ADDR_MGMT_INT_MASK, MALI200_REG_VAL_IRQ_MASK_USED);
}
-MALI_STATIC_INLINE void mali_pp_write_addr_stack(struct mali_pp_core *core, struct mali_pp_job *job, u32 subjob)
+MALI_STATIC_INLINE void mali_pp_write_addr_stack(struct mali_pp_core *core, struct mali_pp_job *job)
{
- u32 addr = mali_pp_job_get_addr_stack(job, subjob);
+ u32 addr = mali_pp_job_get_addr_stack(job, core->core_id);
mali_hw_core_register_write_relaxed(&core->hw_core, MALI200_REG_ADDR_STACK, addr);
}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_pp.h"
#include "mali_pp_job.h"
-#include "mali_dma.h"
#include "mali_osk.h"
#include "mali_osk_list.h"
#include "mali_kernel_common.h"
#include "mali_uk_types.h"
-#include "mali_pp_scheduler.h"
+#include "mali_executor.h"
#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
#include "linux/mali_memory_dma_buf.h"
#endif
_mali_osk_list_init(&job->list);
job->session = session;
- _mali_osk_list_init(&job->session_list);
job->id = id;
job->sub_jobs_num = job->uargs.num_cores ? job->uargs.num_cores : 1;
job->pid = _mali_osk_get_pid();
job->tid = _mali_osk_get_tid();
- job->num_memory_cookies = job->uargs.num_memory_cookies;
- if (job->num_memory_cookies > 0) {
+ _mali_osk_atomic_init(&job->sub_jobs_completed, 0);
+ _mali_osk_atomic_init(&job->sub_job_errors, 0);
+
+ if (job->uargs.num_memory_cookies > 0) {
u32 size;
u32 __user *memory_cookies = (u32 __user *)(uintptr_t)job->uargs.memory_cookies;
goto fail;
}
- size = sizeof(*memory_cookies) * job->num_memory_cookies;
+ size = sizeof(*memory_cookies) * job->uargs.num_memory_cookies;
job->memory_cookies = _mali_osk_malloc(size);
if (NULL == job->memory_cookies) {
}
#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
- job->num_dma_bufs = job->num_memory_cookies;
- job->dma_bufs = _mali_osk_calloc(job->num_dma_bufs, sizeof(struct mali_dma_buf_attachment *));
- if (NULL == job->dma_bufs) {
- MALI_PRINT_ERROR(("Mali PP job: Failed to allocate dma_bufs array!\n"));
- goto fail;
+ if (0 < job->uargs.num_memory_cookies) {
+ job->dma_bufs = _mali_osk_calloc(job->uargs.num_memory_cookies,
+ sizeof(struct mali_dma_buf_attachment *));
+ if (NULL == job->dma_bufs) {
+ MALI_PRINT_ERROR(("Mali PP job: Failed to allocate dma_bufs array!\n"));
+ goto fail;
+ }
}
#endif
}
- /* Prepare DMA command buffer to start job, if it is virtual. */
- if (mali_pp_job_is_virtual_group_job(job)) {
- struct mali_pp_core *core;
- _mali_osk_errcode_t err = mali_dma_get_cmd_buf(&job->dma_cmd_buf);
-
- if (_MALI_OSK_ERR_OK != err) {
- MALI_PRINT_ERROR(("Mali PP job: Failed to allocate DMA command buffer\n"));
- goto fail;
- }
-
- core = mali_pp_scheduler_get_virtual_pp();
- MALI_DEBUG_ASSERT_POINTER(core);
-
- mali_pp_job_dma_cmd_prepare(core, job, 0, &job->dma_cmd_buf);
- }
-
if (_MALI_OSK_ERR_OK != mali_pp_job_check(job)) {
/* Not a valid job. */
goto fail;
void mali_pp_job_delete(struct mali_pp_job *job)
{
- mali_dma_put_cmd_buf(&job->dma_cmd_buf);
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->list));
+ MALI_DEBUG_ASSERT(_mali_osk_list_empty(&job->session_fb_lookup_list));
+
if (NULL != job->finished_notification) {
_mali_osk_notification_delete(job->finished_notification);
}
- _mali_osk_free(job->memory_cookies);
-
#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
/* Unmap buffers attached to job */
- if (0 < job->num_dma_bufs) {
+ if (0 < job->uargs.num_memory_cookies) {
mali_dma_buf_unmap_job(job);
+ if (NULL != job->dma_bufs) {
+ _mali_osk_free(job->dma_bufs);
+ }
}
-
- _mali_osk_free(job->dma_bufs);
#endif /* CONFIG_DMA_SHARED_BUFFER */
+ if (NULL != job->memory_cookies) {
+ _mali_osk_free(job->memory_cookies);
+ }
+
+ _mali_osk_atomic_term(&job->sub_jobs_completed);
+ _mali_osk_atomic_term(&job->sub_job_errors);
+
_mali_osk_free(job);
}
+void mali_pp_job_list_add(struct mali_pp_job *job, _mali_osk_list_t *list)
+{
+ struct mali_pp_job *iter;
+ struct mali_pp_job *tmp;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+ /* Find position in list/queue where job should be added. */
+ _MALI_OSK_LIST_FOREACHENTRY_REVERSE(iter, tmp, list,
+ struct mali_pp_job, list) {
+ /* job should be started after iter if iter is in progress. */
+ if (0 < iter->sub_jobs_started) {
+ break;
+ }
+
+ /*
+ * job should be started after iter if it has a higher
+ * job id. A span is used to handle job id wrapping.
+ */
+ if ((mali_pp_job_get_id(job) -
+ mali_pp_job_get_id(iter)) <
+ MALI_SCHEDULER_JOB_ID_SPAN) {
+ break;
+ }
+ }
+
+ _mali_osk_list_add(&job->list, &iter->list);
+}
+
+
u32 mali_pp_job_get_perf_counter_src0(struct mali_pp_job *job, u32 sub_job)
{
/* Virtual jobs always use the global job counter (or if there are per sub job counters at all) */
- if (mali_pp_job_is_virtual_group_job(job) || 0 == job->perf_counter_per_sub_job_count) {
+ if (mali_pp_job_is_virtual(job) || 0 == job->perf_counter_per_sub_job_count) {
return job->uargs.perf_counter_src0;
}
u32 mali_pp_job_get_perf_counter_src1(struct mali_pp_job *job, u32 sub_job)
{
/* Virtual jobs always use the global job counter (or if there are per sub job counters at all) */
- if (mali_pp_job_is_virtual_group_job(job) || 0 == job->perf_counter_per_sub_job_count) {
+ if (mali_pp_job_is_virtual(job) || 0 == job->perf_counter_per_sub_job_count) {
/* Virtual jobs always use the global job counter */
return job->uargs.perf_counter_src1;
}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_PP_JOB_H__
#include "mali_kernel_common.h"
#include "regs/mali_200_regs.h"
#include "mali_kernel_core.h"
-#include "mali_dma.h"
#include "mali_dlbu.h"
#include "mali_timeline.h"
+#include "mali_scheduler.h"
+#include "mali_executor.h"
#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
#include "linux/mali_memory_dma_buf.h"
#endif
/**
- * The structure represents a PP job, including all sub-jobs
- * (This struct unfortunately needs to be public because of how the _mali_osk_list_*
- * mechanism works)
+ * This structure represents a PP job, including all sub jobs.
+ *
+ * The PP job object itself is not protected by any single lock,
+ * but relies on other locks instead (scheduler, executor and timeline lock).
+ * Think of the job object as moving between these subsystems throughout
+ * its lifetime. Different parts of the PP job struct are used by different
+ * subsystems. Accessor functions ensure that the correct lock is taken.
+ * Do NOT access any data members directly from outside this module!
*/
struct mali_pp_job {
- _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */
- struct mali_session_data *session; /**< Session which submitted this job */
- _mali_osk_list_t session_list; /**< Used to link jobs together in the session job list */
- _mali_osk_list_t session_fb_lookup_list; /**< Used to link jobs together from the same frame builder in the session */
+ /*
+ * These members are typically only set at creation,
+ * and only read later on.
+ * They do not require any lock protection.
+ */
_mali_uk_pp_start_job_s uargs; /**< Arguments from user space */
- mali_dma_cmd_buf dma_cmd_buf; /**< Command buffer for starting job using Mali-450 DMA unit */
- u32 id; /**< Identifier for this job in kernel space (sequential numbering) */
- u32 cache_order; /**< Cache order used for L2 cache flushing (sequential numbering) */
- u32 perf_counter_value0[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 0 (to be returned to user space), one for each sub job */
- u32 perf_counter_value1[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 1 (to be returned to user space), one for each sub job */
- u32 sub_jobs_num; /**< Number of subjobs; set to 1 for Mali-450 if DLBU is used, otherwise equals number of PP cores */
- u32 sub_jobs_started; /**< Total number of sub-jobs started (always started in ascending order) */
- u32 sub_jobs_completed; /**< Number of completed sub-jobs in this superjob */
- u32 sub_job_errors; /**< Bitfield with errors (errors for each single sub-job is or'ed together) */
+ struct mali_session_data *session; /**< Session which submitted this job */
u32 pid; /**< Process ID of submitting process */
u32 tid; /**< Thread ID of submitting thread */
+ u32 id; /**< Identifier for this job in kernel space (sequential numbering) */
+ u32 cache_order; /**< Cache order used for L2 cache flushing (sequential numbering) */
+ struct mali_timeline_tracker tracker; /**< Timeline tracker for this job */
_mali_osk_notification_t *finished_notification; /**< Notification sent back to userspace on job complete */
- u32 num_memory_cookies; /**< Number of memory cookies attached to job */
+ u32 perf_counter_per_sub_job_count; /**< Number of values in the two arrays which is != MALI_HW_CORE_NO_COUNTER */
+ u32 perf_counter_per_sub_job_src0[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src0 */
+ u32 perf_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src1 */
+ u32 sub_jobs_num; /**< Number of subjobs; set to 1 for Mali-450 if DLBU is used, otherwise equals number of PP cores */
+
+ /*
+ * These members are used by both scheduler and executor.
+ * They are "protected" by atomic operations.
+ */
+ _mali_osk_atomic_t sub_jobs_completed; /**< Number of completed sub-jobs in this superjob */
+ _mali_osk_atomic_t sub_job_errors; /**< Bitfield with errors (errors for each single sub-job is or'ed together) */
+
+ /*
+ * These members are used by the scheduler, but only while no one else
+ * knows about the job object except the working function.
+ * No lock is thus needed for these.
+ */
u32 *memory_cookies; /**< Memory cookies attached to job */
#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
struct mali_dma_buf_attachment **dma_bufs; /**< Array of DMA-bufs used by job */
- u32 num_dma_bufs; /**< Number of DMA-bufs used by job */
#endif
- struct mali_timeline_tracker tracker; /**< Timeline tracker for this job */
- u32 perf_counter_per_sub_job_count; /**< Number of values in the two arrays which is != MALI_HW_CORE_NO_COUNTER */
- u32 perf_counter_per_sub_job_src0[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src0 */
- u32 perf_counter_per_sub_job_src1[_MALI_PP_MAX_SUB_JOBS]; /**< Per sub job counters src1 */
+
+ /*
+ * These members are used by the scheduler,
+ * protected by scheduler lock
+ */
+ _mali_osk_list_t list; /**< Used to link jobs together in the scheduler queue */
+ _mali_osk_list_t session_fb_lookup_list; /**< Used to link jobs together from the same frame builder in the session */
+ u32 sub_jobs_started; /**< Total number of sub-jobs started (always started in ascending order) */
+
+ /*
+ * Set by executor/group on job completion, read by scheduler when
+ * returning job to user. Hold executor lock when setting,
+ * no lock needed when reading
+ */
+ u32 perf_counter_value0[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 0 (to be returned to user space), one for each sub job */
+ u32 perf_counter_value1[_MALI_PP_MAX_SUB_JOBS]; /**< Value of performance counter 1 (to be returned to user space), one for each sub job */
};
void mali_pp_job_initialize(void);
MALI_STATIC_INLINE u32 mali_pp_job_get_id(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return (NULL == job) ? 0 : job->id;
}
+MALI_STATIC_INLINE void mali_pp_job_set_cache_order(struct mali_pp_job *job,
+ u32 cache_order)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ job->cache_order = cache_order;
+}
+
MALI_STATIC_INLINE u32 mali_pp_job_get_cache_order(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return (NULL == job) ? 0 : job->cache_order;
}
MALI_STATIC_INLINE u64 mali_pp_job_get_user_id(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.user_job_ptr;
}
MALI_STATIC_INLINE u32 mali_pp_job_get_frame_builder_id(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.frame_builder_id;
}
MALI_STATIC_INLINE u32 mali_pp_job_get_flush_id(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.flush_id;
}
MALI_STATIC_INLINE u32 mali_pp_job_get_pid(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->pid;
}
MALI_STATIC_INLINE u32 mali_pp_job_get_tid(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->tid;
}
MALI_STATIC_INLINE u32 *mali_pp_job_get_frame_registers(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.frame_registers;
}
MALI_STATIC_INLINE u32 *mali_pp_job_get_dlbu_registers(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.dlbu_registers;
}
-MALI_STATIC_INLINE mali_bool mali_pp_job_is_virtual_group_job(struct mali_pp_job *job)
-{
- if (mali_is_mali450()) {
- return 1 != job->uargs.num_cores;
- }
-
- return MALI_FALSE;
-}
-
-MALI_STATIC_INLINE mali_bool mali_pp_job_is_with_dlbu(struct mali_pp_job *job)
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_virtual(struct mali_pp_job *job)
{
#if defined(CONFIG_MALI450)
- return 0 == job->uargs.num_cores;
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (0 == job->uargs.num_cores) ? MALI_TRUE : MALI_FALSE;
#else
return MALI_FALSE;
#endif
MALI_STATIC_INLINE u32 mali_pp_job_get_addr_frame(struct mali_pp_job *job, u32 sub_job)
{
- if (mali_pp_job_is_with_dlbu(job)) {
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (mali_pp_job_is_virtual(job)) {
return MALI_DLBU_VIRT_ADDR;
} else if (0 == sub_job) {
return job->uargs.frame_registers[MALI200_REG_ADDR_FRAME / sizeof(u32)];
MALI_STATIC_INLINE u32 mali_pp_job_get_addr_stack(struct mali_pp_job *job, u32 sub_job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
if (0 == sub_job) {
return job->uargs.frame_registers[MALI200_REG_ADDR_STACK / sizeof(u32)];
} else if (sub_job < _MALI_PP_MAX_SUB_JOBS) {
return 0;
}
+void mali_pp_job_list_add(struct mali_pp_job *job, _mali_osk_list_t *list);
+
+MALI_STATIC_INLINE void mali_pp_job_list_addtail(struct mali_pp_job *job,
+ _mali_osk_list_t *list)
+{
+ _mali_osk_list_addtail(&job->list, list);
+}
+
+MALI_STATIC_INLINE void mali_pp_job_list_move(struct mali_pp_job *job,
+ _mali_osk_list_t *list)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT(!_mali_osk_list_empty(&job->list));
+ _mali_osk_list_move(&job->list, list);
+}
+
+MALI_STATIC_INLINE void mali_pp_job_list_remove(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ _mali_osk_list_delinit(&job->list);
+}
+
MALI_STATIC_INLINE u32 *mali_pp_job_get_wb0_registers(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.wb0_registers;
}
MALI_STATIC_INLINE u32 *mali_pp_job_get_wb1_registers(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.wb1_registers;
}
MALI_STATIC_INLINE u32 *mali_pp_job_get_wb2_registers(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->uargs.wb2_registers;
}
+MALI_STATIC_INLINE u32 mali_pp_job_get_wb0_source_addr(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)];
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_wb1_source_addr(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)];
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_wb2_source_addr(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_ADDR / sizeof(u32)];
+}
+
MALI_STATIC_INLINE void mali_pp_job_disable_wb0(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
job->uargs.wb0_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
}
MALI_STATIC_INLINE void mali_pp_job_disable_wb1(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
job->uargs.wb1_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
}
MALI_STATIC_INLINE void mali_pp_job_disable_wb2(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
job->uargs.wb2_registers[MALI200_REG_ADDR_WB_SOURCE_SELECT] = 0;
}
return MALI_TRUE;
}
-MALI_STATIC_INLINE u32 mali_pp_job_get_fb_lookup_id(struct mali_pp_job *job)
+MALI_STATIC_INLINE void mali_pp_job_fb_lookup_add(struct mali_pp_job *job)
{
+ u32 fb_lookup_id;
+
MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+ fb_lookup_id = MALI_PP_JOB_FB_LOOKUP_LIST_MASK & job->uargs.frame_builder_id;
+
+ MALI_DEBUG_ASSERT(MALI_PP_JOB_FB_LOOKUP_LIST_SIZE > fb_lookup_id);
- return MALI_PP_JOB_FB_LOOKUP_LIST_MASK & job->uargs.frame_builder_id;
+ _mali_osk_list_addtail(&job->session_fb_lookup_list,
+ &job->session->pp_job_fb_lookup_list[fb_lookup_id]);
+}
+
+MALI_STATIC_INLINE void mali_pp_job_fb_lookup_remove(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ _mali_osk_list_delinit(&job->session_fb_lookup_list);
}
MALI_STATIC_INLINE struct mali_session_data *mali_pp_job_get_session(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->session;
}
+MALI_STATIC_INLINE mali_bool mali_pp_job_has_started_sub_jobs(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ return (0 < job->sub_jobs_started) ? MALI_TRUE : MALI_FALSE;
+}
+
MALI_STATIC_INLINE mali_bool mali_pp_job_has_unstarted_sub_jobs(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
return (job->sub_jobs_started < job->sub_jobs_num) ? MALI_TRUE : MALI_FALSE;
}
Makes sure that no new subjobs are started. */
MALI_STATIC_INLINE void mali_pp_job_mark_unstarted_failed(struct mali_pp_job *job)
{
- u32 jobs_remaining = job->sub_jobs_num - job->sub_jobs_started;
- job->sub_jobs_started += jobs_remaining;
- job->sub_jobs_completed += jobs_remaining;
- job->sub_job_errors += jobs_remaining;
-}
+ u32 jobs_remaining;
+ u32 i;
-MALI_STATIC_INLINE void mali_pp_job_mark_unstarted_success(struct mali_pp_job *job)
-{
- u32 jobs_remaining = job->sub_jobs_num - job->sub_jobs_started;
- job->sub_jobs_started += jobs_remaining;
- job->sub_jobs_completed += jobs_remaining;
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+
+ jobs_remaining = job->sub_jobs_num - job->sub_jobs_started;
+ job->sub_jobs_started += jobs_remaining;
+
+ /* Not the most optimal way, but this is only used in error cases */
+ for (i = 0; i < jobs_remaining; i++) {
+ _mali_osk_atomic_inc(&job->sub_jobs_completed);
+ _mali_osk_atomic_inc(&job->sub_job_errors);
+ }
}
MALI_STATIC_INLINE mali_bool mali_pp_job_is_complete(struct mali_pp_job *job)
{
- return (job->sub_jobs_num == job->sub_jobs_completed) ? MALI_TRUE : MALI_FALSE;
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (job->sub_jobs_num ==
+ _mali_osk_atomic_read(&job->sub_jobs_completed)) ?
+ MALI_TRUE : MALI_FALSE;
}
MALI_STATIC_INLINE u32 mali_pp_job_get_first_unstarted_sub_job(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
return job->sub_jobs_started;
}
MALI_STATIC_INLINE u32 mali_pp_job_get_sub_job_count(struct mali_pp_job *job)
{
+ MALI_DEBUG_ASSERT_POINTER(job);
return job->sub_jobs_num;
}
+MALI_STATIC_INLINE u32 mali_pp_job_unstarted_sub_job_count(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT(job->sub_jobs_num >= job->sub_jobs_started);
+ return (job->sub_jobs_num - job->sub_jobs_started);
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_num_memory_cookies(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return job->uargs.num_memory_cookies;
+}
+
+MALI_STATIC_INLINE u32 mali_pp_job_get_memory_cookie(
+ struct mali_pp_job *job, u32 index)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT(index < job->uargs.num_memory_cookies);
+ MALI_DEBUG_ASSERT_POINTER(job->memory_cookies);
+ return job->memory_cookies[index];
+}
+
/* A job that references memory cookies needs dma_buf mapping work
 * before it can be queued. */
MALI_STATIC_INLINE mali_bool mali_pp_job_needs_dma_buf_mapping(struct mali_pp_job *job)
{
	MALI_DEBUG_ASSERT_POINTER(job);

	return (0 < job->uargs.num_memory_cookies) ? MALI_TRUE : MALI_FALSE;
}
#if defined(CONFIG_DMA_SHARED_BUFFER) && !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
/* With map-on-demand there is one dma_buf slot per memory cookie. */
MALI_STATIC_INLINE u32 mali_pp_job_num_dma_bufs(struct mali_pp_job *job)
{
	MALI_DEBUG_ASSERT_POINTER(job);

	return job->uargs.num_memory_cookies;
}

/* Read the dma_buf attachment stored in slot @index. */
MALI_STATIC_INLINE struct mali_dma_buf_attachment *mali_pp_job_get_dma_buf(
	struct mali_pp_job *job, u32 index)
{
	MALI_DEBUG_ASSERT_POINTER(job);
	MALI_DEBUG_ASSERT_POINTER(job->dma_bufs);
	MALI_DEBUG_ASSERT(index < job->uargs.num_memory_cookies);

	return job->dma_bufs[index];
}

/* Store dma_buf attachment @mem in slot @index. */
MALI_STATIC_INLINE void mali_pp_job_set_dma_buf(struct mali_pp_job *job,
		u32 index, struct mali_dma_buf_attachment *mem)
{
	MALI_DEBUG_ASSERT_POINTER(job);
	MALI_DEBUG_ASSERT_POINTER(job->dma_bufs);
	MALI_DEBUG_ASSERT(index < job->uargs.num_memory_cookies);

	job->dma_bufs[index] = mem;
}
#endif
+
MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_started(struct mali_pp_job *job, u32 sub_job)
{
MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
/* Assert that we are marking the "first unstarted sub job" as started */
MALI_DEBUG_ASSERT(job->sub_jobs_started == sub_job);
/* Record that one sub job finished; a failed sub job also bumps the
 * error counter. Both counters are atomics, so no lock is needed. */
MALI_STATIC_INLINE void mali_pp_job_mark_sub_job_completed(struct mali_pp_job *job, mali_bool success)
{
	MALI_DEBUG_ASSERT_POINTER(job);

	_mali_osk_atomic_inc(&job->sub_jobs_completed);
	if (success == MALI_FALSE) {
		_mali_osk_atomic_inc(&job->sub_job_errors);
	}
}
/* MALI_TRUE when no sub job has reported an error so far. */
MALI_STATIC_INLINE mali_bool mali_pp_job_was_success(struct mali_pp_job *job)
{
	MALI_DEBUG_ASSERT_POINTER(job);

	return (0 == _mali_osk_atomic_read(&job->sub_job_errors)) ?
	       MALI_TRUE : MALI_FALSE;
}
-MALI_STATIC_INLINE mali_bool mali_pp_job_use_no_notification(struct mali_pp_job *job)
+MALI_STATIC_INLINE mali_bool mali_pp_job_use_no_notification(
+ struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (job->uargs.flags & _MALI_PP_JOB_FLAG_NO_NOTIFICATION) ?
+ MALI_TRUE : MALI_FALSE;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_pilot_job(struct mali_pp_job *job)
+{
+ /*
+ * A pilot job is currently identified as jobs which
+ * require no callback notification.
+ */
+ return mali_pp_job_use_no_notification(job);
+}
+
+MALI_STATIC_INLINE _mali_osk_notification_t *
+mali_pp_job_get_finished_notification(struct mali_pp_job *job)
+{
+ _mali_osk_notification_t *notification;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+ MALI_DEBUG_ASSERT_POINTER(job->finished_notification);
+
+ notification = job->finished_notification;
+ job->finished_notification = NULL;
+
+ return notification;
+}
+
+MALI_STATIC_INLINE mali_bool mali_pp_job_is_window_surface(
+ struct mali_pp_job *job)
{
- return job->uargs.flags & _MALI_PP_JOB_FLAG_NO_NOTIFICATION ? MALI_TRUE : MALI_FALSE;
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (job->uargs.flags & _MALI_PP_JOB_FLAG_IS_WINDOW_SURFACE)
+ ? MALI_TRUE : MALI_FALSE;
}
/* Performance counter configuration flags supplied by user space. */
MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_flag(struct mali_pp_job *job)
{
	u32 flag;

	MALI_DEBUG_ASSERT_POINTER(job);

	flag = job->uargs.perf_counter_flag;
	return flag;
}
-
/* Read back performance counter 0 result for sub job @sub_job. */
MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value0(struct mali_pp_job *job, u32 sub_job)
{
	u32 value;

	MALI_DEBUG_ASSERT_POINTER(job);

	value = job->perf_counter_value0[sub_job];
	return value;
}
/* Read back performance counter 1 result for sub job @sub_job. */
MALI_STATIC_INLINE u32 mali_pp_job_get_perf_counter_value1(struct mali_pp_job *job, u32 sub_job)
{
	u32 value;

	MALI_DEBUG_ASSERT_POINTER(job);

	value = job->perf_counter_value1[sub_job];
	return value;
}
/* Record performance counter 0 result for sub job @sub_job; writers
 * are serialized by the executor lock. */
MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value0(struct mali_pp_job *job, u32 sub_job, u32 value)
{
	MALI_DEBUG_ASSERT_POINTER(job);
	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();

	job->perf_counter_value0[sub_job] = value;
}
/* Record performance counter 1 result for sub job @sub_job; writers
 * are serialized by the executor lock. */
MALI_STATIC_INLINE void mali_pp_job_set_perf_counter_value1(struct mali_pp_job *job, u32 sub_job, u32 value)
{
	MALI_DEBUG_ASSERT_POINTER(job);
	MALI_DEBUG_ASSERT_EXECUTOR_LOCK_HELD();

	job->perf_counter_value1[sub_job] = value;
}
/* Sanity check a job before it is queued: a virtual job must consist
 * of exactly one sub job. */
MALI_STATIC_INLINE _mali_osk_errcode_t mali_pp_job_check(struct mali_pp_job *job)
{
	mali_bool invalid;

	MALI_DEBUG_ASSERT_POINTER(job);

	invalid = mali_pp_job_is_virtual(job) && (1 != job->sub_jobs_num);
	return invalid ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;
}
-/**
- * Returns MALI_TRUE if first job should be started after second job.
- *
- * @param first First job.
- * @param second Second job.
- * @return MALI_TRUE if first job should be started after second job, MALI_FALSE if not.
- */
-MALI_STATIC_INLINE mali_bool mali_pp_job_should_start_after(struct mali_pp_job *first, struct mali_pp_job *second)
-{
- MALI_DEBUG_ASSERT_POINTER(first);
- MALI_DEBUG_ASSERT_POINTER(second);
-
- /* First job should be started after second job if second job is in progress. */
- if (0 < second->sub_jobs_started) {
- return MALI_TRUE;
- }
-
- /* First job should be started after second job if first job has a higher job id. A span is
- used to handle job id wrapping. */
- if ((mali_pp_job_get_id(first) - mali_pp_job_get_id(second)) < MALI_SCHEDULER_JOB_ID_SPAN) {
- return MALI_TRUE;
- }
-
- /* Second job should be started after first job. */
- return MALI_FALSE;
-}
-
/**
* Returns MALI_TRUE if this job has more than two sub jobs and all sub jobs are unstarted.
*
/* MALI_TRUE for a physical job with more than two sub jobs where no
 * sub job has been started yet. */
MALI_STATIC_INLINE mali_bool mali_pp_job_is_large_and_unstarted(struct mali_pp_job *job)
{
	MALI_DEBUG_ASSERT_POINTER(job);
	MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
	MALI_DEBUG_ASSERT(!mali_pp_job_is_virtual(job));

	return (job->sub_jobs_num > 2 && job->sub_jobs_started == 0);
}
return &(job->tracker);
}
+MALI_STATIC_INLINE u32 *mali_pp_job_get_timeline_point_ptr(
+ struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+ return (u32 __user *)(uintptr_t)job->uargs.timeline_point_ptr;
+}
+
+
#endif /* __MALI_PP_JOB_H__ */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_scheduler.h"
-
#include "mali_kernel_common.h"
#include "mali_osk.h"
+#include "mali_osk_profiling.h"
+#include "mali_kernel_utilization.h"
+#include "mali_timeline.h"
+#include "mali_gp_job.h"
+#include "mali_pp_job.h"
+#include "mali_executor.h"
+#include "mali_group.h"
-mali_bool mali_scheduler_hints[MALI_SCHEDULER_HINT_MAX];
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#include "mali_memory_dma_buf.h"
+#endif
-static _mali_osk_atomic_t mali_job_id_autonumber;
-static _mali_osk_atomic_t mali_job_cache_order_autonumber;
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+#include <linux/sched.h>
+#include <trace/events/gpu.h>
+#endif
+/*
+ * ---------- static defines/constants ----------
+ */
-static _mali_osk_wq_work_t *pp_scheduler_wq_high_pri = NULL;
-static _mali_osk_wq_work_t *gp_scheduler_wq_high_pri = NULL;
+/*
+ * If dma_buf with map on demand is used, we defer job deletion and job queue
+ * if in atomic context, since both might sleep.
+ */
+#if defined(CONFIG_DMA_SHARED_BUFFER)
+#if !defined(CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH)
+#define MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE 1
+#define MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE 1
+#endif
+#endif
-static void mali_scheduler_wq_schedule_pp(void *arg)
-{
- MALI_IGNORE(arg);
+/*
+ * ---------- global variables (exported due to inline functions) ----------
+ */
- mali_pp_scheduler_schedule();
-}
+/* Lock protecting this module */
+_mali_osk_spinlock_irq_t *mali_scheduler_lock_obj = NULL;
-static void mali_scheduler_wq_schedule_gp(void *arg)
-{
- MALI_IGNORE(arg);
+/* Queue of jobs to be executed on the GP group */
+struct mali_scheduler_job_queue job_queue_gp;
- mali_gp_scheduler_schedule();
-}
+/* Queue of PP jobs */
+struct mali_scheduler_job_queue job_queue_pp;
+
+_mali_osk_atomic_t mali_job_id_autonumber;
+_mali_osk_atomic_t mali_job_cache_order_autonumber;
+/*
+ * ---------- static variables ----------
+ */
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE)
+static _mali_osk_wq_work_t *scheduler_wq_pp_job_delete = NULL;
+static _mali_osk_spinlock_irq_t *scheduler_pp_job_delete_lock = NULL;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(scheduler_pp_job_deletion_queue);
+#endif
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+static _mali_osk_wq_work_t *scheduler_wq_pp_job_queue = NULL;
+static _mali_osk_spinlock_irq_t *scheduler_pp_job_queue_lock = NULL;
+static _MALI_OSK_LIST_HEAD_STATIC_INIT(scheduler_pp_job_queue_list);
+#endif
+
+/*
+ * ---------- Forward declaration of static functions ----------
+ */
+
+static mali_timeline_point mali_scheduler_submit_gp_job(
+ struct mali_session_data *session, struct mali_gp_job *job);
+static mali_timeline_point mali_scheduler_submit_pp_job(
+ struct mali_session_data *session, struct mali_pp_job *job);
+
+static mali_bool mali_scheduler_queue_gp_job(struct mali_gp_job *job);
+static mali_bool mali_scheduler_queue_pp_job(struct mali_pp_job *job);
+
+static void mali_scheduler_return_gp_job_to_user(struct mali_gp_job *job,
+ mali_bool success);
+static void mali_scheduler_return_pp_job_to_user(struct mali_pp_job *job,
+ u32 num_cores_in_virtual);
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE)
+static void mali_scheduler_deferred_pp_job_delete(struct mali_pp_job *job);
+static void mali_scheduler_do_pp_job_delete(void *arg);
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) */
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+static void mali_scheduler_deferred_pp_job_queue(struct mali_pp_job *job);
+static void mali_scheduler_do_pp_job_queue(void *arg);
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+/*
+ * ---------- Actual implementation ----------
+ */
_mali_osk_errcode_t mali_scheduler_initialize(void)
{
- if (_MALI_OSK_ERR_OK != _mali_osk_atomic_init(&mali_job_id_autonumber, 0)) {
- MALI_DEBUG_PRINT(1, ("Initialization of atomic job id counter failed.\n"));
+ _mali_osk_atomic_init(&mali_job_id_autonumber, 0);
+ _mali_osk_atomic_init(&mali_job_cache_order_autonumber, 0);
+
+ _MALI_OSK_INIT_LIST_HEAD(&job_queue_gp.normal_pri);
+ _MALI_OSK_INIT_LIST_HEAD(&job_queue_gp.high_pri);
+ job_queue_gp.depth = 0;
+
+ _MALI_OSK_INIT_LIST_HEAD(&job_queue_pp.normal_pri);
+ _MALI_OSK_INIT_LIST_HEAD(&job_queue_pp.high_pri);
+ job_queue_pp.depth = 0;
+
+ mali_scheduler_lock_obj = _mali_osk_spinlock_irq_init(
+ _MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER);
+ if (NULL == mali_scheduler_lock_obj) {
+ mali_scheduler_terminate();
+ }
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE)
+ scheduler_wq_pp_job_delete = _mali_osk_wq_create_work(
+ mali_scheduler_do_pp_job_delete, NULL);
+ if (NULL == scheduler_wq_pp_job_delete) {
+ mali_scheduler_terminate();
return _MALI_OSK_ERR_FAULT;
}
- if (_MALI_OSK_ERR_OK != _mali_osk_atomic_init(&mali_job_cache_order_autonumber, 0)) {
- MALI_DEBUG_PRINT(1, ("Initialization of atomic job cache order counter failed.\n"));
- _mali_osk_atomic_term(&mali_job_id_autonumber);
+ scheduler_pp_job_delete_lock = _mali_osk_spinlock_irq_init(
+ _MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
+ if (NULL == scheduler_pp_job_delete_lock) {
+ mali_scheduler_terminate();
return _MALI_OSK_ERR_FAULT;
}
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) */
- pp_scheduler_wq_high_pri = _mali_osk_wq_create_work_high_pri(mali_scheduler_wq_schedule_pp, NULL);
- if (NULL == pp_scheduler_wq_high_pri) {
- _mali_osk_atomic_term(&mali_job_cache_order_autonumber);
- _mali_osk_atomic_term(&mali_job_id_autonumber);
- return _MALI_OSK_ERR_NOMEM;
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+ scheduler_wq_pp_job_queue = _mali_osk_wq_create_work(
+ mali_scheduler_do_pp_job_queue, NULL);
+ if (NULL == scheduler_wq_pp_job_queue) {
+ mali_scheduler_terminate();
+ return _MALI_OSK_ERR_FAULT;
}
- gp_scheduler_wq_high_pri = _mali_osk_wq_create_work_high_pri(mali_scheduler_wq_schedule_gp, NULL);
- if (NULL == gp_scheduler_wq_high_pri) {
- _mali_osk_wq_delete_work(pp_scheduler_wq_high_pri);
- _mali_osk_atomic_term(&mali_job_cache_order_autonumber);
- _mali_osk_atomic_term(&mali_job_id_autonumber);
- return _MALI_OSK_ERR_NOMEM;
+ scheduler_pp_job_queue_lock = _mali_osk_spinlock_irq_init(
+ _MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED);
+ if (NULL == scheduler_pp_job_queue_lock) {
+ mali_scheduler_terminate();
+ return _MALI_OSK_ERR_FAULT;
}
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
return _MALI_OSK_ERR_OK;
}
void mali_scheduler_terminate(void)
{
- _mali_osk_wq_delete_work(gp_scheduler_wq_high_pri);
- _mali_osk_wq_delete_work(pp_scheduler_wq_high_pri);
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+ if (NULL != scheduler_pp_job_queue_lock) {
+ _mali_osk_spinlock_irq_term(scheduler_pp_job_queue_lock);
+ scheduler_pp_job_queue_lock = NULL;
+ }
+
+ if (NULL != scheduler_wq_pp_job_queue) {
+ _mali_osk_wq_delete_work(scheduler_wq_pp_job_queue);
+ scheduler_wq_pp_job_queue = NULL;
+ }
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE)
+ if (NULL != scheduler_pp_job_delete_lock) {
+ _mali_osk_spinlock_irq_term(scheduler_pp_job_delete_lock);
+ scheduler_pp_job_delete_lock = NULL;
+ }
+
+ if (NULL != scheduler_wq_pp_job_delete) {
+ _mali_osk_wq_delete_work(scheduler_wq_pp_job_delete);
+ scheduler_wq_pp_job_delete = NULL;
+ }
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) */
+
+ if (NULL != mali_scheduler_lock_obj) {
+ _mali_osk_spinlock_irq_term(mali_scheduler_lock_obj);
+ mali_scheduler_lock_obj = NULL;
+ }
+
_mali_osk_atomic_term(&mali_job_cache_order_autonumber);
_mali_osk_atomic_term(&mali_job_id_autonumber);
}
-u32 mali_scheduler_get_new_id(void)
+u32 mali_scheduler_job_physical_head_count(void)
{
- u32 job_id = _mali_osk_atomic_inc_return(&mali_job_id_autonumber);
- return job_id;
+ /*
+ * Count how many physical sub jobs are present from the head of queue
+ * until the first virtual job is present.
+ * Early out when we have reached maximum number of PP cores (8)
+ */
+ u32 count = 0;
+ struct mali_pp_job *job;
+ struct mali_pp_job *temp;
+
+ /* Check for partially started normal pri jobs */
+ if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+ MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+ job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
+ struct mali_pp_job, list);
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ if (MALI_TRUE == mali_pp_job_has_started_sub_jobs(job)) {
+ /*
+ * Remember; virtual jobs can't be queued and started
+ * at the same time, so this must be a physical job
+ */
+ count += mali_pp_job_unstarted_sub_job_count(job);
+ if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) {
+ return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+ }
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.high_pri,
+ struct mali_pp_job, list) {
+ if (MALI_FALSE == mali_pp_job_is_virtual(job)) {
+ count += mali_pp_job_unstarted_sub_job_count(job);
+ if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <= count) {
+ return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+ }
+ } else {
+ /* Came across a virtual job, so stop counting */
+ return count;
+ }
+ }
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, temp, &job_queue_pp.normal_pri,
+ struct mali_pp_job, list) {
+ if (MALI_FALSE == mali_pp_job_is_virtual(job)) {
+ /* any partially started is already counted */
+ if (MALI_FALSE == mali_pp_job_has_started_sub_jobs(job)) {
+ count += mali_pp_job_unstarted_sub_job_count(job);
+ if (MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS <=
+ count) {
+ return MALI_MAX_NUMBER_OF_PHYSICAL_PP_GROUPS;
+ }
+ }
+ } else {
+ /* Came across a virtual job, so stop counting */
+ return count;
+ }
+ }
+
+ return count;
}
-u32 mali_scheduler_get_new_cache_order(void)
+mali_bool mali_scheduler_job_next_is_virtual(void)
{
- u32 job_cache_order = _mali_osk_atomic_inc_return(&mali_job_cache_order_autonumber);
- return job_cache_order;
+ struct mali_pp_job *job;
+
+ job = mali_scheduler_job_pp_virtual_peek();
+ if (NULL != job) {
+ MALI_DEBUG_ASSERT(mali_pp_job_is_virtual(job));
+
+ return MALI_TRUE;
+ }
+
+ return MALI_FALSE;
}
-void mali_scheduler_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule)
+struct mali_gp_job *mali_scheduler_job_gp_get(void)
{
- if (MALI_SCHEDULER_MASK_GP & mask) {
- /* GP needs scheduling. */
- if (deferred_schedule) {
- /* Schedule GP deferred. */
- _mali_osk_wq_schedule_work_high_pri(gp_scheduler_wq_high_pri);
- } else {
- /* Schedule GP now. */
- mali_gp_scheduler_schedule();
+ _mali_osk_list_t *queue;
+ struct mali_gp_job *job = NULL;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+ MALI_DEBUG_ASSERT(0 < job_queue_gp.depth);
+
+ if (!_mali_osk_list_empty(&job_queue_gp.high_pri)) {
+ queue = &job_queue_gp.high_pri;
+ } else {
+ queue = &job_queue_gp.normal_pri;
+ MALI_DEBUG_ASSERT(!_mali_osk_list_empty(queue));
+ }
+
+ job = _MALI_OSK_LIST_ENTRY(queue->next, struct mali_gp_job, list);
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ mali_gp_job_list_remove(job);
+ job_queue_gp.depth--;
+
+ return job;
+}
+
+struct mali_pp_job *mali_scheduler_job_pp_physical_peek(void)
+{
+ struct mali_pp_job *job = NULL;
+ struct mali_pp_job *tmp_job = NULL;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+
+ /*
+ * For PP jobs we favour partially started jobs in normal
+ * priority queue over unstarted jobs in high priority queue
+ */
+
+ if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+ MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+ tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
+ struct mali_pp_job, list);
+ MALI_DEBUG_ASSERT(NULL != tmp_job);
+
+ if (MALI_FALSE == mali_pp_job_is_virtual(tmp_job)) {
+ job = tmp_job;
+ }
+ }
+
+ if (NULL == job ||
+ MALI_FALSE == mali_pp_job_has_started_sub_jobs(job)) {
+ /*
+ * There isn't a partially started job in normal queue, so
+ * look in high priority queue.
+ */
+ if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) {
+ MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+ tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.high_pri.next,
+ struct mali_pp_job, list);
+ MALI_DEBUG_ASSERT(NULL != tmp_job);
+
+ if (MALI_FALSE == mali_pp_job_is_virtual(tmp_job)) {
+ job = tmp_job;
+ }
+ }
+ }
+
+ return job;
+}
+
+struct mali_pp_job *mali_scheduler_job_pp_virtual_peek(void)
+{
+ struct mali_pp_job *job = NULL;
+ struct mali_pp_job *tmp_job = NULL;
+
+ MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
+
+ if (!_mali_osk_list_empty(&job_queue_pp.high_pri)) {
+ MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+ tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.high_pri.next,
+ struct mali_pp_job, list);
+
+ if (MALI_TRUE == mali_pp_job_is_virtual(tmp_job)) {
+ job = tmp_job;
+ }
+ }
+
+ if (NULL == job) {
+ if (!_mali_osk_list_empty(&job_queue_pp.normal_pri)) {
+ MALI_DEBUG_ASSERT(0 < job_queue_pp.depth);
+
+ tmp_job = _MALI_OSK_LIST_ENTRY(job_queue_pp.normal_pri.next,
+ struct mali_pp_job, list);
+
+ if (MALI_TRUE == mali_pp_job_is_virtual(tmp_job)) {
+ job = tmp_job;
+ }
+ }
+ }
+
+ return job;
+}
+
+struct mali_pp_job *mali_scheduler_job_pp_physical_get(u32 *sub_job)
+{
+ struct mali_pp_job *job = mali_scheduler_job_pp_physical_peek();
+
+ MALI_DEBUG_ASSERT(MALI_FALSE == mali_pp_job_is_virtual(job));
+
+ if (NULL != job) {
+ *sub_job = mali_pp_job_get_first_unstarted_sub_job(job);
+
+ mali_pp_job_mark_sub_job_started(job, *sub_job);
+ if (MALI_FALSE == mali_pp_job_has_unstarted_sub_jobs(job)) {
+ /* Remove from queue when last sub job has been retrieved */
+ mali_pp_job_list_remove(job);
+ }
+
+ job_queue_pp.depth--;
+
+ /*
+ * Job about to start so it is no longer be
+ * possible to discard WB
+ */
+ mali_pp_job_fb_lookup_remove(job);
+ }
+
+ return job;
+}
+
+struct mali_pp_job *mali_scheduler_job_pp_virtual_get(void)
+{
+ struct mali_pp_job *job = mali_scheduler_job_pp_virtual_peek();
+
+ MALI_DEBUG_ASSERT(MALI_TRUE == mali_pp_job_is_virtual(job));
+
+ if (NULL != job) {
+ MALI_DEBUG_ASSERT(0 ==
+ mali_pp_job_get_first_unstarted_sub_job(job));
+ MALI_DEBUG_ASSERT(1 ==
+ mali_pp_job_get_sub_job_count(job));
+
+ mali_pp_job_mark_sub_job_started(job, 0);
+
+ mali_pp_job_list_remove(job);
+
+ job_queue_pp.depth--;
+
+ /*
+ * Job about to start so it is no longer be
+ * possible to discard WB
+ */
+ mali_pp_job_fb_lookup_remove(job);
+ }
+
+ return job;
+}
+
+mali_scheduler_mask mali_scheduler_activate_gp_job(struct mali_gp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Timeline activation for job %u (0x%08X).\n",
+ mali_gp_job_get_id(job), job));
+
+ mali_scheduler_lock();
+
+ if (!mali_scheduler_queue_gp_job(job)) {
+ /* Failed to enqueue job, release job (with error) */
+
+ mali_scheduler_unlock();
+
+ mali_timeline_tracker_release(mali_gp_job_get_tracker(job));
+ mali_gp_job_signal_pp_tracker(job, MALI_FALSE);
+
+ /* This will notify user space and close the job object */
+ mali_scheduler_complete_gp_job(job, MALI_FALSE,
+ MALI_TRUE, MALI_FALSE);
+
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+ mali_scheduler_unlock();
+
+ return MALI_SCHEDULER_MASK_GP;
+}
+
+mali_scheduler_mask mali_scheduler_activate_pp_job(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Timeline activation for job %u (0x%08X).\n",
+ mali_pp_job_get_id(job), job));
+
+ if (MALI_TRUE == mali_timeline_tracker_activation_error(
+ mali_pp_job_get_tracker(job))) {
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Job %u (0x%08X) activated with error, aborting.\n",
+ mali_pp_job_get_id(job), job));
+
+ mali_scheduler_lock();
+ mali_pp_job_fb_lookup_remove(job);
+ mali_pp_job_mark_unstarted_failed(job);
+ mali_scheduler_unlock();
+
+ mali_timeline_tracker_release(mali_pp_job_get_tracker(job));
+
+ /* This will notify user space and close the job object */
+ mali_scheduler_complete_pp_job(job, 0, MALI_TRUE, MALI_FALSE);
+
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+ if (mali_pp_job_needs_dma_buf_mapping(job)) {
+ mali_scheduler_deferred_pp_job_queue(job);
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
+
+ mali_scheduler_lock();
+
+ if (!mali_scheduler_queue_pp_job(job)) {
+ /* Failed to enqueue job, release job (with error) */
+ mali_pp_job_fb_lookup_remove(job);
+ mali_pp_job_mark_unstarted_failed(job);
+ mali_scheduler_unlock();
+
+ mali_timeline_tracker_release(mali_pp_job_get_tracker(job));
+
+ /* This will notify user space and close the job object */
+ mali_scheduler_complete_pp_job(job, 0, MALI_TRUE, MALI_FALSE);
+
+ return MALI_SCHEDULER_MASK_EMPTY;
+ }
+
+ mali_scheduler_unlock();
+ return MALI_SCHEDULER_MASK_PP;
+}
+
+void mali_scheduler_complete_gp_job(struct mali_gp_job *job,
+ mali_bool success,
+ mali_bool user_notification,
+ mali_bool dequeued)
+{
+ if (user_notification) {
+ mali_scheduler_return_gp_job_to_user(job, success);
+ }
+
+ if (dequeued) {
+ _mali_osk_pm_dev_ref_put();
+
+ if (mali_utilization_enabled()) {
+ mali_utilization_gp_end();
+ }
+ }
+
+ mali_gp_job_delete(job);
+}
+
+void mali_scheduler_complete_pp_job(struct mali_pp_job *job,
+ u32 num_cores_in_virtual,
+ mali_bool user_notification,
+ mali_bool dequeued)
+{
+ if (user_notification) {
+ mali_scheduler_return_pp_job_to_user(job,
+ num_cores_in_virtual);
+ }
+
+ if (dequeued) {
+#if defined(CONFIG_MALI_DVFS)
+ if (mali_pp_job_is_window_surface(job)) {
+ struct mali_session_data *session;
+ session = mali_pp_job_get_session(job);
+ mali_session_inc_num_window_jobs(session);
+ }
+#endif
+
+ _mali_osk_pm_dev_ref_put();
+
+ if (mali_utilization_enabled()) {
+ mali_utilization_pp_end();
+ }
+ }
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE)
+ /*
+ * The deletion of the job object (releasing sync refs etc)
+ * must be done in a different context
+ */
+ mali_scheduler_deferred_pp_job_delete(job);
+#else
+ /* no use cases need this in this configuration */
+ mali_pp_job_delete(job);
+#endif
+}
+
+void mali_scheduler_abort_session(struct mali_session_data *session)
+{
+ struct mali_gp_job *gp_job;
+ struct mali_gp_job *gp_tmp;
+ struct mali_pp_job *pp_job;
+ struct mali_pp_job *pp_tmp;
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs_gp);
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(removed_jobs_pp);
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT(session->is_aborting);
+
+ MALI_DEBUG_PRINT(3, ("Mali scheduler: Aborting all queued jobs from session 0x%08X.\n",
+ session));
+
+ mali_scheduler_lock();
+
+ /* Remove from GP normal priority queue */
+ _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &job_queue_gp.normal_pri,
+ struct mali_gp_job, list) {
+ if (mali_gp_job_get_session(gp_job) == session) {
+ mali_gp_job_list_move(gp_job, &removed_jobs_gp);
+ job_queue_gp.depth--;
+ }
+ }
+
+ /* Remove from GP high priority queue */
+ _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &job_queue_gp.high_pri,
+ struct mali_gp_job, list) {
+ if (mali_gp_job_get_session(gp_job) == session) {
+ mali_gp_job_list_move(gp_job, &removed_jobs_gp);
+ job_queue_gp.depth--;
+ }
+ }
+
+ /* Remove from PP normal priority queue */
+ _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp,
+ &job_queue_pp.normal_pri,
+ struct mali_pp_job, list) {
+ if (mali_pp_job_get_session(pp_job) == session) {
+ mali_pp_job_fb_lookup_remove(pp_job);
+
+ job_queue_pp.depth -=
+ mali_pp_job_unstarted_sub_job_count(
+ pp_job);
+ mali_pp_job_mark_unstarted_failed(pp_job);
+
+ if (mali_pp_job_is_complete(pp_job)) {
+ mali_pp_job_list_move(pp_job,
+ &removed_jobs_pp);
+ }
+ }
+ }
+
+ /* Remove from PP high priority queue */
+ _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp,
+ &job_queue_pp.high_pri,
+ struct mali_pp_job, list) {
+ if (mali_pp_job_get_session(pp_job) == session) {
+ mali_pp_job_fb_lookup_remove(pp_job);
+
+ job_queue_pp.depth -=
+ mali_pp_job_unstarted_sub_job_count(
+ pp_job);
+ mali_pp_job_mark_unstarted_failed(pp_job);
+
+ if (mali_pp_job_is_complete(pp_job)) {
+ mali_pp_job_list_move(pp_job,
+ &removed_jobs_pp);
+ }
+ }
+ }
+
+ /*
+ * Release scheduler lock so we can release trackers
+ * (which will potentially queue new jobs)
+ */
+ mali_scheduler_unlock();
+
+ /* Release and complete all (non-running) found GP jobs */
+ _MALI_OSK_LIST_FOREACHENTRY(gp_job, gp_tmp, &removed_jobs_gp,
+ struct mali_gp_job, list) {
+ mali_timeline_tracker_release(mali_gp_job_get_tracker(gp_job));
+ mali_gp_job_signal_pp_tracker(gp_job, MALI_FALSE);
+ _mali_osk_list_delinit(&gp_job->list);
+ mali_scheduler_complete_gp_job(gp_job,
+ MALI_FALSE, MALI_FALSE, MALI_TRUE);
+ }
+
+ /* Release and complete non-running PP jobs */
+ _MALI_OSK_LIST_FOREACHENTRY(pp_job, pp_tmp, &removed_jobs_pp,
+ struct mali_pp_job, list) {
+ mali_timeline_tracker_release(mali_pp_job_get_tracker(pp_job));
+ _mali_osk_list_delinit(&pp_job->list);
+ mali_scheduler_complete_pp_job(pp_job, 0,
+ MALI_FALSE, MALI_TRUE);
+ }
+}
+
+_mali_osk_errcode_t _mali_ukk_gp_start_job(void *ctx,
+ _mali_uk_gp_start_job_s *uargs)
+{
+ struct mali_session_data *session;
+ struct mali_gp_job *job;
+ mali_timeline_point point;
+ u32 __user *point_ptr = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(uargs);
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)ctx;
+
+ job = mali_gp_job_create(session, uargs, mali_scheduler_get_new_id(),
+ NULL);
+ if (NULL == job) {
+ MALI_PRINT_ERROR(("Failed to create GP job.\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ point_ptr = (u32 __user *)(uintptr_t)mali_gp_job_get_timeline_point_ptr(job);
+
+ point = mali_scheduler_submit_gp_job(session, job);
+
+ if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
+ /*
+ * Let user space know that something failed
+ * after the job was started.
+ */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_pp_start_job(void *ctx,
+ _mali_uk_pp_start_job_s *uargs)
+{
+ struct mali_session_data *session;
+ struct mali_pp_job *job;
+ mali_timeline_point point;
+ u32 __user *point_ptr = NULL;
+
+ MALI_DEBUG_ASSERT_POINTER(uargs);
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)ctx;
+
+ job = mali_pp_job_create(session, uargs, mali_scheduler_get_new_id());
+ if (NULL == job) {
+ MALI_PRINT_ERROR(("Failed to create PP job.\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ point_ptr = (u32 __user *)(uintptr_t)mali_pp_job_get_timeline_point_ptr(job);
+
+ point = mali_scheduler_submit_pp_job(session, job);
+ job = NULL;
+
+ if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
+ /*
+ * Let user space know that something failed
+ * after the job was started.
+ */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_ukk_pp_and_gp_start_job(void *ctx,
+ _mali_uk_pp_and_gp_start_job_s *uargs)
+{
+ struct mali_session_data *session;
+ _mali_uk_pp_and_gp_start_job_s kargs;
+ struct mali_pp_job *pp_job;
+ struct mali_gp_job *gp_job;
+ u32 __user *point_ptr = NULL;
+ mali_timeline_point point;
+ _mali_uk_pp_start_job_s __user *pp_args;
+ _mali_uk_gp_start_job_s __user *gp_args;
+
+ MALI_DEBUG_ASSERT_POINTER(ctx);
+ MALI_DEBUG_ASSERT_POINTER(uargs);
+
+ session = (struct mali_session_data *) ctx;
+
+ if (0 != _mali_osk_copy_from_user(&kargs, uargs,
+ sizeof(_mali_uk_pp_and_gp_start_job_s))) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ pp_args = (_mali_uk_pp_start_job_s __user *)(uintptr_t)kargs.pp_args;
+ gp_args = (_mali_uk_gp_start_job_s __user *)(uintptr_t)kargs.gp_args;
+
+ pp_job = mali_pp_job_create(session, pp_args,
+ mali_scheduler_get_new_id());
+ if (NULL == pp_job) {
+ MALI_PRINT_ERROR(("Failed to create PP job.\n"));
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ gp_job = mali_gp_job_create(session, gp_args,
+ mali_scheduler_get_new_id(),
+ mali_pp_job_get_tracker(pp_job));
+ if (NULL == gp_job) {
+ MALI_PRINT_ERROR(("Failed to create GP job.\n"));
+ mali_pp_job_delete(pp_job);
+ return _MALI_OSK_ERR_NOMEM;
+ }
+
+ point_ptr = (u32 __user *)(uintptr_t)mali_pp_job_get_timeline_point_ptr(pp_job);
+
+ /* Submit GP job. */
+ mali_scheduler_submit_gp_job(session, gp_job);
+ gp_job = NULL;
+
+ /* Submit PP job. */
+ point = mali_scheduler_submit_pp_job(session, pp_job);
+ pp_job = NULL;
+
+ if (0 != _mali_osk_put_user(((u32) point), point_ptr)) {
+ /*
+ * Let user space know that something failed
+ * after the jobs were started.
+ */
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+void _mali_ukk_pp_job_disable_wb(_mali_uk_pp_disable_wb_s *args)
+{
+ struct mali_session_data *session;
+ struct mali_pp_job *job;
+ struct mali_pp_job *tmp;
+ u32 fb_lookup_id;
+
+ MALI_DEBUG_ASSERT_POINTER(args);
+ MALI_DEBUG_ASSERT(NULL != (void *)(uintptr_t)args->ctx);
+
+ session = (struct mali_session_data *)(uintptr_t)args->ctx;
+
+ fb_lookup_id = args->fb_id & MALI_PP_JOB_FB_LOOKUP_LIST_MASK;
+
+ mali_scheduler_lock();
+
+ /* Iterate over all jobs for given frame builder_id. */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp,
+ &session->pp_job_fb_lookup_list[fb_lookup_id],
+ struct mali_pp_job, session_fb_lookup_list) {
+ MALI_DEBUG_CODE(u32 disable_mask = 0);
+
+ if (mali_pp_job_get_frame_builder_id(job) !=
+ (u32) args->fb_id) {
+ MALI_DEBUG_PRINT(4, ("Mali PP scheduler: Disable WB mismatching FB.\n"));
+ continue;
+ }
+
+ MALI_DEBUG_CODE(disable_mask |= 0xD << (4 * 3));
+
+ if (mali_pp_job_get_wb0_source_addr(job) == args->wb0_memory) {
+ MALI_DEBUG_CODE(disable_mask |= 0x1 << (4 * 1));
+ mali_pp_job_disable_wb0(job);
+ }
+
+ if (mali_pp_job_get_wb1_source_addr(job) == args->wb1_memory) {
+ MALI_DEBUG_CODE(disable_mask |= 0x2 << (4 * 2));
+ mali_pp_job_disable_wb1(job);
}
+
+ if (mali_pp_job_get_wb2_source_addr(job) == args->wb2_memory) {
+ MALI_DEBUG_CODE(disable_mask |= 0x3 << (4 * 3));
+ mali_pp_job_disable_wb2(job);
+ }
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: Disable WB: 0x%X.\n",
+ disable_mask));
}
- if (MALI_SCHEDULER_MASK_PP & mask) {
- /* PP needs scheduling. */
- if (deferred_schedule) {
- /* Schedule PP deferred. */
- _mali_osk_wq_schedule_work_high_pri(pp_scheduler_wq_high_pri);
+ mali_scheduler_unlock();
+}
+
+#if MALI_STATE_TRACKING
+u32 mali_scheduler_dump_state(char *buf, u32 size)
+{
+ int n = 0;
+
+ n += _mali_osk_snprintf(buf + n, size - n, "GP queues\n");
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tQueue depth: %u\n", job_queue_gp.depth);
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tNormal priority queue is %s\n",
+ _mali_osk_list_empty(&job_queue_gp.normal_pri) ?
+ "empty" : "not empty");
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tHigh priority queue is %s\n",
+ _mali_osk_list_empty(&job_queue_gp.high_pri) ?
+ "empty" : "not empty");
+
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "PP queues\n");
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tQueue depth: %u\n", job_queue_pp.depth);
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tNormal priority queue is %s\n",
+ _mali_osk_list_empty(&job_queue_pp.normal_pri)
+ ? "empty" : "not empty");
+ n += _mali_osk_snprintf(buf + n, size - n,
+ "\tHigh priority queue is %s\n",
+ _mali_osk_list_empty(&job_queue_pp.high_pri)
+ ? "empty" : "not empty");
+
+ n += _mali_osk_snprintf(buf + n, size - n, "\n");
+
+ return n;
+}
+#endif
+
+/*
+ * ---------- Implementation of static functions ----------
+ */
+
+static mali_timeline_point mali_scheduler_submit_gp_job(
+ struct mali_session_data *session, struct mali_gp_job *job)
+{
+ mali_timeline_point point;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ /* Add job to Timeline system. */
+ point = mali_timeline_system_add_tracker(session->timeline_system,
+ mali_gp_job_get_tracker(job), MALI_TIMELINE_GP);
+
+ return point;
+}
+
+static mali_timeline_point mali_scheduler_submit_pp_job(
+ struct mali_session_data *session, struct mali_pp_job *job)
+{
+ mali_timeline_point point;
+
+ MALI_DEBUG_ASSERT_POINTER(session);
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ mali_scheduler_lock();
+ /*
+	 * Add the job to the lookup list used to quickly discard
+ * writeback units of queued jobs.
+ */
+ mali_pp_job_fb_lookup_add(job);
+ mali_scheduler_unlock();
+
+ /* Add job to Timeline system. */
+ point = mali_timeline_system_add_tracker(session->timeline_system,
+ mali_pp_job_get_tracker(job), MALI_TIMELINE_PP);
+
+ return point;
+}
+
+static mali_bool mali_scheduler_queue_gp_job(struct mali_gp_job *job)
+{
+ struct mali_session_data *session;
+ _mali_osk_list_t *queue;
+
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ session = mali_gp_job_get_session(job);
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (unlikely(session->is_aborting)) {
+ MALI_DEBUG_PRINT(4, ("Mali GP scheduler: Job %u (0x%08X) queued while session is aborting.\n",
+ mali_gp_job_get_id(job), job));
+ return MALI_FALSE; /* job not queued */
+ }
+
+ mali_gp_job_set_cache_order(job, mali_scheduler_get_new_cache_order());
+
+ /* Determine which queue the job should be added to. */
+ if (session->use_high_priority_job_queue) {
+ queue = &job_queue_gp.high_pri;
+ } else {
+ queue = &job_queue_gp.normal_pri;
+ }
+
+ job_queue_gp.depth += 1;
+
+	/* Add job to queue (mali_gp_job_list_add finds the correct place). */
+ mali_gp_job_list_add(job, queue);
+
+ /*
+ * We hold a PM reference for every job we hold queued (and running)
+	 * It is important that we take this reference after the job has been
+	 * added to the queue so that any runtime resume could schedule this
+ * job right there and then.
+ */
+ _mali_osk_pm_dev_ref_get_async();
+
+ if (mali_utilization_enabled()) {
+ /*
+ * We cheat a little bit by counting the GP as busy from the
+ * time a GP job is queued. This will be fine because we only
+	 * lose the tiny idle gap between jobs, but we will instead
+ * get less utilization work to do (less locks taken)
+ */
+ mali_utilization_gp_start();
+ }
+
+ /* Add profiling events for job enqueued */
+ _mali_osk_profiling_add_event(
+ MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_GP_ENQUEUE,
+ mali_gp_job_get_pid(job),
+ mali_gp_job_get_tid(job),
+ mali_gp_job_get_frame_builder_id(job),
+ mali_gp_job_get_flush_id(job),
+ 0);
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_job_enqueue(mali_gp_job_get_tid(job),
+ mali_gp_job_get_id(job), "GP");
+#endif
+
+ MALI_DEBUG_PRINT(3, ("Mali GP scheduler: Job %u (0x%08X) queued\n",
+ mali_gp_job_get_id(job), job));
+
+ return MALI_TRUE; /* job queued */
+}
+
+static mali_bool mali_scheduler_queue_pp_job(struct mali_pp_job *job)
+{
+ struct mali_session_data *session;
+ _mali_osk_list_t *queue = NULL;
+
+ MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD();
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ session = mali_pp_job_get_session(job);
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ if (unlikely(session->is_aborting)) {
+ MALI_DEBUG_PRINT(2, ("Mali PP scheduler: Job %u (0x%08X) queued while session is aborting.\n",
+ mali_pp_job_get_id(job), job));
+ return MALI_FALSE; /* job not queued */
+ }
+
+ mali_pp_job_set_cache_order(job, mali_scheduler_get_new_cache_order());
+
+ if (session->use_high_priority_job_queue) {
+ queue = &job_queue_pp.high_pri;
+ } else {
+ queue = &job_queue_pp.normal_pri;
+ }
+
+ job_queue_pp.depth +=
+ mali_pp_job_get_sub_job_count(job);
+
+	/* Add job to queue (mali_pp_job_list_add finds the correct place). */
+ mali_pp_job_list_add(job, queue);
+
+ /*
+ * We hold a PM reference for every job we hold queued (and running)
+	 * It is important that we take this reference after the job has been
+	 * added to the queue so that any runtime resume could schedule this
+ * job right there and then.
+ */
+ _mali_osk_pm_dev_ref_get_async();
+
+ if (mali_utilization_enabled()) {
+ /*
+ * We cheat a little bit by counting the PP as busy from the
+ * time a PP job is queued. This will be fine because we only
+	 * lose the tiny idle gap between jobs, but we will instead
+ * get less utilization work to do (less locks taken)
+ */
+ mali_utilization_pp_start();
+ }
+
+ /* Add profiling events for job enqueued */
+
+ _mali_osk_profiling_add_event(
+ MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_SOFTWARE |
+ MALI_PROFILING_EVENT_REASON_SINGLE_SW_PP_ENQUEUE,
+ mali_pp_job_get_pid(job),
+ mali_pp_job_get_tid(job),
+ mali_pp_job_get_frame_builder_id(job),
+ mali_pp_job_get_flush_id(job),
+ 0);
+
+#if defined(CONFIG_GPU_TRACEPOINTS) && defined(CONFIG_TRACEPOINTS)
+ trace_gpu_job_enqueue(mali_pp_job_get_tid(job),
+ mali_pp_job_get_id(job), "PP");
+#endif
+
+ MALI_DEBUG_PRINT(3, ("Mali PP scheduler: %s job %u (0x%08X) with %u parts queued.\n",
+ mali_pp_job_is_virtual(job)
+ ? "Virtual" : "Physical",
+ mali_pp_job_get_id(job), job,
+ mali_pp_job_get_sub_job_count(job)));
+
+ return MALI_TRUE; /* job queued */
+}
+
+static void mali_scheduler_return_gp_job_to_user(struct mali_gp_job *job,
+ mali_bool success)
+{
+ _mali_uk_gp_job_finished_s *jobres;
+ struct mali_session_data *session;
+ _mali_osk_notification_t *notification;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ session = mali_gp_job_get_session(job);
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ notification = mali_gp_job_get_finished_notification(job);
+ MALI_DEBUG_ASSERT_POINTER(notification);
+
+ jobres = notification->result_buffer;
+ MALI_DEBUG_ASSERT_POINTER(jobres);
+
+ jobres->user_job_ptr = mali_gp_job_get_user_id(job);
+ if (MALI_TRUE == success) {
+ jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+ } else {
+ jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+ }
+ jobres->heap_current_addr = mali_gp_job_get_current_heap_addr(job);
+ jobres->perf_counter0 = mali_gp_job_get_perf_counter_value0(job);
+ jobres->perf_counter1 = mali_gp_job_get_perf_counter_value1(job);
+
+ mali_session_send_notification(session, notification);
+}
+
+static void mali_scheduler_return_pp_job_to_user(struct mali_pp_job *job,
+ u32 num_cores_in_virtual)
+{
+ u32 i;
+ u32 num_counters_to_copy;
+ _mali_uk_pp_job_finished_s *jobres;
+ struct mali_session_data *session;
+ _mali_osk_notification_t *notification;
+
+ if (MALI_TRUE == mali_pp_job_use_no_notification(job)) {
+ return;
+ }
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ session = mali_pp_job_get_session(job);
+ MALI_DEBUG_ASSERT_POINTER(session);
+
+ notification = mali_pp_job_get_finished_notification(job);
+ MALI_DEBUG_ASSERT_POINTER(notification);
+
+ jobres = notification->result_buffer;
+ MALI_DEBUG_ASSERT_POINTER(jobres);
+
+ jobres->user_job_ptr = mali_pp_job_get_user_id(job);
+ if (MALI_TRUE == mali_pp_job_was_success(job)) {
+ jobres->status = _MALI_UK_JOB_STATUS_END_SUCCESS;
+ } else {
+ jobres->status = _MALI_UK_JOB_STATUS_END_UNKNOWN_ERR;
+ }
+
+ if (mali_pp_job_is_virtual(job)) {
+ num_counters_to_copy = num_cores_in_virtual;
+ } else {
+ num_counters_to_copy = mali_pp_job_get_sub_job_count(job);
+ }
+
+ for (i = 0; i < num_counters_to_copy; i++) {
+ jobres->perf_counter0[i] =
+ mali_pp_job_get_perf_counter_value0(job, i);
+ jobres->perf_counter1[i] =
+ mali_pp_job_get_perf_counter_value1(job, i);
+ jobres->perf_counter_src0 =
+ mali_pp_job_get_pp_counter_global_src0();
+ jobres->perf_counter_src1 =
+ mali_pp_job_get_pp_counter_global_src1();
+ }
+
+ mali_session_send_notification(session, notification);
+}
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE)
+
+static void mali_scheduler_deferred_pp_job_delete(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ _mali_osk_spinlock_irq_lock(scheduler_pp_job_delete_lock);
+ mali_pp_job_list_addtail(job, &scheduler_pp_job_deletion_queue);
+ _mali_osk_spinlock_irq_unlock(scheduler_pp_job_delete_lock);
+
+ _mali_osk_wq_schedule_work(scheduler_wq_pp_job_delete);
+}
+
+static void mali_scheduler_do_pp_job_delete(void *arg)
+{
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
+ struct mali_pp_job *job;
+ struct mali_pp_job *tmp;
+
+ MALI_IGNORE(arg);
+
+ /*
+ * Quickly "unhook" the jobs pending to be deleted, so we can release
+ * the lock before we start deleting the job objects
+ * (without any locks held)
+ */
+ _mali_osk_spinlock_irq_lock(scheduler_pp_job_delete_lock);
+ _mali_osk_list_move_list(&scheduler_pp_job_deletion_queue, &list);
+ _mali_osk_spinlock_irq_unlock(scheduler_pp_job_delete_lock);
+
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
+ struct mali_pp_job, list) {
+
+ _mali_osk_list_delinit(&job->list);
+ mali_pp_job_delete(job); /* delete the job object itself */
+ }
+}
+
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_DELETE) */
+
+#if defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE)
+
+static void mali_scheduler_deferred_pp_job_queue(struct mali_pp_job *job)
+{
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ _mali_osk_spinlock_irq_lock(scheduler_pp_job_queue_lock);
+ mali_pp_job_list_addtail(job, &scheduler_pp_job_queue_list);
+ _mali_osk_spinlock_irq_unlock(scheduler_pp_job_queue_lock);
+
+ _mali_osk_wq_schedule_work(scheduler_wq_pp_job_queue);
+}
+
+static void mali_scheduler_do_pp_job_queue(void *arg)
+{
+ _MALI_OSK_LIST_HEAD_STATIC_INIT(list);
+ struct mali_pp_job *job;
+ struct mali_pp_job *tmp;
+ mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
+
+ MALI_IGNORE(arg);
+
+ /*
+ * Quickly "unhook" the jobs pending to be queued, so we can release
+ * the lock before we start queueing the job objects
+ * (without any locks held)
+ */
+ _mali_osk_spinlock_irq_lock(scheduler_pp_job_queue_lock);
+ _mali_osk_list_move_list(&scheduler_pp_job_queue_list, &list);
+ _mali_osk_spinlock_irq_unlock(scheduler_pp_job_queue_lock);
+
+ /* First loop through all jobs and do the pre-work (no locks needed) */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
+ struct mali_pp_job, list) {
+ if (mali_pp_job_needs_dma_buf_mapping(job)) {
+ /*
+ * This operation could fail, but we continue anyway,
+ * because the worst that could happen is that this
+ * job will fail due to a Mali page fault.
+ */
+ mali_dma_buf_map_job(job);
+ }
+ }
+
+ mali_scheduler_lock();
+
+ /* Then loop through all jobs again to queue them (lock needed) */
+ _MALI_OSK_LIST_FOREACHENTRY(job, tmp, &list,
+ struct mali_pp_job, list) {
+
+ /* Remove from scheduler_pp_job_queue_list before queueing */
+ mali_pp_job_list_remove(job);
+
+ if (mali_scheduler_queue_pp_job(job)) {
+ /* Job queued successfully */
+ schedule_mask |= MALI_SCHEDULER_MASK_PP;
} else {
- /* Schedule PP now. */
- mali_pp_scheduler_schedule();
+ /* Failed to enqueue job, release job (with error) */
+ mali_pp_job_fb_lookup_remove(job);
+ mali_pp_job_mark_unstarted_failed(job);
+
+ /* unlock scheduler in this uncommon case */
+ mali_scheduler_unlock();
+
+ mali_timeline_tracker_release(
+ mali_pp_job_get_tracker(job));
+
+ /* Notify user space and close the job object */
+ mali_scheduler_complete_pp_job(job, 0, MALI_TRUE,
+ MALI_FALSE);
+
+ mali_scheduler_lock();
}
}
+
+ mali_scheduler_unlock();
+
+ /* Trigger scheduling of jobs */
+ mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
}
+
+#endif /* defined(MALI_SCHEDULER_USE_DEFERRED_PP_JOB_QUEUE) */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_SCHEDULER_H__
#define __MALI_SCHEDULER_H__
#include "mali_osk.h"
+#include "mali_osk_list.h"
#include "mali_scheduler_types.h"
-#include "mali_gp_scheduler.h"
-#include "mali_pp_scheduler.h"
+#include "mali_session.h"
+
+struct mali_scheduler_job_queue {
+ _MALI_OSK_LIST_HEAD(normal_pri); /* Queued jobs with normal priority */
+ _MALI_OSK_LIST_HEAD(high_pri); /* Queued jobs with high priority */
+ u32 depth; /* Depth of combined queues. */
+};
+
+extern _mali_osk_spinlock_irq_t *mali_scheduler_lock_obj;
+
+/* Queue of jobs to be executed on the GP group */
+extern struct mali_scheduler_job_queue job_queue_gp;
+
+/* Queue of PP jobs */
+extern struct mali_scheduler_job_queue job_queue_pp;
+
+extern _mali_osk_atomic_t mali_job_id_autonumber;
+extern _mali_osk_atomic_t mali_job_cache_order_autonumber;
+
+#define MALI_DEBUG_ASSERT_SCHEDULER_LOCK_HELD() MALI_DEBUG_ASSERT_LOCK_HELD(mali_scheduler_lock_obj);
_mali_osk_errcode_t mali_scheduler_initialize(void);
void mali_scheduler_terminate(void);
-u32 mali_scheduler_get_new_id(void);
-u32 mali_scheduler_get_new_cache_order(void);
+MALI_STATIC_INLINE void mali_scheduler_lock(void)
+{
+ _mali_osk_spinlock_irq_lock(mali_scheduler_lock_obj);
+ MALI_DEBUG_PRINT(5, ("Mali scheduler: scheduler lock taken.\n"));
+}
-/**
- * @brief Reset all groups
- *
- * This function resets all groups known by the both the PP and GP scheuduler.
- * This must be called after the Mali HW has been powered on in order to reset
- * the HW.
- */
-MALI_STATIC_INLINE void mali_scheduler_reset_all_groups(void)
+MALI_STATIC_INLINE void mali_scheduler_unlock(void)
{
- mali_gp_scheduler_reset_all_groups();
- mali_pp_scheduler_reset_all_groups();
+ MALI_DEBUG_PRINT(5, ("Mali scheduler: Releasing scheduler lock.\n"));
+ _mali_osk_spinlock_irq_unlock(mali_scheduler_lock_obj);
}
-/**
- * @brief Zap TLB on all active groups running \a session
- *
- * @param session Pointer to the session to zap
- */
-MALI_STATIC_INLINE void mali_scheduler_zap_all_active(struct mali_session_data *session)
+MALI_STATIC_INLINE u32 mali_scheduler_job_gp_count(void)
{
- mali_gp_scheduler_zap_all_active(session);
- mali_pp_scheduler_zap_all_active(session);
+ return job_queue_gp.depth;
+}
+
+u32 mali_scheduler_job_physical_head_count(void);
+
+mali_bool mali_scheduler_job_next_is_virtual(void);
+
+struct mali_gp_job *mali_scheduler_job_gp_get(void);
+struct mali_pp_job *mali_scheduler_job_pp_physical_peek(void);
+struct mali_pp_job *mali_scheduler_job_pp_virtual_peek(void);
+struct mali_pp_job *mali_scheduler_job_pp_physical_get(u32 *sub_job);
+struct mali_pp_job *mali_scheduler_job_pp_virtual_get(void);
+
+MALI_STATIC_INLINE u32 mali_scheduler_get_new_id(void)
+{
+ return _mali_osk_atomic_inc_return(&mali_job_id_autonumber);
+}
+
+MALI_STATIC_INLINE u32 mali_scheduler_get_new_cache_order(void)
+{
+ return _mali_osk_atomic_inc_return(&mali_job_cache_order_autonumber);
}
/**
- * Check if bit is set in scheduler mask.
+ * @brief Used by the Timeline system to queue a GP job.
*
- * @param mask Scheduler mask to check.
- * @param bit Bit to check.
- * @return MALI_TRUE if bit is set in scheduler mask, MALI_FALSE if not.
+ * @note @ref mali_executor_schedule_from_mask() should be called if this
+ * function returns non-zero.
+ *
+ * @param job The GP job that is being activated.
+ *
+ * @return A scheduling bitmask that can be used to decide if scheduling is
+ * necessary after this call.
*/
-MALI_STATIC_INLINE mali_bool mali_scheduler_mask_is_set(mali_scheduler_mask mask, mali_scheduler_mask bit)
-{
- return MALI_SCHEDULER_MASK_EMPTY != (bit & mask);
-}
+mali_scheduler_mask mali_scheduler_activate_gp_job(struct mali_gp_job *job);
/**
- * Schedule GP and PP according to bitmask.
+ * @brief Used by the Timeline system to queue a PP job.
*
- * @param mask A scheduling bitmask.
- * @param deferred_schedule MALI_TRUE if schedule should be deferred, MALI_FALSE if not.
+ * @note @ref mali_executor_schedule_from_mask() should be called if this
+ * function returns non-zero.
+ *
+ * @param job The PP job that is being activated.
+ *
+ * @return A scheduling bitmask that can be used to decide if scheduling is
+ * necessary after this call.
*/
-void mali_scheduler_schedule_from_mask(mali_scheduler_mask mask, mali_bool deferred_schedule);
+mali_scheduler_mask mali_scheduler_activate_pp_job(struct mali_pp_job *job);
-/* Enable or disable scheduler hint. */
-extern mali_bool mali_scheduler_hints[MALI_SCHEDULER_HINT_MAX];
+void mali_scheduler_complete_gp_job(struct mali_gp_job *job,
+ mali_bool success,
+ mali_bool user_notification,
+ mali_bool dequeued);
-MALI_STATIC_INLINE void mali_scheduler_hint_enable(mali_scheduler_hint hint)
-{
- MALI_DEBUG_ASSERT(hint < MALI_SCHEDULER_HINT_MAX);
- mali_scheduler_hints[hint] = MALI_TRUE;
-}
+void mali_scheduler_complete_pp_job(struct mali_pp_job *job,
+ u32 num_cores_in_virtual,
+ mali_bool user_notification,
+ mali_bool dequeued);
-MALI_STATIC_INLINE void mali_scheduler_hint_disable(mali_scheduler_hint hint)
-{
- MALI_DEBUG_ASSERT(hint < MALI_SCHEDULER_HINT_MAX);
- mali_scheduler_hints[hint] = MALI_FALSE;
-}
+void mali_scheduler_abort_session(struct mali_session_data *session);
-MALI_STATIC_INLINE mali_bool mali_scheduler_hint_is_enabled(mali_scheduler_hint hint)
-{
- MALI_DEBUG_ASSERT(hint < MALI_SCHEDULER_HINT_MAX);
- return mali_scheduler_hints[hint];
-}
+#if MALI_STATE_TRACKING
+u32 mali_scheduler_dump_state(char *buf, u32 size);
+#endif
#endif /* __MALI_SCHEDULER_H__ */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_SCHEDULER_TYPES_H__
#define MALI_SCHEDULER_MASK_EMPTY 0
#define MALI_SCHEDULER_MASK_ALL (MALI_SCHEDULER_MASK_GP | MALI_SCHEDULER_MASK_PP)
-typedef enum {
- MALI_SCHEDULER_HINT_GP_BOUND = 0
-#define MALI_SCHEDULER_HINT_MAX 1
-} mali_scheduler_hint;
-
#endif /* __MALI_SCHEDULER_TYPES_H__ */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_osk.h"
#include "mali_osk_list.h"
#include "mali_session.h"
+#include "mali_ukk.h"
_MALI_OSK_LIST_HEAD(mali_sessions);
static u32 mali_session_count = 0;
-_mali_osk_spinlock_irq_t *mali_sessions_lock;
+_mali_osk_spinlock_irq_t *mali_sessions_lock = NULL;
_mali_osk_errcode_t mali_session_initialize(void)
{
_MALI_OSK_INIT_LIST_HEAD(&mali_sessions);
- mali_sessions_lock = _mali_osk_spinlock_irq_init(_MALI_OSK_LOCKFLAG_ORDERED, _MALI_OSK_LOCK_ORDER_SESSIONS);
-
- if (NULL == mali_sessions_lock) return _MALI_OSK_ERR_NOMEM;
+ mali_sessions_lock = _mali_osk_spinlock_irq_init(
+ _MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_SESSIONS);
+ if (NULL == mali_sessions_lock) {
+ return _MALI_OSK_ERR_NOMEM;
+ }
return _MALI_OSK_ERR_OK;
}
void mali_session_terminate(void)
{
- _mali_osk_spinlock_irq_term(mali_sessions_lock);
+ if (NULL != mali_sessions_lock) {
+ _mali_osk_spinlock_irq_term(mali_sessions_lock);
+ mali_sessions_lock = NULL;
+ }
}
void mali_session_add(struct mali_session_data *session)
* Get the max completed window jobs from all active session,
* which will be used in window render frame per sec calculate
*/
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+#if defined(CONFIG_MALI_DVFS)
u32 mali_session_max_window_num(void)
{
struct mali_session_data *session, *tmp;
mali_session_lock();
MALI_SESSION_FOREACH(session, tmp, link) {
- tmp_number = _mali_osk_atomic_xchg(&session->number_of_window_jobs, 0);
+ tmp_number = _mali_osk_atomic_xchg(
+ &session->number_of_window_jobs, 0);
if (max_window_num < tmp_number) {
max_window_num = tmp_number;
}
return max_window_num;
}
#endif
+
+void mali_session_memory_tracking(_mali_osk_print_ctx *print_ctx)
+{
+ struct mali_session_data *session, *tmp;
+ u32 mali_mem_usage;
+ u32 total_mali_mem_size;
+
+ MALI_DEBUG_ASSERT_POINTER(print_ctx);
+ mali_session_lock();
+ MALI_SESSION_FOREACH(session, tmp, link) {
+ _mali_osk_ctxprintf(print_ctx, " %-25s %-10u %-10u %-15u %-15u %-10u %-10u\n",
+ session->comm, session->pid,
+ session->mali_mem_array[MALI_MEM_OS] + session->mali_mem_array[MALI_MEM_BLOCK], session->max_mali_mem_allocated,
+ session->mali_mem_array[MALI_MEM_EXTERNAL], session->mali_mem_array[MALI_MEM_UMP],
+ session->mali_mem_array[MALI_MEM_DMA_BUF]);
+ }
+ mali_session_unlock();
+ mali_mem_usage = _mali_ukk_report_memory_usage();
+ total_mali_mem_size = _mali_ukk_report_total_memory_size();
+ _mali_osk_ctxprintf(print_ctx, "Mali mem usage: %u\nMali mem limit: %u\n", mali_mem_usage, total_mali_mem_size);
+}
\ No newline at end of file
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_SESSION_H__
#include "mali_kernel_descriptor_mapping.h"
#include "mali_osk.h"
#include "mali_osk_list.h"
+#include "mali_memory_types.h"
struct mali_timeline_system;
struct mali_soft_system;
_MALI_OSK_LIST_HEAD(link); /**< Link for list of all sessions */
_MALI_OSK_LIST_HEAD(pp_job_list); /**< List of all PP jobs on this session */
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+#if defined(CONFIG_MALI_DVFS)
_mali_osk_atomic_t number_of_window_jobs; /**< Record the window jobs completed on this session in a period */
#endif
mali_bool is_aborting; /**< MALI_TRUE if the session is aborting, MALI_FALSE if not. */
mali_bool use_high_priority_job_queue; /**< If MALI_TRUE, jobs added from this session will use the high priority job queues. */
+ u32 pid;
+ char *comm;
+ size_t mali_mem_array[MALI_MEM_TYPE_MAX]; /**< The array to record all mali mem types' usage for this session. */
+ size_t max_mali_mem_allocated; /**< The past max mali memory usage for this session. */
};
_mali_osk_errcode_t mali_session_initialize(void);
return session->page_directory;
}
+MALI_STATIC_INLINE void mali_session_memory_lock(struct mali_session_data *session)
+{
+ MALI_DEBUG_ASSERT_POINTER(session);
+ _mali_osk_mutex_wait(session->memory_lock);
+}
+
+MALI_STATIC_INLINE void mali_session_memory_unlock(struct mali_session_data *session)
+{
+ MALI_DEBUG_ASSERT_POINTER(session);
+ _mali_osk_mutex_signal(session->memory_lock);
+}
+
MALI_STATIC_INLINE void mali_session_send_notification(struct mali_session_data *session, _mali_osk_notification_t *object)
{
_mali_osk_notification_queue_send(session->ioctl_queue, object);
}
+#if defined(CONFIG_MALI_DVFS)
+
+MALI_STATIC_INLINE void mali_session_inc_num_window_jobs(struct mali_session_data *session)
+{
+ MALI_DEBUG_ASSERT_POINTER(session);
+ _mali_osk_atomic_inc(&session->number_of_window_jobs);
+}
+
/*
* Get the max completed window jobs from all active session,
* which will be used in window render frame per sec calculate
*/
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
u32 mali_session_max_window_num(void);
+
#endif
+void mali_session_memory_tracking(_mali_osk_print_ctx *print_ctx);
+
#endif /* __MALI_SESSION_H__ */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_soft_job.h"
#include "mali_osk.h"
-#include "mali_osk_mali.h"
#include "mali_timeline.h"
#include "mali_session.h"
#include "mali_kernel_common.h"
#include "mali_uk_types.h"
#include "mali_scheduler.h"
+#include "mali_executor.h"
MALI_STATIC_INLINE void mali_soft_job_system_lock(struct mali_soft_job_system *system)
{
MALI_DEBUG_PRINT(4, ("Mali Soft Job: signaling soft job %u (0x%08X)\n", job->id, job));
schedule_mask = mali_timeline_tracker_release(&job->tracker);
- mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+ mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
mali_soft_job_destroy(job);
mali_soft_job_system_unlock(job->system);
schedule_mask = mali_timeline_tracker_release(&job->tracker);
- mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+ mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
mali_soft_job_destroy(job);
} else {
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_SOFT_JOB_H__
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_spinlock_reentrant.h"
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_SPINLOCK_REENTRANT_H__
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_timeline.h"
#include "mali_kernel_common.h"
-#include "mali_osk_mali.h"
#include "mali_scheduler.h"
#include "mali_soft_job.h"
#include "mali_timeline_fence_wait.h"
#include "mali_timeline_sync_fence.h"
+#include "mali_executor.h"
+#include "mali_pp_job.h"
#define MALI_TIMELINE_SYSTEM_LOCKED(system) (mali_spinlock_reentrant_is_held((system)->spinlock, _mali_osk_get_tid()))
+/*
+ * Following three elements are used to record how many
+ * gp, physical pp or virtual pp jobs are delayed in the whole
+ * timeline system, we can use these three value to decide
+ * if need to deactivate idle group.
+ */
+_mali_osk_atomic_t gp_tracker_count;
+_mali_osk_atomic_t phy_pp_tracker_count;
+_mali_osk_atomic_t virt_pp_tracker_count;
+
static mali_scheduler_mask mali_timeline_system_release_waiter(struct mali_timeline_system *system,
struct mali_timeline_waiter *waiter);
mali_scheduler_mask schedule_mask = MALI_SCHEDULER_MASK_EMPTY;
u32 tid = _mali_osk_get_tid();
mali_bool is_aborting = MALI_FALSE;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
int fence_status = sync_fence->status;
+#else
+ int fence_status = atomic_read(&sync_fence->status);
+#endif
MALI_DEBUG_ASSERT_POINTER(sync_fence);
MALI_DEBUG_ASSERT_POINTER(sync_fence_waiter);
MALI_DEBUG_ASSERT_POINTER(waiter);
tracker->sync_fence = NULL;
+ tracker->fence.sync_fd = -1;
+
schedule_mask |= mali_timeline_system_release_waiter(system, waiter);
/* If aborting, wake up sleepers that are waiting for sync fence callbacks to complete. */
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) */
if (!is_aborting) {
- mali_scheduler_schedule_from_mask(schedule_mask, MALI_TRUE);
+ mali_executor_schedule_from_mask(schedule_mask, MALI_TRUE);
}
}
#endif /* defined(CONFIG_SYNC) */
mali_spinlock_reentrant_signal(system->spinlock, tid);
- mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+ mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
}
void mali_timeline_system_stop_timer(struct mali_timeline_system *system)
return NULL;
}
- timeline->sync_tl = mali_sync_timeline_create(timeline_name);
+ timeline->sync_tl = mali_sync_timeline_create(timeline, timeline_name);
if (NULL == timeline->sync_tl) {
mali_timeline_destroy(timeline);
return NULL;
MALI_DEBUG_ASSERT(!mali_timeline_is_empty(timeline));
+ if (MALI_TIMELINE_TRACKER_GP == tracker->type) {
+ _mali_osk_atomic_inc(&gp_tracker_count);
+ } else if (MALI_TIMELINE_TRACKER_PP == tracker->type) {
+ if (mali_pp_job_is_virtual((struct mali_pp_job *)tracker->job)) {
+ _mali_osk_atomic_inc(&virt_pp_tracker_count);
+ } else {
+ _mali_osk_atomic_inc(&phy_pp_tracker_count);
+ }
+ }
+
/* Add tracker as new head on timeline's tracker list. */
if (NULL == timeline->tracker_head) {
/* Tracker list is empty. */
switch (tracker->type) {
case MALI_TIMELINE_TRACKER_GP:
- schedule_mask = mali_gp_scheduler_activate_job((struct mali_gp_job *) tracker->job);
+ schedule_mask = mali_scheduler_activate_gp_job((struct mali_gp_job *) tracker->job);
+
+ _mali_osk_atomic_dec(&gp_tracker_count);
break;
case MALI_TIMELINE_TRACKER_PP:
- schedule_mask = mali_pp_scheduler_activate_job((struct mali_pp_job *) tracker->job);
+ schedule_mask = mali_scheduler_activate_pp_job((struct mali_pp_job *) tracker->job);
+
+ if (mali_pp_job_is_virtual((struct mali_pp_job *)tracker->job)) {
+ _mali_osk_atomic_dec(&virt_pp_tracker_count);
+ } else {
+ _mali_osk_atomic_dec(&phy_pp_tracker_count);
+ }
break;
case MALI_TIMELINE_TRACKER_SOFT:
timeline = tracker->timeline;
}
#if defined(CONFIG_SYNC)
- system->signaled_sync_tl = mali_sync_timeline_create("mali-always-signaled");
+ system->signaled_sync_tl = mali_sync_timeline_create(NULL, "mali-always-signaled");
if (NULL == system->signaled_sync_tl) {
mali_timeline_system_destroy(system);
return NULL;
ret = sync_fence_wait_async(sync_fence, &tracker->sync_fence_waiter);
if (1 == ret) {
/* Fence already signaled, no waiter needed. */
+ tracker->fence.sync_fd = -1;
goto exit;
} else if (0 != ret) {
MALI_PRINT_ERROR(("Mali Timeline: sync fence fd %d signaled with error %d\n", tracker->fence.sync_fd, ret));
}
#endif /* defined(CONFIG_SYNC) */
- mali_scheduler_schedule_from_mask(schedule_mask, MALI_FALSE);
+ mali_executor_schedule_from_mask(schedule_mask, MALI_FALSE);
}
mali_timeline_point mali_timeline_system_add_tracker(struct mali_timeline_system *system,
return point;
}
+void mali_timeline_initialize(void)
+{
+ _mali_osk_atomic_init(&gp_tracker_count, 0);
+ _mali_osk_atomic_init(&phy_pp_tracker_count, 0);
+ _mali_osk_atomic_init(&virt_pp_tracker_count, 0);
+}
+
+void mali_timeline_terminate(void)
+{
+ _mali_osk_atomic_term(&gp_tracker_count);
+ _mali_osk_atomic_term(&phy_pp_tracker_count);
+ _mali_osk_atomic_term(&virt_pp_tracker_count);
+}
+
#if defined(MALI_TIMELINE_DEBUG_FUNCTIONS)
static mali_bool is_waiting_on_timeline(struct mali_timeline_tracker *tracker, enum mali_timeline_id id)
MALI_DEBUG_ASSERT_POINTER(timeline->system);
system = timeline->system;
- if (MALI_TIMELINE_MAX > id ) {
- if(MALI_TIMELINE_NO_POINT != tracker->fence.points[id]) {
+ if (MALI_TIMELINE_MAX > id) {
+ if (MALI_TIMELINE_NO_POINT != tracker->fence.points[id]) {
return mali_timeline_is_point_on(system->timelines[id], tracker->fence.points[id]);
} else {
return MALI_FALSE;
state_char = *(tracker_state + mali_timeline_debug_get_tracker_state(tracker));
_mali_osk_snprintf(tracker_type, sizeof(tracker_type), "%s", timeline_tracker_type_to_string(tracker->type));
- if (0 != tracker->trigger_ref_count) {
#if defined(CONFIG_SYNC)
- _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c - ref_wait:%u [%s%u,%s%u,%s%u, fd:%d, fence:(0x%08X)] job:(0x%08X)\n",
- tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
- is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "W" : " ", tracker->fence.points[0],
- is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "W" : " ", tracker->fence.points[1],
- is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "W" : " ", tracker->fence.points[2],
- tracker->fence.sync_fd, tracker->sync_fence, tracker->job);
+ if (0 != tracker->trigger_ref_count) {
+ _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u), fd:%d, fence:(0x%08X)] job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+ tracker->fence.sync_fd, tracker->sync_fence, tracker->job);
} else {
_mali_osk_ctxprintf(print_ctx, "TL: %s %u %c fd:%d fence:(0x%08X) job:(0x%08X)\n",
- tracker_type, tracker->point, state_char,
- tracker->fence.sync_fd, tracker->sync_fence, tracker->job);
+ tracker_type, tracker->point, state_char,
+ tracker->fence.sync_fd, tracker->sync_fence, tracker->job);
}
#else
- _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c - ref_wait:%u [%s%u,%s%u,%s%u] job:(0x%08X)\n",
- tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
- is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "W" : " ", tracker->fence.points[0],
- is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "W" : " ", tracker->fence.points[1],
- is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "W" : " ", tracker->fence.points[2],
- tracker->job);
+ if (0 != tracker->trigger_ref_count) {
+ _mali_osk_ctxprintf(print_ctx, "TL: %s %u %c - ref_wait:%u [%s(%u),%s(%u),%s(%u)] job:(0x%08X)\n",
+ tracker_type, tracker->point, state_char, tracker->trigger_ref_count,
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_GP) ? "WaitGP" : " ", tracker->fence.points[0],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_PP) ? "WaitPP" : " ", tracker->fence.points[1],
+ is_waiting_on_timeline(tracker, MALI_TIMELINE_SOFT) ? "WaitSOFT" : " ", tracker->fence.points[2],
+ tracker->job);
} else {
_mali_osk_ctxprintf(print_ctx, "TL: %s %u %c job:(0x%08X)\n",
- tracker_type, tracker->point, state_char,
- tracker->job);
+ tracker_type, tracker->point, state_char,
+ tracker->job);
}
#endif
}
if (NULL == timeline->tracker_head) continue;
_mali_osk_ctxprintf(print_ctx, "TL: Timeline %s:\n",
- timeline_id_to_string((enum mali_timeline_id)i));
+ timeline_id_to_string((enum mali_timeline_id)i));
mali_timeline_debug_print_timeline(timeline, print_ctx);
num_printed++;
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_TIMELINE_H__
* Soft jobs have to be signaled as complete after activation. Normally this is done by user space,
* but in order to guarantee that every soft job is completed, we also have a timer.
*/
-#define MALI_TIMELINE_TIMEOUT_HZ ((u32) (HZ * 3 / 2)) /* 1500 ms. */
+#define MALI_TIMELINE_TIMEOUT_HZ ((unsigned long) (HZ * 3 / 2)) /* 1500 ms. */
/**
* Timeline type.
void *job; /**< Owner of tracker. */
/* The following fields are used to time out soft job trackers. */
- u32 os_tick_create;
- u32 os_tick_activate;
+ unsigned long os_tick_create;
+ unsigned long os_tick_activate;
mali_bool timer_active;
};
+extern _mali_osk_atomic_t gp_tracker_count;
+extern _mali_osk_atomic_t phy_pp_tracker_count;
+extern _mali_osk_atomic_t virt_pp_tracker_count;
+
/**
* What follows is a set of functions to check the state of a timeline and to determine where on a
* timeline a given point is. Most of these checks will translate the timeline so the oldest point
*/
mali_scheduler_mask mali_timeline_tracker_release(struct mali_timeline_tracker *tracker);
+MALI_STATIC_INLINE mali_bool mali_timeline_tracker_activation_error(
+ struct mali_timeline_tracker *tracker)
+{
+ MALI_DEBUG_ASSERT_POINTER(tracker);
+ return (MALI_TIMELINE_ACTIVATION_ERROR_FATAL_BIT &
+ tracker->activation_error) ? MALI_TRUE : MALI_FALSE;
+}
+
/**
* Copy data from a UK fence to a Timeline fence.
*
*/
void mali_timeline_fence_copy_uk_fence(struct mali_timeline_fence *fence, _mali_uk_fence_t *uk_fence);
+void mali_timeline_initialize(void);
+
+void mali_timeline_terminate(void);
+
+MALI_STATIC_INLINE mali_bool mali_timeline_has_gp_job(void)
+{
+ return 0 < _mali_osk_atomic_read(&gp_tracker_count);
+}
+
+MALI_STATIC_INLINE mali_bool mali_timeline_has_physical_pp_job(void)
+{
+ return 0 < _mali_osk_atomic_read(&phy_pp_tracker_count);
+}
+
+MALI_STATIC_INLINE mali_bool mali_timeline_has_virtual_pp_job(void)
+{
+ return 0 < _mali_osk_atomic_read(&virt_pp_tracker_count);
+}
+
#if defined(DEBUG)
#define MALI_TIMELINE_DEBUG_FUNCTIONS
#endif /* DEBUG */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_timeline_fence_wait.h"
if (-1 != fence->sync_fd) {
sync_fence = sync_fence_fdget(fence->sync_fd);
if (likely(NULL != sync_fence)) {
- if (0 == sync_fence->status) {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
+		if (0 == sync_fence->status) {
+#else
+ if (0 == atomic_read(&sync_fence->status)) {
+#endif
ret = MALI_FALSE;
}
} else {
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_timeline_sync_fence.h"
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
u32 _mali_ukk_report_memory_usage(void);
+u32 _mali_ukk_report_total_memory_size(void);
+
u32 _mali_ukk_utilization_gp_pp(void);
u32 _mali_ukk_utilization_gp(void);
/**
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_kernel_common.h"
/**
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_USER_SETTINGS_DB_H__
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
#define MALI_GPU_NAME_UTGARD "mali-utgard"
-/* Mali-200 */
-#define MALI_GPU_RESOURCES_MALI200(base_addr, gp_irq, pp_irq, mmu_irq) \
- MALI_GPU_RESOURCE_PP(base_addr + 0x0000, pp_irq) \
- MALI_GPU_RESOURCE_GP(base_addr + 0x2000, gp_irq) \
- MALI_GPU_RESOURCE_MMU(base_addr + 0x3000, mmu_irq)
+#define MALI_OFFSET_GP 0x00000
+#define MALI_OFFSET_GP_MMU 0x03000
+
+#define MALI_OFFSET_PP0 0x08000
+#define MALI_OFFSET_PP0_MMU 0x04000
+#define MALI_OFFSET_PP1 0x0A000
+#define MALI_OFFSET_PP1_MMU 0x05000
+#define MALI_OFFSET_PP2 0x0C000
+#define MALI_OFFSET_PP2_MMU 0x06000
+#define MALI_OFFSET_PP3 0x0E000
+#define MALI_OFFSET_PP3_MMU 0x07000
+
+#define MALI_OFFSET_PP4 0x28000
+#define MALI_OFFSET_PP4_MMU 0x1C000
+#define MALI_OFFSET_PP5 0x2A000
+#define MALI_OFFSET_PP5_MMU 0x1D000
+#define MALI_OFFSET_PP6 0x2C000
+#define MALI_OFFSET_PP6_MMU 0x1E000
+#define MALI_OFFSET_PP7 0x2E000
+#define MALI_OFFSET_PP7_MMU 0x1F000
+
+#define MALI_OFFSET_L2_RESOURCE0 0x01000
+#define MALI_OFFSET_L2_RESOURCE1 0x10000
+#define MALI_OFFSET_L2_RESOURCE2 0x11000
+
+#define MALI400_OFFSET_L2_CACHE0 MALI_OFFSET_L2_RESOURCE0
+#define MALI450_OFFSET_L2_CACHE0 MALI_OFFSET_L2_RESOURCE1
+#define MALI450_OFFSET_L2_CACHE1 MALI_OFFSET_L2_RESOURCE0
+#define MALI450_OFFSET_L2_CACHE2 MALI_OFFSET_L2_RESOURCE2
+
+#define MALI_OFFSET_BCAST 0x13000
+#define MALI_OFFSET_DLBU 0x14000
+
+#define MALI_OFFSET_PP_BCAST 0x16000
+#define MALI_OFFSET_PP_BCAST_MMU 0x15000
+
+#define MALI_OFFSET_PMU 0x02000
+#define MALI_OFFSET_DMA 0x12000
/* Mali-300 */
/* Mali-400 */
#define MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
- MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq)
+ MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq)
#define MALI_GPU_RESOURCES_MALI400_MP1_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
MALI_GPU_RESOURCES_MALI400_MP1(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq) \
- MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
#define MALI_GPU_RESOURCES_MALI400_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
- MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0xA000, pp1_irq, base_addr + 0x5000, pp1_mmu_irq)
+ MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq)
#define MALI_GPU_RESOURCES_MALI400_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
MALI_GPU_RESOURCES_MALI400_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq) \
- MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
#define MALI_GPU_RESOURCES_MALI400_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
- MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0xA000, pp1_irq, base_addr + 0x5000, pp1_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0xC000, pp2_irq, base_addr + 0x6000, pp2_mmu_irq)
+ MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq)
#define MALI_GPU_RESOURCES_MALI400_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
MALI_GPU_RESOURCES_MALI400_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq) \
- MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
#define MALI_GPU_RESOURCES_MALI400_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x1000) \
- MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x0000, gp_irq, base_addr + 0x3000, gp_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x8000, pp0_irq, base_addr + 0x4000, pp0_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0xA000, pp1_irq, base_addr + 0x5000, pp1_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0xC000, pp2_irq, base_addr + 0x6000, pp2_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0xE000, pp3_irq, base_addr + 0x7000, pp3_mmu_irq)
+ MALI_GPU_RESOURCE_L2(base_addr + MALI400_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq)
#define MALI_GPU_RESOURCES_MALI400_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
MALI_GPU_RESOURCES_MALI400_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq) \
- MALI_GPU_RESOURCE_PMU(base_addr + 0x2000)
+	MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU)
/* Mali-450 */
#define MALI_GPU_RESOURCES_MALI450_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
- MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
- MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
- MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
- MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
- MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000) \
- MALI_GPU_RESOURCE_DMA(base_addr + 0x12000)
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+ MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
#define MALI_GPU_RESOURCES_MALI450_MP2_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
MALI_GPU_RESOURCES_MALI450_MP2(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp_bcast_irq) \
- MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
-
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
#define MALI_GPU_RESOURCES_MALI450_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
- MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
- MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
- MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
- MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
- MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000)
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU)
#define MALI_GPU_RESOURCES_MALI450_MP3_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
MALI_GPU_RESOURCES_MALI450_MP3(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp_bcast_irq) \
- MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
-
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
#define MALI_GPU_RESOURCES_MALI450_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
- MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0x0E000, pp3_irq, base_addr + 0x07000, pp3_mmu_irq) \
- MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
- MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
- MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
- MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000) \
- MALI_GPU_RESOURCE_DMA(base_addr + 0x12000)
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+ MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
#define MALI_GPU_RESOURCES_MALI450_MP4_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
MALI_GPU_RESOURCES_MALI450_MP4(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp_bcast_irq) \
- MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
-
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
#define MALI_GPU_RESOURCES_MALI450_MP6(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
- MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x11000) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0x28000, pp3_irq, base_addr + 0x1C000, pp3_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + 0x2A000, pp4_irq, base_addr + 0x1D000, pp4_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + 0x2C000, pp5_irq, base_addr + 0x1E000, pp5_mmu_irq) \
- MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
- MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
- MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
- MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000) \
- MALI_GPU_RESOURCE_DMA(base_addr + 0x12000)
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE2) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP4, pp3_irq, base_addr + MALI_OFFSET_PP4_MMU, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + MALI_OFFSET_PP5, pp4_irq, base_addr + MALI_OFFSET_PP5_MMU, pp4_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + MALI_OFFSET_PP6, pp5_irq, base_addr + MALI_OFFSET_PP6_MMU, pp5_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+ MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
#define MALI_GPU_RESOURCES_MALI450_MP6_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
MALI_GPU_RESOURCES_MALI450_MP6(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp_bcast_irq) \
- MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
-
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
#define MALI_GPU_RESOURCES_MALI450_MP8(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x10000) \
- MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + 0x00000, gp_irq, base_addr + 0x03000, gp_mmu_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x01000) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + 0x08000, pp0_irq, base_addr + 0x04000, pp0_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + 0x0A000, pp1_irq, base_addr + 0x05000, pp1_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + 0x0C000, pp2_irq, base_addr + 0x06000, pp2_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + 0x0E000, pp3_irq, base_addr + 0x07000, pp3_mmu_irq) \
- MALI_GPU_RESOURCE_L2(base_addr + 0x11000) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + 0x28000, pp4_irq, base_addr + 0x1C000, pp4_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + 0x2A000, pp5_irq, base_addr + 0x1D000, pp5_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(6, base_addr + 0x2C000, pp6_irq, base_addr + 0x1E000, pp6_mmu_irq) \
- MALI_GPU_RESOURCE_PP_WITH_MMU(7, base_addr + 0x2E000, pp7_irq, base_addr + 0x1F000, pp7_mmu_irq) \
- MALI_GPU_RESOURCE_BCAST(base_addr + 0x13000) \
- MALI_GPU_RESOURCE_DLBU(base_addr + 0x14000) \
- MALI_GPU_RESOURCE_PP_BCAST(base_addr + 0x16000, pp_bcast_irq) \
- MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + 0x15000) \
- MALI_GPU_RESOURCE_DMA(base_addr + 0x12000)
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE0) \
+ MALI_GPU_RESOURCE_GP_WITH_MMU(base_addr + MALI_OFFSET_GP, gp_irq, base_addr + MALI_OFFSET_GP_MMU, gp_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE1) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(0, base_addr + MALI_OFFSET_PP0, pp0_irq, base_addr + MALI_OFFSET_PP0_MMU, pp0_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(1, base_addr + MALI_OFFSET_PP1, pp1_irq, base_addr + MALI_OFFSET_PP1_MMU, pp1_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(2, base_addr + MALI_OFFSET_PP2, pp2_irq, base_addr + MALI_OFFSET_PP2_MMU, pp2_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(3, base_addr + MALI_OFFSET_PP3, pp3_irq, base_addr + MALI_OFFSET_PP3_MMU, pp3_mmu_irq) \
+ MALI_GPU_RESOURCE_L2(base_addr + MALI450_OFFSET_L2_CACHE2) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(4, base_addr + MALI_OFFSET_PP4, pp4_irq, base_addr + MALI_OFFSET_PP4_MMU, pp4_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(5, base_addr + MALI_OFFSET_PP5, pp5_irq, base_addr + MALI_OFFSET_PP5_MMU, pp5_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(6, base_addr + MALI_OFFSET_PP6, pp6_irq, base_addr + MALI_OFFSET_PP6_MMU, pp6_mmu_irq) \
+ MALI_GPU_RESOURCE_PP_WITH_MMU(7, base_addr + MALI_OFFSET_PP7, pp7_irq, base_addr + MALI_OFFSET_PP7_MMU, pp7_mmu_irq) \
+ MALI_GPU_RESOURCE_BCAST(base_addr + MALI_OFFSET_BCAST) \
+ MALI_GPU_RESOURCE_DLBU(base_addr + MALI_OFFSET_DLBU) \
+ MALI_GPU_RESOURCE_PP_BCAST(base_addr + MALI_OFFSET_PP_BCAST, pp_bcast_irq) \
+ MALI_GPU_RESOURCE_PP_MMU_BCAST(base_addr + MALI_OFFSET_PP_BCAST_MMU) \
+ MALI_GPU_RESOURCE_DMA(base_addr + MALI_OFFSET_DMA)
#define MALI_GPU_RESOURCES_MALI450_MP8_PMU(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
MALI_GPU_RESOURCES_MALI450_MP8(base_addr, gp_irq, gp_mmu_irq, pp0_irq, pp0_mmu_irq, pp1_irq, pp1_mmu_irq, pp2_irq, pp2_mmu_irq, pp3_irq, pp3_mmu_irq, pp4_irq, pp4_mmu_irq, pp5_irq, pp5_mmu_irq, pp6_irq, pp6_mmu_irq, pp7_irq, pp7_mmu_irq, pp_bcast_irq) \
- MALI_GPU_RESOURCE_PMU(base_addr + 0x2000) \
-
+ MALI_GPU_RESOURCE_PMU(base_addr + MALI_OFFSET_PMU) \
+
#define MALI_GPU_RESOURCE_L2(addr) \
{ \
.name = "Mali_L2", \
.start = pp_irq, \
.end = pp_irq, \
}, \
-
+
#define MALI_GPU_RESOURCE_PP_MMU_BCAST(pp_mmu_bcast_addr) \
{ \
.name = "Mali_PP_MMU_Broadcast", \
unsigned int utilization_gpu; /* Utilization for GP and all PP cores combined, 0 = no utilization, 256 = full utilization */
unsigned int utilization_gp; /* Utilization for GP core only, 0 = no utilization, 256 = full utilization */
unsigned int utilization_pp; /* Utilization for all PP cores combined, 0 = no utilization, 256 = full utilization */
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
- unsigned int number_of_window_jobs;
- unsigned int number_of_window_jobs_under_pressure;
-#endif
};
-struct mali_gpu_device_data {
- /* Dedicated GPU memory range (physical). */
- unsigned long dedicated_mem_start;
- unsigned long dedicated_mem_size;
+struct mali_gpu_clk_item {
+ unsigned int clock; /* unit(MHz) */
+ unsigned int vol;
+};
+struct mali_gpu_clock {
+ struct mali_gpu_clk_item *item;
+ unsigned int num_of_steps;
+};
+
+struct mali_gpu_device_data {
/* Shared GPU memory */
unsigned long shared_mem_size;
- /* Frame buffer memory to be accessible by Mali GPU (physical) */
- unsigned long fb_start;
- unsigned long fb_size;
-
- /* Max runtime [ms] for jobs */
- int max_job_runtime;
-
- /* Report GPU utilization in this interval (specified in ms) */
- unsigned long utilization_interval;
-
- /* Function that will receive periodic GPU utilization numbers */
- void (*utilization_callback)(struct mali_gpu_utilization_data *data);
-
/*
* Mali PMU switch delay.
* Only needed if the power gates are connected to the PMU in a high fanout
*/
u32 pmu_switch_delay;
-
/* Mali Dynamic power domain configuration in sequence from 0-11
* GP PP0 PP1 PP2 PP3 PP4 PP5 PP6 PP7, L2$0 L2$1 L2$2
*/
u16 pmu_domain_config[12];
- /* Fuction that platform callback for freq tunning, needed when POWER_PERFORMANCE_POLICY enabled*/
- int (*set_freq_callback)(unsigned int mhz);
-};
+ /* Dedicated GPU memory range (physical). */
+ unsigned long dedicated_mem_start;
+ unsigned long dedicated_mem_size;
-/** @brief MALI GPU power down using MALI in-built PMU
- *
- * called to power down all cores
- */
-int mali_pmu_powerdown(void);
+ /* Frame buffer memory to be accessible by Mali GPU (physical) */
+ unsigned long fb_start;
+ unsigned long fb_size;
+
+ /* Max runtime [ms] for jobs */
+ int max_job_runtime;
+ /* Report GPU utilization and related control in this interval (specified in ms) */
+ unsigned long control_interval;
-/** @brief MALI GPU power up using MALI in-built PMU
- *
- * called to power up all cores
- */
-int mali_pmu_powerup(void);
+ /* Function that will receive periodic GPU utilization numbers */
+ void (*utilization_callback)(struct mali_gpu_utilization_data *data);
+
+	/* Platform callback function for frequency setting; needed when CONFIG_MALI_DVFS is enabled. */
+ int (*set_freq)(int setting_clock_step);
+	/* Function by which the platform reports the clock settings the driver may set; needed when CONFIG_MALI_DVFS is enabled. */
+ void (*get_clock_info)(struct mali_gpu_clock **data);
+ /* Function that get the current clock info, needed when CONFIG_MALI_DVFS enabled */
+ int (*get_freq)(void);
+};
/**
* Pause the scheduling and power state changes of Mali device driver.
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2007-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef _MALI_UTGARD_COUNTERS_H_
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2007-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_UTGARD_IOCTL_H__
#define MALI_IOC_WAIT_FOR_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_WAIT_FOR_NOTIFICATION, _mali_uk_wait_for_notification_s)
#define MALI_IOC_GET_API_VERSION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, u32)
-#define MALI_IOC_GET_MALI_VERSION_IN_RK30 _IOWR(MALI_IOC_CORE_BASE,_MALI_UK_GET_MALI_VERSION_IN_RK30,_mali_uk_get_mali_version_in_rk30_s *)
#define MALI_IOC_GET_API_VERSION_V2 _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_API_VERSION, _mali_uk_get_api_version_v2_s)
#define MALI_IOC_POST_NOTIFICATION _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_POST_NOTIFICATION, _mali_uk_post_notification_s)
#define MALI_IOC_GET_USER_SETTING _IOWR(MALI_IOC_CORE_BASE, _MALI_UK_GET_USER_SETTING, _mali_uk_get_user_setting_s)
#define MALI_IOC_VSYNC_EVENT_REPORT _IOW (MALI_IOC_VSYNC_BASE, _MALI_UK_VSYNC_EVENT_REPORT, _mali_uk_vsync_event_report_s)
+#define MALI_IOC_GET_MALI_VERSION_IN_RK30 _IOWR(MALI_IOC_CORE_BASE,_MALI_UK_GET_MALI_VERSION_IN_RK30,_mali_uk_get_mali_version_in_rk30_s *)
+
#ifdef __cplusplus
}
#endif
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef _MALI_UTGARD_PROFILING_EVENTS_H_
MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_MALI_FENCE_DUP = 43,
MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_FLUSH_SERVER_WAITS = 44,
MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_EGL_WAIT_SYNC = 45, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_JOBS_WAIT = 46, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_NOFRAMES_WAIT = 47, /* USED */
+ MALI_PROFILING_EVENT_REASON_SUSPEND_RESUME_SW_FB_NOJOBS_WAIT = 48, /* USED */
} cinstr_profiling_event_reason_suspend_resume_sw_t;
/**
MALI_PROFILING_EVENT_DATA_CORE_PP5 = 10,
MALI_PROFILING_EVENT_DATA_CORE_PP6 = 11,
MALI_PROFILING_EVENT_DATA_CORE_PP7 = 12,
+ MALI_PROFILING_EVENT_DATA_CORE_GP0_MMU = 22, /* GP0 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP0_MMU = 26, /* PP0 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP1_MMU = 27, /* PP1 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP2_MMU = 28, /* PP2 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP3_MMU = 29, /* PP3 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP4_MMU = 30, /* PP4 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP5_MMU = 31, /* PP5 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP6_MMU = 32, /* PP6 + 21 */
+ MALI_PROFILING_EVENT_DATA_CORE_PP7_MMU = 33, /* PP7 + 21 */
+
} cinstr_profiling_event_data_core_t;
#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP(num) (MALI_PROFILING_EVENT_DATA_CORE_GP0 + (num))
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_GP_MMU(num) (MALI_PROFILING_EVENT_DATA_CORE_GP0_MMU + (num))
#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP(num) (MALI_PROFILING_EVENT_DATA_CORE_PP0 + (num))
+#define MALI_PROFILING_MAKE_EVENT_DATA_CORE_PP_MMU(num) (MALI_PROFILING_EVENT_DATA_CORE_PP0_MMU + (num))
#endif /*_MALI_UTGARD_PROFILING_EVENTS_H_*/
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_UTGARD_PROFILING_GATOR_API_H__
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
_MALI_UK_SOFT_JOB_START, /**< _mali_ukk_soft_job_start() */
_MALI_UK_SOFT_JOB_SIGNAL, /**< _mali_ukk_soft_job_signal() */
_MALI_UK_GET_MALI_VERSION_IN_RK30,
+
/** Memory functions */
_MALI_UK_INIT_MEM = 0, /**< _mali_ukk_init_mem() */
/** @brief Determine whether two 32-bit encoded version IDs match */
#define _IS_API_MATCH(x, y) (IS_VERSION_ID((x)) && IS_VERSION_ID((y)) && (GET_VERSION((x)) == GET_VERSION((y))))
-
/**
* RK MALI version code
*/
* interface is compatible with the kernel-side interface, since future versions
* of the interface may be backwards compatible.
*/
-typedef struct
-{
- u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
- _mali_uk_api_version version; /**< [in,out] API version of user-side interface. */
-} _mali_uk_get_mali_version_in_rk30_s;
typedef struct {
u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
_mali_uk_api_version version; /**< [in,out] API version of user-side interface. */
int compatible; /**< [out] @c 1 when @version is compatible, @c 0 otherwise */
} _mali_uk_get_api_version_v2_s;
+typedef struct
+{
+ u64 ctx; /**< [in,out] user-kernel context (trashed on output) */
+ _mali_uk_api_version version; /**< [in,out] API version of user-side interface. */
+} _mali_uk_get_mali_version_in_rk30_s;
+
/** @} */ /* end group _mali_uk_getapiversion_s */
/** @defgroup _mali_uk_get_user_settings_s Get user space settings */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010, 2012 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
#define __MALI_KERNEL_LICENSE_H__
#ifdef __cplusplus
-extern "C"
-{
+extern "C" {
#endif
#define MALI_KERNEL_LINUX_LICENSE "GPL"
/**
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
#include <linux/module.h>
#include <linux/mali/mali_utgard.h>
-#include "mali_gp_scheduler.h"
-#include "mali_pp_scheduler.h"
+#include "mali_pm.h"
void mali_dev_pause(void)
{
- mali_gp_scheduler_suspend();
- mali_pp_scheduler_suspend();
- mali_group_power_off(MALI_FALSE);
- mali_l2_cache_pause_all(MALI_TRUE);
+ /*
+	 * Deactivate all groups to prevent the hardware from being touched
+ * during the period of mali device pausing
+ */
+ mali_pm_os_suspend(MALI_FALSE);
}
EXPORT_SYMBOL(mali_dev_pause);
void mali_dev_resume(void)
{
- mali_l2_cache_pause_all(MALI_FALSE);
- mali_gp_scheduler_resume();
- mali_pp_scheduler_resume();
+ mali_pm_os_resume();
}
EXPORT_SYMBOL(mali_dev_resume);
/**
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
+
/**
* @file mali_kernel_linux.c
* Implementation of the Linux device driver entrypoints
*/
+#include "../platform/rk30/custom_log.h"
+
#include <linux/module.h> /* kernel module definitions */
#include <linux/fs.h> /* file system operations */
#include <linux/cdev.h> /* character device definitions */
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/bug.h>
+#include <linux/of.h>
+
#include <linux/mali/mali_utgard.h>
#include "mali_kernel_common.h"
#include "mali_session.h"
#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
#include "mali_profiling_internal.h"
#endif
-#include <linux/of.h>
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+#include "mali_osk_profiling.h"
+#include "mali_dvfs_policy.h"
+static int is_first_resume = 1;
+/* Store the clock and voltage for boot/insmod and mali_resume. */
+static struct mali_gpu_clk_item mali_gpu_clk[2];
+#endif
/* Streamline support for the Mali driver */
#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_MALI400_PROFILING)
module_param(mali_dedicated_mem_size, uint, S_IRUSR | S_IRGRP | S_IROTH);
MODULE_PARM_DESC(mali_dedicated_mem_size, "Size of dedicated Mali GPU memory.");
-extern unsigned long mali_shared_mem_size;
-module_param(mali_shared_mem_size, ulong, S_IRUSR | S_IRGRP | S_IROTH);
+extern unsigned int mali_shared_mem_size;
+module_param(mali_shared_mem_size, uint, S_IRUSR | S_IRGRP | S_IROTH);
MODULE_PARM_DESC(mali_shared_mem_size, "Size of shared Mali GPU memory.");
#if defined(CONFIG_MALI400_PROFILING)
module_param(mali_max_pp_cores_group_2, int, S_IRUSR | S_IRGRP | S_IROTH);
MODULE_PARM_DESC(mali_max_pp_cores_group_2, "Limit the number of PP cores to use from second PP group (Mali-450 only).");
-#if defined(CONFIG_MALI400_POWER_PERFORMANCE_POLICY)
+#if defined(CONFIG_MALI_DVFS)
/** the max fps the same as display vsync default 60, can set by module insert parameter */
extern int mali_max_system_fps;
module_param(mali_max_system_fps, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH);
#endif
#if defined(MALI_FAKE_PLATFORM_DEVICE)
-extern int mali_platform_device_register(struct platform_device *pdev);
-extern int mali_platform_device_unregister(struct platform_device *pdev);
+#if defined(CONFIG_MALI_DT)
+extern int mali_platform_device_init(struct platform_device *device);
+extern int mali_platform_device_deinit(struct platform_device *device);
+#else
+extern int mali_platform_device_register(void);
+extern int mali_platform_device_unregister(void);
+#endif
#endif
/* Linux power management operations provided by the Mali device driver */
};
#endif
-#ifdef CONFIG_OF
-static const struct of_device_id mali_dt_ids[] = {
- { .compatible = "arm,mali400" },
- { /* sentinel */ }
+#ifdef CONFIG_MALI_DT
+static struct of_device_id base_dt_ids[] = {
+ {.compatible = "arm,mali-300"},
+ /*-------------------------------------------------------*/
+ /* rk_ext : to use dts_for_mali_ko_befor_r5p0-01rel0. */
+ // {.compatible = "arm,mali-400"},
+ {.compatible = "arm,mali400"},
+ /*-------------------------------------------------------*/
+ {.compatible = "arm,mali-450"},
+ {.compatible = "arm,mali-utgard"},
+ {},
};
-MODULE_DEVICE_TABLE(of, mali_dt_ids);
+
+MODULE_DEVICE_TABLE(of, base_dt_ids);
#endif
/* The Mali device driver struct */
.bus = &platform_bus_type,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29))
.pm = &mali_dev_pm_ops,
- .of_match_table = of_match_ptr(mali_dt_ids),
+#endif
+#ifdef CONFIG_MALI_DT
+ .of_match_table = of_match_ptr(base_dt_ids),
#endif
},
};
.mmap = mali_mmap
};
-
#if MALI_ENABLE_CPU_CYCLES
void mali_init_cpu_time_counters(int reset, int enable_divide_by_64)
{
}
#endif
+/** rk_ext : version of rk_ext on mali_ko, aka. rk_ko_ver. */
+#define ROCKCHIP_VERSION (1)
int mali_module_init(void)
{
MALI_DEBUG_PRINT(2, ("Inserting Mali v%d device driver. \n", _MALI_API_VERSION));
MALI_DEBUG_PRINT(2, ("Compiled: %s, time: %s.\n", __DATE__, __TIME__));
MALI_DEBUG_PRINT(2, ("Driver revision: %s\n", SVN_REV_STRING));
+
+ I("arm_release_ver of this mali_ko is '%s', rk_ko_ver is '%d', built at '%s', on '%s'.",
+ SVN_REV_STRING,
+ ROCKCHIP_VERSION,
+ __TIME__,
+ __DATE__);
#if MALI_ENABLE_CPU_CYCLES
mali_init_cpu_time_counters_on_all_cpus(0);
mali_init_cpu_time_counters_on_all_cpus(1);
#endif
- MALI_PRINT(("mali_module_init() registering driver\n"));
+ /* Initialize module wide settings */
+#ifdef MALI_FAKE_PLATFORM_DEVICE
+#ifndef CONFIG_MALI_DT
+ MALI_DEBUG_PRINT(2, ("mali_module_init() registering device\n"));
+ err = mali_platform_device_register();
+ if (0 != err) {
+ return err;
+ }
+#endif
+#endif
+
+ MALI_DEBUG_PRINT(2, ("mali_module_init() registering driver\n"));
err = platform_driver_register(&mali_platform_driver);
if (0 != err) {
- MALI_PRINT(("mali_module_init() Failed to register driver (%d)\n", err));
+ MALI_DEBUG_PRINT(2, ("mali_module_init() Failed to register driver (%d)\n", err));
+#ifdef MALI_FAKE_PLATFORM_DEVICE
+#ifndef CONFIG_MALI_DT
+ mali_platform_device_unregister();
+#endif
+#endif
mali_platform_device = NULL;
return err;
}
}
#endif
+	/* Trace the current frequency and voltage from boot/insmod. */
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+	/* Just call mali_get_current_gpu_clk_item(), to record the current clock info. */
+ mali_get_current_gpu_clk_item(&mali_gpu_clk[0]);
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ mali_gpu_clk[0].clock,
+ mali_gpu_clk[0].vol / 1000,
+ 0, 0, 0);
+#endif
+
MALI_PRINT(("Mali device driver loaded\n"));
return 0; /* Success */
MALI_DEBUG_PRINT(2, ("mali_module_exit() unregistering driver\n"));
-#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
- _mali_internal_profiling_term();
-#endif
-
platform_driver_unregister(&mali_platform_driver);
#if defined(MALI_FAKE_PLATFORM_DEVICE)
+#ifndef CONFIG_MALI_DT
MALI_DEBUG_PRINT(2, ("mali_module_exit() unregistering device\n"));
- mali_platform_device_unregister(mali_platform_device);
+ mali_platform_device_unregister();
+#endif
+#endif
+
+	/* Trace the current frequency and voltage from rmmod. */
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ 0,
+ 0,
+ 0, 0, 0);
+
+#if defined(CONFIG_MALI400_INTERNAL_PROFILING)
+ _mali_internal_profiling_term();
#endif
MALI_PRINT(("Mali device driver unloaded\n"));
mali_platform_device = pdev;
- pr_info("mali_platform_device->num_resources = %d\n",mali_platform_device->num_resources);
-
+ dev_info(&pdev->dev, "mali_platform_device->num_resources = %d\n",
+ mali_platform_device->num_resources);
+
{
int i = 0;
- for(i=0;i<mali_platform_device->num_resources;i++)
- {
- pr_info("mali_platform_device->resource[%d].start = 0x%08x\n",i,mali_platform_device->resource[i].start);
- }
+
+ for(i = 0; i < mali_platform_device->num_resources; i++)
+ dev_info(&pdev->dev, "mali_platform_device->resource[%d].start = 0x%08x\n",
+ i, mali_platform_device->resource[i].start);
}
-
-
-#if defined(MALI_FAKE_PLATFORM_DEVICE)
- MALI_DEBUG_PRINT(2, ("mali_module_init() registering device\n"));
- err = mali_platform_device_register(mali_platform_device);
- if (0 != err)
- {
- mali_platform_device_unregister(mali_platform_device);
- return err;
+
+#ifdef CONFIG_MALI_DT
+ /* If we use DT to initialize our DDK, we have to prepare somethings. */
+ err = mali_platform_device_init(mali_platform_device);
+ if (0 != err) {
+ MALI_PRINT_ERROR(("mali_probe(): Failed to initialize platform device."));
+ return -EFAULT;
}
#endif
if (0 == err) {
/* Setup sysfs entries */
err = mali_sysfs_register(mali_dev_name);
+
if (0 == err) {
MALI_DEBUG_PRINT(2, ("mali_probe(): Successfully initialized driver for platform device %s\n", pdev->name));
+
return 0;
} else {
MALI_PRINT_ERROR(("mali_probe(): failed to register sysfs entries"));
mali_miscdevice_unregister();
mali_terminate_subsystems();
_mali_osk_wq_term();
+#ifdef CONFIG_MALI_DT
+ mali_platform_device_deinit(mali_platform_device);
+#endif
mali_platform_device = NULL;
return 0;
}
static int mali_driver_suspend_scheduler(struct device *dev)
{
- mali_pm_os_suspend();
+ mali_pm_os_suspend(MALI_TRUE);
+ /* Tracing the frequency and voltage after mali is suspended */
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ 0,
+ 0,
+ 0, 0, 0);
return 0;
}
static int mali_driver_resume_scheduler(struct device *dev)
{
+ /* Tracing the frequency and voltage after mali is resumed */
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+	/* Just call mali_get_current_gpu_clk_item() once, to record the current clock info. */
+ if (is_first_resume == 1) {
+ mali_get_current_gpu_clk_item(&mali_gpu_clk[1]);
+ is_first_resume = 0;
+ }
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ mali_gpu_clk[1].clock,
+ mali_gpu_clk[1].vol / 1000,
+ 0, 0, 0);
+#endif
mali_pm_os_resume();
return 0;
}
#ifdef CONFIG_PM_RUNTIME
static int mali_driver_runtime_suspend(struct device *dev)
{
- mali_pm_runtime_suspend();
- return 0;
+ if (MALI_TRUE == mali_pm_runtime_suspend()) {
+ /* Tracing the frequency and voltage after mali is suspended */
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ 0,
+ 0,
+ 0, 0, 0);
+
+ return 0;
+ } else {
+ return -EBUSY;
+ }
}
static int mali_driver_runtime_resume(struct device *dev)
{
+ /* Tracing the frequency and voltage after mali is resumed */
+#if defined(CONFIG_MALI400_PROFILING) && defined(CONFIG_MALI_DVFS)
+	/* Just call mali_get_current_gpu_clk_item() once, to record the current clock info. */
+ if (is_first_resume == 1) {
+ mali_get_current_gpu_clk_item(&mali_gpu_clk[1]);
+ is_first_resume = 0;
+ }
+ _mali_osk_profiling_add_event(MALI_PROFILING_EVENT_TYPE_SINGLE |
+ MALI_PROFILING_EVENT_CHANNEL_GPU |
+ MALI_PROFILING_EVENT_REASON_SINGLE_GPU_FREQ_VOLT_CHANGE,
+ mali_gpu_clk[1].clock,
+ mali_gpu_clk[1].vol / 1000,
+ 0, 0, 0);
+#endif
+
mali_pm_runtime_resume();
return 0;
}
break;
case MALI_IOC_GET_MALI_VERSION_IN_RK30:
- err = get_mali_version_in_rk30_wrapper(session_data,(_mali_uk_get_mali_version_in_rk30_s __user *)arg);
+ err = get_mali_version_in_rk30_wrapper(session_data, (_mali_uk_get_mali_version_in_rk30_s __user *)arg);
break;
-
+
case MALI_IOC_GET_USER_SETTINGS:
BUILD_BUG_ON(!IS_ALIGNED(sizeof(_mali_uk_get_user_settings_s), sizeof(u64)));
err = get_user_settings_wrapper(session_data, (_mali_uk_get_user_settings_s __user *)arg);
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_KERNEL_LINUX_H__
/**
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_profiling_internal.h"
#include "mali_gp_job.h"
#include "mali_pp_job.h"
-#include "mali_pp_scheduler.h"
-#include "mali_session.h"
+#include "mali_executor.h"
#define PRIVATE_DATA_COUNTER_MAKE_GP(src) (src)
#define PRIVATE_DATA_COUNTER_MAKE_PP(src) ((1 << 24) | src)
group = (struct mali_group *)filp->private_data;
MALI_DEBUG_ASSERT_POINTER(group);
- r = snprintf(buffer, 64, "%u\n", mali_group_is_enabled(group) ? 1 : 0);
+ r = snprintf(buffer, 64, "%u\n",
+ mali_executor_group_is_disabled(group) ? 0 : 1);
return simple_read_from_buffer(buf, count, offp, buffer, r);
}
}
buffer[count] = '\0';
- r = strict_strtoul(&buffer[0], 10, &val);
+ r = kstrtoul(&buffer[0], 10, &val);
if (0 != r) {
return -EINVAL;
}
switch (val) {
case 1:
- mali_group_enable(group);
+ mali_executor_group_enable(group);
break;
case 0:
- mali_group_disable(group);
+ mali_executor_group_disable(group);
break;
default:
return -EINVAL;
hw_core = (struct mali_hw_core *)filp->private_data;
MALI_DEBUG_ASSERT_POINTER(hw_core);
- r = snprintf(buffer, 64, "0x%08X\n", hw_core->phys_addr);
+ r = snprintf(buffer, 64, "0x%lX\n", hw_core->phys_addr);
return simple_read_from_buffer(buf, count, offp, buffer, r);
}
buf[cnt] = 0;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtol(buf, 10, &val);
if (ret < 0) {
return ret;
}
buf[cnt] = 0;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtol(buf, 10, &val);
if (ret < 0) {
return ret;
}
val = MALI_HW_CORE_NO_COUNTER;
}
- if (0 == src_id) {
- mali_l2_cache_core_set_counter_src0(l2_core, (u32)val);
- } else {
- mali_l2_cache_core_set_counter_src1(l2_core, (u32)val);
- }
+ mali_l2_cache_core_set_counter_src(l2_core, src_id, (u32)val);
*ppos += cnt;
return cnt;
buf[cnt] = 0;
- ret = strict_strtol(buf, 10, &val);
+ ret = kstrtol(buf, 10, &val);
if (ret < 0) {
return ret;
}
l2_id = 0;
l2_cache = mali_l2_cache_core_get_glob_l2_core(l2_id);
while (NULL != l2_cache) {
- if (0 == src_id) {
- mali_l2_cache_core_set_counter_src0(l2_cache, (u32)val);
- } else {
- mali_l2_cache_core_set_counter_src1(l2_cache, (u32)val);
- }
+ mali_l2_cache_core_set_counter_src(l2_cache, src_id, (u32)val);
/* try next L2 */
l2_id++;
.write = l2_all_counter_src1_write,
};
+static ssize_t l2_l2x_counter_valx_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos, u32 src_id)
+{
+ char buf[64];
+ int r;
+ u32 src0 = 0;
+ u32 val0 = 0;
+ u32 src1 = 0;
+ u32 val1 = 0;
+ u32 val = -1;
+ struct mali_l2_cache_core *l2_core = (struct mali_l2_cache_core *)filp->private_data;
+
+ mali_l2_cache_core_get_counter_values(l2_core, &src0, &val0, &src1, &val1);
+
+ if (0 == src_id) {
+ if (MALI_HW_CORE_NO_COUNTER != val0) {
+ val = val0;
+ }
+ } else {
+ if (MALI_HW_CORE_NO_COUNTER != val1) {
+ val = val1;
+ }
+ }
+
+ r = snprintf(buf, 64, "%u\n", val);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+}
+
+static ssize_t l2_l2x_counter_val0_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_valx_read(filp, ubuf, cnt, ppos, 0);
+}
+
+static ssize_t l2_l2x_counter_val1_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+ return l2_l2x_counter_valx_read(filp, ubuf, cnt, ppos, 1);
+}
+
+static const struct file_operations l2_l2x_counter_val0_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = l2_l2x_counter_val0_read,
+};
+
+static const struct file_operations l2_l2x_counter_val1_fops = {
+ .owner = THIS_MODULE,
+ .open = open_copy_private_data,
+ .read = l2_l2x_counter_val1_read,
+};
+
static ssize_t power_always_on_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
{
unsigned long val;
}
buf[cnt] = '\0';
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (0 != ret) {
return ret;
}
/* Update setting (not exactly thread safe) */
if (1 == val && MALI_FALSE == power_always_on_enabled) {
power_always_on_enabled = MALI_TRUE;
- _mali_osk_pm_dev_ref_add();
+ _mali_osk_pm_dev_ref_get_sync();
} else if (0 == val && MALI_TRUE == power_always_on_enabled) {
power_always_on_enabled = MALI_FALSE;
- _mali_osk_pm_dev_ref_dec();
+ _mali_osk_pm_dev_ref_put();
}
*ppos += cnt;
static ssize_t power_power_events_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos)
{
-
- if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_SUSPEND], strlen(mali_power_events[_MALI_DEVICE_SUSPEND]))) {
- mali_pm_os_suspend();
-
- } else if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_RESUME], strlen(mali_power_events[_MALI_DEVICE_RESUME]))) {
+ if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_SUSPEND], strlen(mali_power_events[_MALI_DEVICE_SUSPEND])-1)) {
+ mali_pm_os_suspend(MALI_TRUE);
+ } else if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_RESUME], strlen(mali_power_events[_MALI_DEVICE_RESUME])-1)) {
mali_pm_os_resume();
- } else if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_DVFS_PAUSE], strlen(mali_power_events[_MALI_DEVICE_DVFS_PAUSE]))) {
+ } else if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_DVFS_PAUSE], strlen(mali_power_events[_MALI_DEVICE_DVFS_PAUSE])-1)) {
mali_dev_pause();
- } else if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_DVFS_RESUME], strlen(mali_power_events[_MALI_DEVICE_DVFS_RESUME]))) {
+ } else if (!strncmp(ubuf, mali_power_events[_MALI_DEVICE_DVFS_RESUME], strlen(mali_power_events[_MALI_DEVICE_DVFS_RESUME])-1)) {
mali_dev_resume();
}
*ppos += cnt;
buf[cnt] = 0;
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (ret < 0) {
return ret;
}
#endif
-static ssize_t memory_used_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+static int memory_debugfs_show(struct seq_file *s, void *private_data)
{
- char buf[64];
- size_t r;
- u32 mem = _mali_ukk_report_memory_usage();
+ seq_printf(s, " %-25s %-10s %-10s %-15s %-15s %-10s %-10s\n"\
+ "==============================================================================================================\n",
+ "Name (:bytes)", "pid", "mali_mem", "max_mali_mem",
+ "external_mem", "ump_mem", "dma_mem");
+ mali_session_memory_tracking(s);
+ return 0;
+}
- r = snprintf(buf, 64, "%u\n", mem);
- return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+static int memory_debugfs_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, memory_debugfs_show, inode->i_private);
}
static const struct file_operations memory_usage_fops = {
.owner = THIS_MODULE,
- .read = memory_used_read,
+ .open = memory_debugfs_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
};
static ssize_t utilization_gp_pp_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
}
buf[cnt] = '\0';
- ret = strict_strtoul(buf, 10, &val);
+ ret = kstrtoul(buf, 10, &val);
if (0 != ret) {
return ret;
}
return 0;
}
-static ssize_t pmu_power_down_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
-{
- int ret;
- char buffer[32];
- unsigned long val;
- struct mali_pmu_core *pmu;
- _mali_osk_errcode_t err;
-
- if (count >= sizeof(buffer)) {
- return -ENOMEM;
- }
-
- if (copy_from_user(&buffer[0], buf, count)) {
- return -EFAULT;
- }
- buffer[count] = '\0';
-
- ret = strict_strtoul(&buffer[0], 10, &val);
- if (0 != ret) {
- return -EINVAL;
- }
-
- pmu = mali_pmu_get_global_pmu_core();
- MALI_DEBUG_ASSERT_POINTER(pmu);
-
- err = mali_pmu_power_down(pmu, val);
- if (_MALI_OSK_ERR_OK != err) {
- return -EINVAL;
- }
-
- *offp += count;
- return count;
-}
-
-static ssize_t pmu_power_up_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
-{
- int ret;
- char buffer[32];
- unsigned long val;
- struct mali_pmu_core *pmu;
- _mali_osk_errcode_t err;
-
- if (count >= sizeof(buffer)) {
- return -ENOMEM;
- }
-
- if (copy_from_user(&buffer[0], buf, count)) {
- return -EFAULT;
- }
- buffer[count] = '\0';
-
- ret = strict_strtoul(&buffer[0], 10, &val);
- if (0 != ret) {
- return -EINVAL;
- }
-
- pmu = mali_pmu_get_global_pmu_core();
- MALI_DEBUG_ASSERT_POINTER(pmu);
-
- err = mali_pmu_power_up(pmu, val);
- if (_MALI_OSK_ERR_OK != err) {
- return -EINVAL;
- }
-
- *offp += count;
- return count;
-}
-
-static const struct file_operations pmu_power_down_fops = {
- .owner = THIS_MODULE,
- .write = pmu_power_down_write,
-};
-
-static const struct file_operations pmu_power_up_fops = {
- .owner = THIS_MODULE,
- .write = pmu_power_up_write,
-};
-
static ssize_t pp_num_cores_enabled_write(struct file *filp, const char __user *buf, size_t count, loff_t *offp)
{
int ret;
}
buffer[count] = '\0';
- ret = strict_strtoul(&buffer[0], 10, &val);
+ ret = kstrtoul(&buffer[0], 10, &val);
if (0 != ret) {
return -EINVAL;
}
- ret = mali_pp_scheduler_set_perf_level(val, MALI_TRUE); /* override even if core scaling is disabled */
+ ret = mali_executor_set_perf_level(val, MALI_TRUE); /* override even if core scaling is disabled */
if (ret) {
return ret;
}
int r;
char buffer[64];
- r = snprintf(buffer, 64, "%u\n", mali_pp_scheduler_get_num_cores_enabled());
+ r = snprintf(buffer, 64, "%u\n", mali_executor_get_num_cores_enabled());
return simple_read_from_buffer(buf, count, offp, buffer, r);
}
int r;
char buffer[64];
- r = snprintf(buffer, 64, "%u\n", mali_pp_scheduler_get_num_cores_total());
+ r = snprintf(buffer, 64, "%u\n", mali_executor_get_num_cores_total());
return simple_read_from_buffer(buf, count, offp, buffer, r);
}
}
buffer[count] = '\0';
- ret = strict_strtoul(&buffer[0], 10, &val);
+ ret = kstrtoul(&buffer[0], 10, &val);
if (0 != ret) {
return -EINVAL;
}
switch (val) {
case 1:
- mali_pp_scheduler_core_scaling_enable();
+ mali_executor_core_scaling_enable();
break;
case 0:
- mali_pp_scheduler_core_scaling_disable();
+ mali_executor_core_scaling_disable();
break;
default:
return -EINVAL;
static ssize_t pp_core_scaling_enabled_read(struct file *filp, char __user *buf, size_t count, loff_t *offp)
{
- return simple_read_from_buffer(buf, count, offp, mali_pp_scheduler_core_scaling_is_enabled() ? "1\n" : "0\n", 2);
+ return simple_read_from_buffer(buf, count, offp, mali_executor_core_scaling_is_enabled() ? "1\n" : "0\n", 2);
}
static const struct file_operations pp_core_scaling_enabled_fops = {
.owner = THIS_MODULE,
seq_printf(s, "timeline system info: \n=================\n\n");
- mali_session_lock();
- MALI_SESSION_FOREACH(session, tmp, link){
- seq_printf(s, "session %d <%p> start:\n", session_seq,session);
- mali_timeline_debug_print_system(session->timeline_system,s);
+ mali_session_lock();
+ MALI_SESSION_FOREACH(session, tmp, link) {
+ seq_printf(s, "session %d <%p> start:\n", session_seq, session);
+ mali_timeline_debug_print_system(session->timeline_system, s);
seq_printf(s, "session %d end\n\n\n", session_seq++);
}
mali_session_unlock();
return 0;
}
-static int timeline_debugfs_open( struct inode *inode, struct file *file)
+static int timeline_debugfs_open(struct inode *inode, struct file *file)
{
return single_open(file, timeline_debugfs_show, inode->i_private);
}
} else {
if (NULL != mali_debugfs_dir) {
/* Debugfs directory created successfully; create files now */
- struct dentry *mali_pmu_dir;
struct dentry *mali_power_dir;
struct dentry *mali_gp_dir;
struct dentry *mali_pp_dir;
debugfs_create_file("version", 0400, mali_debugfs_dir, NULL, &version_fops);
- mali_pmu_dir = debugfs_create_dir("pmu", mali_debugfs_dir);
- if (NULL != mali_pmu_dir) {
- debugfs_create_file("power_down", 0200, mali_pmu_dir, NULL, &pmu_power_down_fops);
- debugfs_create_file("power_up", 0200, mali_pmu_dir, NULL, &pmu_power_up_fops);
- }
-
mali_power_dir = debugfs_create_dir("power", mali_debugfs_dir);
if (mali_power_dir != NULL) {
debugfs_create_file("always_on", 0600, mali_power_dir, NULL, &power_always_on_fops);
if (NULL != mali_l2_l2x_dir) {
debugfs_create_file("counter_src0", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src0_fops);
debugfs_create_file("counter_src1", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_src1_fops);
+ debugfs_create_file("counter_val0", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_val0_fops);
+ debugfs_create_file("counter_val1", 0600, mali_l2_l2x_dir, l2_cache, &l2_l2x_counter_val1_fops);
debugfs_create_file("base_addr", 0400, mali_l2_l2x_dir, &l2_cache->hw_core, &hw_core_base_addr_fops);
}
}
}
- debugfs_create_file("memory_usage", 0400, mali_debugfs_dir, NULL, &memory_usage_fops);
+ debugfs_create_file("gpu_memory", 0444, mali_debugfs_dir, NULL, &memory_usage_fops);
debugfs_create_file("utilization_gp_pp", 0400, mali_debugfs_dir, NULL, &utilization_gp_pp_fops);
debugfs_create_file("utilization_gp", 0400, mali_debugfs_dir, NULL, &utilization_gp_fops);
debugfs_create_file("counter_src1", 0600, mali_profiling_pp_dir, (void *)PRIVATE_DATA_COUNTER_MAKE_PP(1), &profiling_counter_src_fops);
}
- max_sub_jobs = mali_pp_scheduler_get_num_cores_total();
+ max_sub_jobs = mali_executor_get_num_cores_total();
for (i = 0; i < max_sub_jobs; i++) {
char buf[16];
struct dentry *mali_profiling_pp_x_dir;
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_KERNEL_SYSFS_H__
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#if !defined (MALI_LINUX_TRACE_H) || defined (TRACE_HEADER_MULTI_READ)
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/list.h>
#include "mali_osk_mali.h"
#include "mali_kernel_linux.h"
#include "mali_scheduler.h"
+#include "mali_executor.h"
#include "mali_kernel_descriptor_mapping.h"
#include "mali_memory.h"
#include "mali_memory_os_alloc.h"
#include "mali_memory_block_alloc.h"
+extern unsigned int mali_dedicated_mem_size;
+extern unsigned int mali_shared_mem_size;
+
/* session->memory_lock must be held when calling this function */
static void mali_mem_release(mali_mem_allocation *descriptor)
{
case MALI_MEM_BLOCK:
mali_mem_block_release(descriptor);
break;
+ default:
+ MALI_DEBUG_PRINT(1, ("mem type %d is not in the mali_mem_type enum.\n", descriptor->type));
+ break;
}
}
/* Put on descriptor map */
if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &descriptor->id)) {
_mali_osk_mutex_wait(session->memory_lock);
- mali_mem_os_release(descriptor);
+ if (MALI_MEM_OS == descriptor->type) {
+ mali_mem_os_release(descriptor);
+ } else if (MALI_MEM_BLOCK == descriptor->type) {
+ mali_mem_block_release(descriptor);
+ }
_mali_osk_mutex_signal(session->memory_lock);
return -EFAULT;
}
/* Umap and flush L2 */
mali_mmu_pagedir_unmap(session->page_directory, descriptor->mali_mapping.addr, descriptor->size);
- mali_scheduler_zap_all_active(session);
+ mali_executor_zap_all_active(session);
}
u32 _mali_ukk_report_memory_usage(void)
return sum;
}
+u32 _mali_ukk_report_total_memory_size(void)
+{
+ return mali_dedicated_mem_size + mali_shared_mem_size;
+}
+
+
/**
* Per-session memory descriptor mapping table sizes
*/
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_MEMORY_H__
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_kernel_common.h"
#include "mali_memory.h"
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010, 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_BLOCK_ALLOCATOR_H__
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/fs.h> /* file system operations */
/* sg must be page aligned. */
MALI_DEBUG_ASSERT(0 == size % MALI_MMU_PAGE_SIZE);
+ MALI_DEBUG_ASSERT(0 == (phys & ~(uintptr_t)0xFFFFFFFF));
mali_mmu_pagedir_update(pagedir, virt, phys, size, MALI_MMU_FLAGS_DEFAULT);
_mali_osk_errcode_t err;
int i;
int ret = 0;
+ u32 num_memory_cookies;
+ struct mali_session_data *session;
+
+ MALI_DEBUG_ASSERT_POINTER(job);
+
+ num_memory_cookies = mali_pp_job_num_memory_cookies(job);
- _mali_osk_mutex_wait(job->session->memory_lock);
+ session = mali_pp_job_get_session(job);
+
+ MALI_DEBUG_ASSERT_POINTER(session);
- for (i = 0; i < job->num_memory_cookies; i++) {
- int cookie = job->memory_cookies[i];
+ mali_session_memory_lock(session);
+
+ for (i = 0; i < num_memory_cookies; i++) {
+ u32 cookie = mali_pp_job_get_memory_cookie(job, i);
if (0 == cookie) {
/* 0 is not a valid cookie */
- MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
+ MALI_DEBUG_ASSERT(NULL ==
+ mali_pp_job_get_dma_buf(job, i));
continue;
}
MALI_DEBUG_ASSERT(0 < cookie);
- err = mali_descriptor_mapping_get(job->session->descriptor_mapping,
- cookie, (void **)&descriptor);
+ err = mali_descriptor_mapping_get(
+ mali_pp_job_get_session(job)->descriptor_mapping,
+ cookie, (void **)&descriptor);
if (_MALI_OSK_ERR_OK != err) {
MALI_DEBUG_PRINT_ERROR(("Mali DMA-buf: Failed to get descriptor for cookie %d\n", cookie));
ret = -EFAULT;
- MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
+ MALI_DEBUG_ASSERT(NULL ==
+ mali_pp_job_get_dma_buf(job, i));
continue;
}
if (MALI_MEM_DMA_BUF != descriptor->type) {
/* Not a DMA-buf */
- MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
+ MALI_DEBUG_ASSERT(NULL ==
+ mali_pp_job_get_dma_buf(job, i));
continue;
}
mem = descriptor->dma_buf.attachment;
MALI_DEBUG_ASSERT_POINTER(mem);
- MALI_DEBUG_ASSERT(mem->session == job->session);
+ MALI_DEBUG_ASSERT(mem->session == mali_pp_job_get_session(job));
err = mali_dma_buf_map(mem, mem->session, descriptor->mali_mapping.addr, descriptor->flags);
if (0 != err) {
MALI_DEBUG_PRINT_ERROR(("Mali DMA-buf: Failed to map dma-buf for cookie %d at mali address %x\b",
cookie, descriptor->mali_mapping.addr));
ret = -EFAULT;
- MALI_DEBUG_ASSERT(NULL == job->dma_bufs[i]);
+ MALI_DEBUG_ASSERT(NULL ==
+ mali_pp_job_get_dma_buf(job, i));
continue;
}
/* Add mem to list of DMA-bufs mapped for this job */
- job->dma_bufs[i] = mem;
+ mali_pp_job_set_dma_buf(job, i, mem);
}
- _mali_osk_mutex_signal(job->session->memory_lock);
+ mali_session_memory_unlock(session);
return ret;
}
void mali_dma_buf_unmap_job(struct mali_pp_job *job)
{
- int i;
- for (i = 0; i < job->num_dma_bufs; i++) {
- if (NULL == job->dma_bufs[i]) continue;
+ u32 i;
+ u32 num_dma_bufs = mali_pp_job_num_dma_bufs(job);
- mali_dma_buf_unmap(job->dma_bufs[i]);
- job->dma_bufs[i] = NULL;
+ for (i = 0; i < num_dma_bufs; i++) {
+ struct mali_dma_buf_attachment *mem;
+
+ mem = mali_pp_job_get_dma_buf(job, i);
+ if (NULL != mem) {
+ mali_dma_buf_unmap(mem);
+ mali_pp_job_set_dma_buf(job, i, NULL);
+ }
}
}
#endif /* !CONFIG_MALI_DMA_BUF_MAP_ON_ATTACH */
descriptor->flags = MALI_MEM_FLAG_MALI_GUARD_PAGE;
}
- _mali_osk_mutex_wait(session->memory_lock);
+ mali_session_memory_lock(session);
/* Map dma-buf into this session's page tables */
if (_MALI_OSK_ERR_OK != mali_mem_mali_map_prepare(descriptor)) {
- _mali_osk_mutex_signal(session->memory_lock);
+ mali_session_memory_unlock(session);
MALI_DEBUG_PRINT_ERROR(("Failed to map dma-buf on Mali\n"));
mali_mem_descriptor_destroy(descriptor);
mali_dma_buf_release(mem);
if (0 != mali_dma_buf_map(mem, session, descriptor->mali_mapping.addr, descriptor->flags)) {
mali_mem_mali_map_free(descriptor);
- _mali_osk_mutex_signal(session->memory_lock);
+ mali_session_memory_unlock(session);
MALI_DEBUG_PRINT_ERROR(("Failed to map dma-buf %d into Mali address space\n", fd));
mali_mem_descriptor_destroy(descriptor);
#endif
- _mali_osk_mutex_signal(session->memory_lock);
+ mali_session_memory_unlock(session);
/* Get descriptor mapping for memory. */
if (_MALI_OSK_ERR_OK != mali_descriptor_mapping_allocate_mapping(session->descriptor_mapping, descriptor, &md)) {
- _mali_osk_mutex_wait(session->memory_lock);
+ mali_session_memory_lock(session);
mali_mem_mali_map_free(descriptor);
- _mali_osk_mutex_signal(session->memory_lock);
+ mali_session_memory_unlock(session);
MALI_DEBUG_PRINT_ERROR(("Failed to create descriptor mapping for dma-buf %d\n", fd));
mali_mem_descriptor_destroy(descriptor);
/* Return stuff to user space */
if (0 != put_user(md, &user_arg->cookie)) {
- _mali_osk_mutex_wait(session->memory_lock);
+ mali_session_memory_lock(session);
mali_mem_mali_map_free(descriptor);
- _mali_osk_mutex_signal(session->memory_lock);
+ mali_session_memory_unlock(session);
MALI_DEBUG_PRINT_ERROR(("Failed to return descriptor to user space for dma-buf %d\n", fd));
mali_descriptor_mapping_free(session->descriptor_mapping, md);
MALI_DEBUG_PRINT(3, ("Mali DMA-buf: release descriptor cookie %ld\n", args.cookie));
- _mali_osk_mutex_wait(session->memory_lock);
+ mali_session_memory_lock(session);
descriptor = mali_descriptor_mapping_free(session->descriptor_mapping, (u32)args.cookie);
ret = -EINVAL;
}
- _mali_osk_mutex_signal(session->memory_lock);
+ mali_session_memory_unlock(session);
/* Return the error that _mali_ukk_map_external_ump_mem produced */
return ret;
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_MEMORY_DMA_BUF_H__
extern "C" {
#endif
+#include "mali_uk_types.h"
#include "mali_osk.h"
#include "mali_memory.h"
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_kernel_common.h"
/* size must be a multiple of the system page size */
if (args->size % _MALI_OSK_MALI_PAGE_SIZE) MALI_ERROR(_MALI_OSK_ERR_INVALID_ARGS);
- MALI_DEBUG_PRINT(2,
+ MALI_DEBUG_PRINT(3,
("Requested to map physical memory 0x%x-0x%x into virtual memory 0x%x\n",
args->phys_addr, (args->phys_addr + args->size - 1),
args->mali_address));
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/list.h>
#if defined(CONFIG_ARM) && !defined(CONFIG_ARM_LPAE)
flags |= GFP_HIGHUSER;
#else
+	/* Since kernel 3.15.0, use ZONE_DMA instead of ZONE_DMA32 */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
flags |= GFP_DMA32;
+#else
+ flags |= GFP_DMA;
+#endif
#endif
new_page = alloc_page(flags);
err = mali_mem_os_mali_map(descriptor, session); /* Map on Mali */
if (0 != err) goto mali_map_failed;
- _mali_osk_mutex_signal(session->memory_lock);
-
err = mali_mem_os_cpu_map(descriptor, vma); /* Map on CPU */
if (0 != err) goto cpu_map_failed;
+ _mali_osk_mutex_signal(session->memory_lock);
return descriptor;
cpu_map_failed:
static unsigned long mali_mem_os_shrink_count(struct shrinker *shrinker, struct shrink_control *sc)
{
- return mali_mem_os_allocator.pool_count + mali_mem_page_table_page_pool.count;
+ return mali_mem_os_allocator.pool_count;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0)
mali_mem_os_free_page(page);
}
- /* Release some pages from page table page pool */
- mali_mem_os_trim_page_table_page_pool();
-
if (MALI_OS_MEMORY_KERNEL_BUFFER_SIZE_IN_PAGES > mali_mem_os_allocator.pool_count) {
/* Pools are empty, stop timer */
MALI_DEBUG_PRINT(5, ("Stopping timer, only %u pages on pool\n", mali_mem_os_allocator.pool_count));
cancel_delayed_work(&mali_mem_os_allocator.timed_shrinker);
}
- return mali_mem_os_allocator.pool_count + mali_mem_page_table_page_pool.count;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+ return mali_mem_os_shrink_count(shrinker, sc);
+#else
+ return nr;
+#endif
}
static void mali_mem_os_trim_pool(struct work_struct *data)
unregister_shrinker(&mali_mem_os_allocator.shrinker);
cancel_delayed_work_sync(&mali_mem_os_allocator.timed_shrinker);
- destroy_workqueue(mali_mem_os_allocator.wq);
+
+ if (NULL != mali_mem_os_allocator.wq) {
+ destroy_workqueue(mali_mem_os_allocator.wq);
+ mali_mem_os_allocator.wq = NULL;
+ }
spin_lock(&mali_mem_os_allocator.pool_lock);
list_for_each_entry_safe(page, tmp, &mali_mem_os_allocator.pool_pages, lru) {
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_MEMORY_OS_ALLOC_H__
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_MEMORY_TYPES_H__
MALI_MEM_DMA_BUF,
MALI_MEM_UMP,
MALI_MEM_BLOCK,
+ MALI_MEM_TYPE_MAX,
} mali_mem_type;
typedef struct mali_mem_os_mem {
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_ukk.h"
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010, 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
return atomic_inc_return((atomic_t *)&atom->u.val);
}
-_mali_osk_errcode_t _mali_osk_atomic_init(_mali_osk_atomic_t *atom, u32 val)
+void _mali_osk_atomic_init(_mali_osk_atomic_t *atom, u32 val)
{
- MALI_CHECK_NON_NULL(atom, _MALI_OSK_ERR_INVALID_ARGS);
+ MALI_DEBUG_ASSERT_POINTER(atom);
atomic_set((atomic_t *)&atom->u.val, val);
- return _MALI_OSK_ERR_OK;
}
u32 _mali_osk_atomic_read(_mali_osk_atomic_t *atom)
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
case _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP:
return "_MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP";
break;
- case _MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL:
- return "_MALI_OSK_LOCK_ORDER_GROUP_VIRTUAL";
+ case _MALI_OSK_LOCK_ORDER_PM_EXECUTION:
+ return "_MALI_OSK_LOCK_ORDER_PM_EXECUTION";
break;
- case _MALI_OSK_LOCK_ORDER_GROUP:
- return "_MALI_OSK_LOCK_ORDER_GROUP";
+ case _MALI_OSK_LOCK_ORDER_EXECUTOR:
+ return "_MALI_OSK_LOCK_ORDER_EXECUTOR";
+ break;
+ case _MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM:
+ return "_MALI_OSK_LOCK_ORDER_TIMELINE_SYSTEM";
break;
case _MALI_OSK_LOCK_ORDER_SCHEDULER:
return "_MALI_OSK_LOCK_ORDER_SCHEDULER";
break;
- case _MALI_OSK_LOCK_ORDER_PM_CORE_STATE:
- return "_MALI_OSK_LOCK_ORDER_PM_CORE_STATE";
+ case _MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED:
+ return "_MALI_OSK_LOCK_ORDER_SCHEDULER_DEFERRED";
break;
- case _MALI_OSK_LOCK_ORDER_L2_COMMAND:
- return "_MALI_OSK_LOCK_ORDER_L2_COMMAND";
+ case _MALI_OSK_LOCK_ORDER_DMA_COMMAND:
+ return "_MALI_OSK_LOCK_ORDER_DMA_COMMAND";
break;
case _MALI_OSK_LOCK_ORDER_PROFILING:
return "_MALI_OSK_LOCK_ORDER_PROFILING";
break;
- case _MALI_OSK_LOCK_ORDER_L2_COUNTER:
- return "_MALI_OSK_LOCK_ORDER_L2_COUNTER";
+ case _MALI_OSK_LOCK_ORDER_L2:
+ return "_MALI_OSK_LOCK_ORDER_L2";
+ break;
+ case _MALI_OSK_LOCK_ORDER_L2_COMMAND:
+ return "_MALI_OSK_LOCK_ORDER_L2_COMMAND";
break;
case _MALI_OSK_LOCK_ORDER_UTILIZATION:
return "_MALI_OSK_LOCK_ORDER_UTILIZATION";
break;
- case _MALI_OSK_LOCK_ORDER_PM_EXECUTE:
- return "_MALI_OSK_LOCK_ORDER_PM_EXECUTE";
- break;
case _MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS:
return "_MALI_OSK_LOCK_ORDER_SESSION_PENDING_JOBS";
break;
+ case _MALI_OSK_LOCK_ORDER_PM_STATE:
+ return "_MALI_OSK_LOCK_ORDER_PM_STATE";
+ break;
default:
- return "";
+ return "<UNKNOWN_LOCK_ORDER>";
}
}
#endif /* LOCK_ORDER_CHECKING */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
wmb();
}
-mali_io_address _mali_osk_mem_mapioregion(u32 phys, u32 size, const char *description)
+mali_io_address _mali_osk_mem_mapioregion(uintptr_t phys, u32 size, const char *description)
{
return (mali_io_address)ioremap_nocache(phys, size);
}
-void _mali_osk_mem_unmapioregion(u32 phys, u32 size, mali_io_address virt)
+void _mali_osk_mem_unmapioregion(uintptr_t phys, u32 size, mali_io_address virt)
{
iounmap((void *)virt);
}
-_mali_osk_errcode_t inline _mali_osk_mem_reqregion(u32 phys, u32 size, const char *description)
+_mali_osk_errcode_t inline _mali_osk_mem_reqregion(uintptr_t phys, u32 size, const char *description)
{
#if MALI_LICENSE_IS_GPL
return _MALI_OSK_ERR_OK; /* GPL driver gets the mem region for the resources registered automatically */
#endif
}
-void inline _mali_osk_mem_unreqregion(u32 phys, u32 size)
+void inline _mali_osk_mem_unreqregion(uintptr_t phys, u32 size)
{
#if !MALI_LICENSE_IS_GPL
release_mem_region(phys, size);
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
+
/**
* @file mali_osk_mali.c
* Implementation of the OS abstraction layer which is specific for the Mali kernel device driver
*/
+#include "../platform/rk30/custom_log.h"
+
#include <linux/kernel.h>
#include <asm/uaccess.h>
#include <linux/platform_device.h>
#include <linux/mali/mali_utgard.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
#include "mali_osk_mali.h"
#include "mali_kernel_common.h" /* MALI_xxx macros */
#include "mali_osk.h" /* kernel side OS functions */
#include "mali_kernel_linux.h"
-static u32 _mali_osk_resource_irq(_mali_osk_resource_t *res)
+
+
+#ifdef CONFIG_MALI_DT
+
+#define MALI_OSK_INVALID_RESOURCE_ADDRESS 0xFFFFFFFF
+
+/**
+ * Define the max number of resources we could have.
+ */
+#define MALI_OSK_MAX_RESOURCE_NUMBER 27
+
+/**
+ * Define the max number of resources with interrupts; they are
+ * the first 20 elements in array mali_osk_resource_bank.
+ */
+#define MALI_OSK_RESOURCE_WITH_IRQ_NUMBER 20
+
+/**
+ * pp core start and end location in mali_osk_resource_bank array.
+ */
+#define MALI_OSK_RESOURCE_PP_LOCATION_START 2
+#define MALI_OSK_RESOURCE_PP_LOCATION_END 17
+
+/**
+ * L2 cache start and end location in mali_osk_resource_bank array.
+ */
+#define MALI_OSK_RESOURCE_L2_LOCATION_START 20
+#define MALI_OSK_RESOURCE_l2_LOCATION_END 22
+
+static _mali_osk_resource_t mali_osk_resource_bank[MALI_OSK_MAX_RESOURCE_NUMBER] = {
+/*-------------------------------------------------------*/
+/* rk_ext : to use dts_for_mali_ko_befor_r5p0-01rel0. */
+// {.description = "Mali_GP", .base = MALI_OFFSET_GP, .irq_name = "IRQGP",},
+{.description = "Mali_GP", .base = MALI_OFFSET_GP, .irq_name = "Mali_GP_IRQ",},
+// {.description = "Mali_GP_MMU", .base = MALI_OFFSET_GP_MMU, .irq_name = "IRQGPMMU",},
+{.description = "Mali_GP_MMU", .base = MALI_OFFSET_GP_MMU, .irq_name = "Mali_GP_MMU_IRQ",},
+// {.description = "Mali_PP0", .base = MALI_OFFSET_PP0, .irq_name = "IRQPP0",},
+{.description = "Mali_PP0", .base = MALI_OFFSET_PP0, .irq_name = "Mali_PP0_IRQ",},
+// {.description = "Mali_PP0_MMU", .base = MALI_OFFSET_PP0_MMU, .irq_name = "IRQPPMMU0",},
+{.description = "Mali_PP0_MMU", .base = MALI_OFFSET_PP0_MMU, .irq_name = "Mali_PP0_MMU_IRQ",},
+// {.description = "Mali_PP1", .base = MALI_OFFSET_PP1, .irq_name = "IRQPP1",},
+{.description = "Mali_PP1", .base = MALI_OFFSET_PP1, .irq_name = "Mali_PP1_IRQ",},
+// {.description = "Mali_PP1_MMU", .base = MALI_OFFSET_PP1_MMU, .irq_name = "IRQPPMMU1",},
+{.description = "Mali_PP1_MMU", .base = MALI_OFFSET_PP1_MMU, .irq_name = "Mali_PP1_MMU_IRQ",},
+/*-------------------------------------------------------*/
+{.description = "Mali_PP2", .base = MALI_OFFSET_PP2, .irq_name = "IRQPP2",},
+{.description = "Mali_PP2_MMU", .base = MALI_OFFSET_PP2_MMU, .irq_name = "IRQPPMMU2",},
+{.description = "Mali_PP3", .base = MALI_OFFSET_PP3, .irq_name = "IRQPP3",},
+{.description = "Mali_PP3_MMU", .base = MALI_OFFSET_PP3_MMU, .irq_name = "IRQPPMMU3",},
+{.description = "Mali_PP4", .base = MALI_OFFSET_PP4, .irq_name = "IRQPP4",},
+{.description = "Mali_PP4_MMU", .base = MALI_OFFSET_PP4_MMU, .irq_name = "IRQPPMMU4",},
+{.description = "Mali_PP5", .base = MALI_OFFSET_PP5, .irq_name = "IRQPP5",},
+{.description = "Mali_PP5_MMU", .base = MALI_OFFSET_PP5_MMU, .irq_name = "IRQPPMMU5",},
+{.description = "Mali_PP6", .base = MALI_OFFSET_PP6, .irq_name = "IRQPP6",},
+{.description = "Mali_PP6_MMU", .base = MALI_OFFSET_PP6_MMU, .irq_name = "IRQPPMMU6",},
+{.description = "Mali_PP7", .base = MALI_OFFSET_PP7, .irq_name = "IRQPP7",},
+{.description = "Mali_PP7_MMU", .base = MALI_OFFSET_PP7_MMU, .irq_name = "IRQPPMMU",},
+{.description = "Mali_PP_Broadcast", .base = MALI_OFFSET_PP_BCAST, .irq_name = "IRQPP",},
+{.description = "Mali_PMU", .base = MALI_OFFSET_PMU, .irq_name = "IRQPMU",},
+{.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE0,},
+{.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE1,},
+{.description = "Mali_L2", .base = MALI_OFFSET_L2_RESOURCE2,},
+{.description = "Mali_PP_MMU_Broadcast", .base = MALI_OFFSET_PP_BCAST_MMU,},
+{.description = "Mali_Broadcast", .base = MALI_OFFSET_BCAST,},
+{.description = "Mali_DLBU", .base = MALI_OFFSET_DLBU,},
+{.description = "Mali_DMA", .base = MALI_OFFSET_DMA,},
+};
+
+_mali_osk_errcode_t _mali_osk_resource_initialize(void)
+{
+ mali_bool mali_is_450 = MALI_FALSE;
+ int i, pp_core_num = 0, l2_core_num = 0;
+ struct resource *res;
+
+ for (i = 0; i < MALI_OSK_RESOURCE_WITH_IRQ_NUMBER; i++) {
+ res = platform_get_resource_byname(mali_platform_device, IORESOURCE_IRQ, mali_osk_resource_bank[i].irq_name);
+ if (res) {
+ mali_osk_resource_bank[i].irq = res->start;
+ if (0 == strncmp("Mali_PP_Broadcast", mali_osk_resource_bank[i].description,
+ strlen(mali_osk_resource_bank[i].description))) {
+ mali_is_450 = MALI_TRUE;
+ }
+ } else {
+ mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS;
+ }
+ }
+
+ for (i = MALI_OSK_RESOURCE_PP_LOCATION_START; i <= MALI_OSK_RESOURCE_PP_LOCATION_END; i++) {
+ if (MALI_OSK_INVALID_RESOURCE_ADDRESS != mali_osk_resource_bank[i].base) {
+ pp_core_num++;
+ }
+ }
+
+	/* We have to divide by 2, because we count twice for each pp (pp_core and pp_mmu_core). */
+ if (0 != pp_core_num % 2) {
+ MALI_DEBUG_PRINT(2, ("The value of pp core number isn't normal."));
+ return _MALI_OSK_ERR_FAULT;
+ }
+
+ pp_core_num /= 2;
+
+	/**
+	 * We can calculate the number of L2 cache cores from the number of
+	 * pp cores and the device type (mali400/mali450).
+	 */
+ if (mali_is_450 && 4 < pp_core_num) {
+ l2_core_num = 3;
+ } else if (mali_is_450 && 4 >= pp_core_num) {
+ l2_core_num = 2;
+ } else {
+ l2_core_num = 1;
+ }
+
+ for (i = MALI_OSK_RESOURCE_l2_LOCATION_END; i > MALI_OSK_RESOURCE_L2_LOCATION_START + l2_core_num - 1; i--) {
+ mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS;
+ }
+
+ /* If device is not mali-450 type, we have to remove related resource from resource bank. */
+ if (!mali_is_450) {
+ for (i = MALI_OSK_RESOURCE_l2_LOCATION_END + 1; i < MALI_OSK_MAX_RESOURCE_NUMBER; i++) {
+ mali_osk_resource_bank[i].base = MALI_OSK_INVALID_RESOURCE_ADDRESS;
+ }
+ }
+
+ return _MALI_OSK_ERR_OK;
+}
+
+_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res)
{
int i;
- char name[32];
- struct resource *resource;
- snprintf(name, sizeof(name), "%s_IRQ", res->description);
- for (i = 0; i < mali_platform_device->num_resources; i++) {
- resource = &mali_platform_device->resource[i];
- if (IORESOURCE_IRQ == resource_type(resource) &&
- !strncmp(name, resource->name, sizeof(name)))
- return resource->start;
+ if (NULL == mali_platform_device) {
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
+ }
+
+ /* Traverse all of resources in resources bank to find the matching one. */
+ for (i = 0; i < MALI_OSK_MAX_RESOURCE_NUMBER; i++) {
+ if (mali_osk_resource_bank[i].base == addr) {
+ if (NULL != res) {
+ res->base = addr + _mali_osk_resource_base_address();
+ res->description = mali_osk_resource_bank[i].description;
+ res->irq = mali_osk_resource_bank[i].irq;
+ }
+ return _MALI_OSK_ERR_OK;
+ }
}
- return -1;
+ return _MALI_OSK_ERR_ITEM_NOT_FOUND;
}
+uintptr_t _mali_osk_resource_base_address(void)
+{
+ struct resource *reg_res = NULL;
+ uintptr_t ret = 0;
+
+ // reg_res = platform_get_resource(mali_platform_device, IORESOURCE_MEM, 0);
+	/*
+	 * rk_ext : to use dts_for_mali_ko_befor_r5p0-01rel0.
+	 * About the "1" below:
+	 * in dts_for_mali_ko_befor_r5p0-01rel0,
+	 * base_addr is defined in the second item (index "1") of the "reg" property.
+	 */
+ reg_res = platform_get_resource(mali_platform_device, IORESOURCE_MEM, 1);
+
+ if (NULL != reg_res) {
+ ret = reg_res->start;
+ }
+ // D_HEX( (unsigned int)ret);
+
+ return ret;
+}
+
+void _mali_osk_device_data_pmu_config_get(u16 *domain_config_array, int array_size)
+{
+ struct device_node *node = mali_platform_device->dev.of_node;
+ struct property *prop;
+ const __be32 *p;
+ int length = 0, i = 0;
+ u32 u;
+
+ MALI_DEBUG_ASSERT(NULL != node);
+
+ if (!of_get_property(node, "pmu_domain_config", &length)) {
+ return;
+ }
+
+ if (array_size != length/sizeof(u32)) {
+ MALI_PRINT_ERROR(("Wrong pmu domain config in device tree."));
+ return;
+ }
+
+ of_property_for_each_u32(node, "pmu_domain_config", prop, p, u) {
+ domain_config_array[i] = (u16)u;
+ i++;
+ }
+
+ return;
+}
+
+u32 _mali_osk_get_pmu_switch_delay(void)
+{
+ struct device_node *node = mali_platform_device->dev.of_node;
+ u32 switch_delay;
+
+ MALI_DEBUG_ASSERT(NULL != node);
+
+ if (0 == of_property_read_u32(node, "pmu_switch_delay", &switch_delay)) {
+ return switch_delay;
+ } else {
+ MALI_DEBUG_PRINT(2, ("Couldn't find pmu_switch_delay in device tree configuration.\n"));
+ }
+
+ return 0;
+}
+
+#else /* CONFIG_MALI_DT */ /* i.e. CONFIG_MALI_DT is not defined. */
+
_mali_osk_errcode_t _mali_osk_resource_find(u32 addr, _mali_osk_resource_t *res)
{
int i;
+ uintptr_t phys_addr;
if (NULL == mali_platform_device) {
/* Not connected to a device */
return _MALI_OSK_ERR_ITEM_NOT_FOUND;
}
+ phys_addr = addr + _mali_osk_resource_base_address();
for (i = 0; i < mali_platform_device->num_resources; i++) {
if (IORESOURCE_MEM == resource_type(&(mali_platform_device->resource[i])) &&
- mali_platform_device->resource[i].start == addr) {
+ mali_platform_device->resource[i].start == phys_addr) {
if (NULL != res) {
- res->base = addr;
+ res->base = phys_addr;
res->description = mali_platform_device->resource[i].name;
- res->irq = _mali_osk_resource_irq(res);
-#if 0
/* Any (optional) IRQ resource belonging to this resource will follow */
if ((i + 1) < mali_platform_device->num_resources &&
IORESOURCE_IRQ == resource_type(&(mali_platform_device->resource[i + 1]))) {
} else {
res->irq = -1;
}
-#endif
}
return _MALI_OSK_ERR_OK;
}
return _MALI_OSK_ERR_ITEM_NOT_FOUND;
}
-u32 _mali_osk_resource_base_address(void)
+uintptr_t _mali_osk_resource_base_address(void)
{
- u32 lowest_addr = 0xFFFFFFFF;
- u32 ret = 0;
+ uintptr_t lowest_addr = (uintptr_t)(0 - 1);
+ uintptr_t ret = 0;
if (NULL != mali_platform_device) {
int i;
return ret;
}
+void _mali_osk_device_data_pmu_config_get(u16 *domain_config_array, int array_size)
+{
+ _mali_osk_device_data data = { 0, };
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_device_data_get(&data)) {
+ /* Copy the custom customer power domain config */
+ _mali_osk_memcpy(domain_config_array, data.pmu_domain_config, sizeof(data.pmu_domain_config));
+ }
+
+ return;
+}
+
+u32 _mali_osk_get_pmu_switch_delay(void)
+{
+ _mali_osk_errcode_t err;
+ _mali_osk_device_data data = { 0, };
+
+ err = _mali_osk_device_data_get(&data);
+
+ if (_MALI_OSK_ERR_OK == err) {
+ return data.pmu_switch_delay;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_MALI_DT */
+
_mali_osk_errcode_t _mali_osk_device_data_get(_mali_osk_device_data *data)
{
MALI_DEBUG_ASSERT_POINTER(data);
return _MALI_OSK_ERR_ITEM_NOT_FOUND;
}
+u32 _mali_osk_l2_resource_count(void)
+{
+ u32 l2_core_num = 0;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_L2_RESOURCE0, NULL))
+ l2_core_num++;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_L2_RESOURCE1, NULL))
+ l2_core_num++;
+
+ if (_MALI_OSK_ERR_OK == _mali_osk_resource_find(MALI_OFFSET_L2_RESOURCE2, NULL))
+ l2_core_num++;
+
+ MALI_DEBUG_ASSERT(0 < l2_core_num);
+
+ return l2_core_num;
+}
+
mali_bool _mali_osk_shared_interrupts(void)
{
u32 irqs[128];
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010, 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2011, 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
char buf[512];
va_start(args, fmt);
- vscnprintf(buf,512,fmt,args);
- seq_printf(print_ctx,buf);
+ vscnprintf(buf, 512, fmt, args);
+ seq_printf(print_ctx, buf);
va_end(args);
}
return (u32)current->tgid;
}
+char *_mali_osk_get_comm(void)
+{
+ return (char *)current->comm;
+}
+
+
u32 _mali_osk_get_tid(void)
{
/* pid is actually identifying the thread on Linux */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
/**
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
#include "mali_kernel_common.h"
#include "mali_kernel_linux.h"
-static _mali_osk_atomic_t mali_pm_ref_count;
-
-void _mali_osk_pm_dev_enable(void)
-{
- _mali_osk_atomic_init(&mali_pm_ref_count, 0);
-}
-
-void _mali_osk_pm_dev_disable(void)
-{
- _mali_osk_atomic_term(&mali_pm_ref_count);
-}
-
/* Can NOT run in atomic context */
-_mali_osk_errcode_t _mali_osk_pm_dev_ref_add(void)
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_sync(void)
{
#ifdef CONFIG_PM_RUNTIME
int err;
MALI_PRINT_ERROR(("Mali OSK PM: pm_runtime_get_sync() returned error code %d\n", err));
return _MALI_OSK_ERR_FAULT;
}
- _mali_osk_atomic_inc(&mali_pm_ref_count);
- MALI_DEBUG_PRINT(4, ("Mali OSK PM: Power ref taken (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
#endif
return _MALI_OSK_ERR_OK;
}
/* Can run in atomic context */
-void _mali_osk_pm_dev_ref_dec(void)
+_mali_osk_errcode_t _mali_osk_pm_dev_ref_get_async(void)
{
#ifdef CONFIG_PM_RUNTIME
+ int err;
MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
- _mali_osk_atomic_dec(&mali_pm_ref_count);
+ err = pm_runtime_get(&(mali_platform_device->dev));
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
pm_runtime_mark_last_busy(&(mali_platform_device->dev));
- pm_runtime_put_autosuspend(&(mali_platform_device->dev));
-#else
- pm_runtime_put(&(mali_platform_device->dev));
#endif
- MALI_DEBUG_PRINT(4, ("Mali OSK PM: Power ref released (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
+ if (0 > err && -EINPROGRESS != err) {
+ MALI_PRINT_ERROR(("Mali OSK PM: pm_runtime_get() returned error code %d\n", err));
+ return _MALI_OSK_ERR_FAULT;
+ }
#endif
+ return _MALI_OSK_ERR_OK;
}
-/* Can run in atomic context */
-mali_bool _mali_osk_pm_dev_ref_add_no_power_on(void)
-{
-#ifdef CONFIG_PM_RUNTIME
- u32 ref;
- MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
- pm_runtime_get_noresume(&(mali_platform_device->dev));
- ref = _mali_osk_atomic_read(&mali_pm_ref_count);
- MALI_DEBUG_PRINT(4, ("Mali OSK PM: No-power ref taken (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
- return ref > 0 ? MALI_TRUE : MALI_FALSE;
-#else
- return MALI_TRUE;
-#endif
-}
/* Can run in atomic context */
-void _mali_osk_pm_dev_ref_dec_no_power_on(void)
+void _mali_osk_pm_dev_ref_put(void)
{
#ifdef CONFIG_PM_RUNTIME
MALI_DEBUG_ASSERT_POINTER(mali_platform_device);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
+ pm_runtime_mark_last_busy(&(mali_platform_device->dev));
pm_runtime_put_autosuspend(&(mali_platform_device->dev));
#else
pm_runtime_put(&(mali_platform_device->dev));
#endif
- MALI_DEBUG_PRINT(4, ("Mali OSK PM: No-power ref released (%u)\n", _mali_osk_atomic_read(&mali_pm_ref_count)));
#endif
}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/module.h>
#include "mali_linux_trace.h"
#include "mali_gp.h"
#include "mali_pp.h"
-#include "mali_pp_scheduler.h"
#include "mali_l2_cache.h"
#include "mali_user_settings_db.h"
+#include "mali_executor.h"
_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start)
{
if (NULL != l2_cache_core) {
u32 counter_src = (counter_id - COUNTER_L2_0_C0) & 1;
- if (0 == counter_src) {
- mali_l2_cache_core_set_counter_src0(l2_cache_core, event_id);
- } else {
- mali_l2_cache_core_set_counter_src1(l2_cache_core, event_id);
- }
+ mali_l2_cache_core_set_counter_src(l2_cache_core,
+ counter_src, event_id);
}
} else {
return 0; /* Failure, unknown event */
*/
u32 _mali_profiling_get_l2_counters(_mali_profiling_l2_counter_values *values)
{
- struct mali_l2_cache_core *l2_cache;
u32 l2_cores_num = mali_l2_cache_core_get_glob_num_l2_cores();
u32 i;
- u32 ret = 0;
MALI_DEBUG_ASSERT(l2_cores_num <= 3);
for (i = 0; i < l2_cores_num; i++) {
- l2_cache = mali_l2_cache_core_get_glob_l2_core(i);
+ struct mali_l2_cache_core *l2_cache = mali_l2_cache_core_get_glob_l2_core(i);
if (NULL == l2_cache) {
continue;
}
- if (MALI_TRUE == mali_l2_cache_lock_power_state(l2_cache)) {
- /* It is now safe to access the L2 cache core in order to retrieve the counters */
- mali_l2_cache_core_get_counter_values(l2_cache,
- &values->cores[i].source0,
- &values->cores[i].value0,
- &values->cores[i].source1,
- &values->cores[i].value1);
- } else {
- /* The core was not available, set the right bit in the mask. */
- ret |= (1 << i);
- }
- mali_l2_cache_unlock_power_state(l2_cache);
+ mali_l2_cache_core_get_counter_values(l2_cache,
+ &values->cores[i].source0,
+ &values->cores[i].value0,
+ &values->cores[i].source1,
+ &values->cores[i].value1);
}
- return ret;
+ return 0;
}
/**
values->mali_version_major = mali_kernel_core_get_gpu_major_version();
values->mali_version_minor = mali_kernel_core_get_gpu_minor_version();
values->num_of_l2_cores = mali_l2_cache_core_get_glob_num_l2_cores();
- values->num_of_fp_cores = mali_pp_scheduler_get_num_cores_total();
+ values->num_of_fp_cores = mali_executor_get_num_cores_total();
values->num_of_vp_cores = 1;
}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010, 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
#include <asm/uaccess.h>
#include <linux/platform_device.h>
-#include <linux/dmapool.h>
#include <linux/gfp.h>
#include <linux/hardirq.h>
+
#include "mali_osk_types.h"
#include "mali_kernel_linux.h"
typedef u32 mali_dma_addr;
-
-MALI_STATIC_INLINE mali_dma_pool mali_dma_pool_create(u32 size, u32 alignment, u32 boundary)
-{
- return dma_pool_create("mali-dma", &mali_platform_device->dev,
- (size_t)size, (size_t)alignment, (size_t)boundary);
-}
-
-MALI_STATIC_INLINE void mali_dma_pool_destroy(mali_dma_pool pool)
-{
- dma_pool_destroy(pool);
-}
-
-MALI_STATIC_INLINE mali_io_address mali_dma_pool_alloc(mali_dma_pool pool, mali_dma_addr *phys_addr)
-{
- void *ret;
- dma_addr_t phys;
-
- ret = dma_pool_alloc(pool, GFP_KERNEL, &phys);
-#if defined(CONFIG_ARCH_DMA_ADDR_T_64BIT)
- /* Verify that the "physical" address is 32-bit and
- * usable for Mali, when on a system with bus addresses
- * wider than 32-bit. */
- BUG_ON(0 != (phys >> 32));
-#endif
- *phys_addr = phys;
-
- return ret;
-}
-
-MALI_STATIC_INLINE void mali_dma_pool_free(mali_dma_pool pool, void *virt_addr, mali_dma_addr phys_addr)
-{
- dma_pool_free(pool, virt_addr, (dma_addr_t)phys_addr);
-}
-
-
#if MALI_ENABLE_CPU_CYCLES
/* Reads out the clock cycle performance counter of the current cpu.
It is useful for cost-free (2 cycle) measuring of the time spent
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010, 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
#include <linux/time.h>
#include <asm/delay.h>
-int _mali_osk_time_after(u32 ticka, u32 tickb)
+mali_bool _mali_osk_time_after_eq(unsigned long ticka, unsigned long tickb)
{
- return time_after((unsigned long)ticka, (unsigned long)tickb);
+ return time_after_eq(ticka, tickb) ?
+ MALI_TRUE : MALI_FALSE;
}
-u32 _mali_osk_time_mstoticks(u32 ms)
+unsigned long _mali_osk_time_mstoticks(u32 ms)
{
return msecs_to_jiffies(ms);
}
-u32 _mali_osk_time_tickstoms(u32 ticks)
+u32 _mali_osk_time_tickstoms(unsigned long ticks)
{
return jiffies_to_msecs(ticks);
}
-u32 _mali_osk_time_tickcount(void)
+unsigned long _mali_osk_time_tickcount(void)
{
return jiffies;
}
getnstimeofday(&tsval);
return (u64)timespec_to_ns(&tsval);
}
+
+u64 _mali_osk_boot_time_get_ns(void)
+{
+ struct timespec tsval;
+ get_monotonic_boottime(&tsval);
+ return (u64)timespec_to_ns(&tsval);
+}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
return t;
}
-void _mali_osk_timer_add(_mali_osk_timer_t *tim, u32 ticks_to_expire)
+void _mali_osk_timer_add(_mali_osk_timer_t *tim, unsigned long ticks_to_expire)
{
MALI_DEBUG_ASSERT_POINTER(tim);
tim->timer.expires = jiffies + ticks_to_expire;
add_timer(&(tim->timer));
}
-void _mali_osk_timer_mod(_mali_osk_timer_t *tim, u32 ticks_to_expire)
+void _mali_osk_timer_mod(_mali_osk_timer_t *tim, unsigned long ticks_to_expire)
{
MALI_DEBUG_ASSERT_POINTER(tim);
mod_timer(&(tim->timer), jiffies + ticks_to_expire);
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
/**
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010, 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
* @file mali_pmu_power_up_down.c
*/
-#include <linux/version.h>
-#include <linux/sched.h>
#include <linux/module.h>
-#include "mali_osk.h"
-#include "mali_kernel_common.h"
-#include "mali_pmu.h"
-#include "mali_pp_scheduler.h"
-#include "linux/mali/mali_utgard.h"
-
-/* Mali PMU power up/down APIs */
-
-int mali_pmu_powerup(void)
-{
- struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
-
- MALI_DEBUG_PRINT(5, ("Mali PMU: Power up\n"));
-
- MALI_DEBUG_ASSERT_POINTER(pmu);
- if (NULL == pmu) {
- return -ENXIO;
- }
-
- if (_MALI_OSK_ERR_OK != mali_pmu_power_up_all(pmu)) {
- return -EFAULT;
- }
-
- return 0;
-}
-
-EXPORT_SYMBOL(mali_pmu_powerup);
-
-int mali_pmu_powerdown(void)
-{
- struct mali_pmu_core *pmu = mali_pmu_get_global_pmu_core();
-
- MALI_DEBUG_PRINT(5, ("Mali PMU: Power down\n"));
-
- MALI_DEBUG_ASSERT_POINTER(pmu);
- if (NULL == pmu) {
- return -ENXIO;
- }
-
- if (_MALI_OSK_ERR_OK != mali_pmu_power_down_all(pmu)) {
- return -EFAULT;
- }
-
- return 0;
-}
-
-EXPORT_SYMBOL(mali_pmu_powerdown);
+#include "mali_executor.h"
int mali_perf_set_num_pp_cores(unsigned int num_cores)
{
- return mali_pp_scheduler_set_perf_level(num_cores, MALI_FALSE);
+ return mali_executor_set_perf_level(num_cores, MALI_FALSE);
}
EXPORT_SYMBOL(mali_perf_set_num_pp_cores);
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012, 2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2012, 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_PROFILING_EVENTS_H__
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_PROFILING_GATOR_API_H__
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_kernel_common.h"
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_PROFILING_INTERNAL_H__
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_sync.h"
struct mali_sync_pt {
struct sync_pt sync_pt;
struct mali_sync_flag *flag;
+ struct sync_timeline *sync_tl; /**< Sync timeline this pt is connected to. */
};
/**
struct kref refcount; /**< Reference count. */
};
+/**
+ * Mali sync timeline is used to connect mali timeline to sync_timeline.
+ * When a fence times out, this allows printing more detailed mali timeline system info.
+ */
+struct mali_sync_timeline_container {
+ struct sync_timeline sync_timeline;
+ struct mali_timeline *timeline;
+};
+
MALI_STATIC_INLINE struct mali_sync_pt *to_mali_sync_pt(struct sync_pt *pt)
{
return container_of(pt, struct mali_sync_pt, sync_pt);
}
+MALI_STATIC_INLINE struct mali_sync_timeline_container *to_mali_sync_tl_container(struct sync_timeline *sync_tl)
+{
+ return container_of(sync_tl, struct mali_sync_timeline_container, sync_timeline);
+}
+
static struct sync_pt *timeline_dup(struct sync_pt *pt)
{
struct mali_sync_pt *mpt, *new_mpt;
MALI_DEBUG_ASSERT_POINTER(pt);
mpt = to_mali_sync_pt(pt);
- new_pt = sync_pt_create(pt->parent, sizeof(struct mali_sync_pt));
+ new_pt = sync_pt_create(mpt->sync_tl, sizeof(struct mali_sync_pt));
if (NULL == new_pt) return NULL;
new_mpt = to_mali_sync_pt(new_pt);
mali_sync_flag_get(mpt->flag);
new_mpt->flag = mpt->flag;
+ new_mpt->sync_tl = mpt->sync_tl;
return new_pt;
}
module_put(THIS_MODULE);
}
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
static void timeline_print_pt(struct seq_file *s, struct sync_pt *sync_pt)
{
struct mali_sync_pt *mpt;
}
}
+#else
+static void timeline_pt_value_str(struct sync_pt *pt, char *str, int size)
+{
+ struct mali_sync_pt *mpt;
+
+ MALI_DEBUG_ASSERT_POINTER(str);
+ MALI_DEBUG_ASSERT_POINTER(pt);
+
+ mpt = to_mali_sync_pt(pt);
+
+ /* It is possible this sync point is just under construction,
+ * make sure the flag is valid before accessing it
+ */
+ if (mpt->flag) {
+ _mali_osk_snprintf(str, size, "%u", mpt->flag->point);
+ } else {
+ _mali_osk_snprintf(str, size, "uninitialized");
+ }
+}
+
+static void timeline_value_str(struct sync_timeline *timeline, char *str, int size)
+{
+ struct mali_sync_timeline_container *mali_sync_tl;
+
+ MALI_DEBUG_ASSERT_POINTER(timeline);
+ MALI_DEBUG_ASSERT_POINTER(str);
+
+ mali_sync_tl = to_mali_sync_tl_container(timeline);
+
+ MALI_DEBUG_ASSERT_POINTER(mali_sync_tl);
+
+ if (NULL != mali_sync_tl->timeline) {
+ _mali_osk_snprintf(str, size, "oldest (%u) next (%u)\n", mali_sync_tl->timeline->point_oldest,
+ mali_sync_tl->timeline->point_next);
+ }
+}
+#endif
+
static struct sync_timeline_ops mali_timeline_ops = {
.driver_name = "Mali",
.dup = timeline_dup,
.compare = timeline_compare,
.free_pt = timeline_free_pt,
.release_obj = timeline_release,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
.print_pt = timeline_print_pt,
+#else
+ .pt_value_str = timeline_pt_value_str,
+ .timeline_value_str = timeline_value_str,
+#endif
};
-struct sync_timeline *mali_sync_timeline_create(const char *name)
+struct sync_timeline *mali_sync_timeline_create(struct mali_timeline *timeline, const char *name)
{
struct sync_timeline *sync_tl;
+ struct mali_sync_timeline_container *mali_sync_tl;
- sync_tl = sync_timeline_create(&mali_timeline_ops, sizeof(struct sync_timeline), name);
+ sync_tl = sync_timeline_create(&mali_timeline_ops, sizeof(struct mali_sync_timeline_container), name);
if (NULL == sync_tl) return NULL;
+ mali_sync_tl = to_mali_sync_tl_container(sync_tl);
+ mali_sync_tl->timeline = timeline;
+
/* Grab a reference on the module to ensure the callbacks are present
* as long some timeline exists. The reference is released when the
* timeline is freed.
mpt = to_mali_sync_pt(pt);
mpt->flag = flag;
+ mpt->sync_tl = flag->sync_tl;
return pt;
}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
#include "mali_osk.h"
struct mali_sync_flag;
+struct mali_timeline;
/**
* Create a sync timeline.
* @param name Name of the sync timeline.
* @return The new sync timeline if successful, NULL if not.
*/
-struct sync_timeline *mali_sync_timeline_create(const char *name);
+struct sync_timeline *mali_sync_timeline_create(struct mali_timeline *timeline, const char *name);
/**
* Check if sync timeline belongs to Mali.
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2012, 2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2012, 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_UK_TYPES_H__
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/fs.h> /* file system operations */
#include <linux/slab.h> /* memory allocation functions */
return 0;
}
-#define mali400_in_rk30_version 0x03
-int get_mali_version_in_rk30_wrapper(struct mali_session_data *session_data, _mali_uk_get_mali_version_in_rk30_s __user *uargs)
-{
- _mali_uk_get_mali_version_in_rk30_s kargs;
- MALI_CHECK_NON_NULL(uargs, -EINVAL);
- kargs.ctx = (uintptr_t)session_data;
- kargs.version = mali400_in_rk30_version;
- if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
- return 0;
-}
int get_api_version_v2_wrapper(struct mali_session_data *session_data, _mali_uk_get_api_version_v2_s __user *uargs)
{
return 0;
}
+#define mali400_in_rk30_version 0x01
+int get_mali_version_in_rk30_wrapper(struct mali_session_data *session_data, _mali_uk_get_mali_version_in_rk30_s __user *uargs)
+{
+ _mali_uk_get_mali_version_in_rk30_s kargs;
+ MALI_CHECK_NON_NULL(uargs, -EINVAL);
+ kargs.ctx = (uintptr_t)session_data;
+ kargs.version = mali400_in_rk30_version;
+ if (0 != put_user(kargs.version, &uargs->version)) return -EFAULT;
+ return 0;
+}
+
int wait_for_notification_wrapper(struct mali_session_data *session_data, _mali_uk_wait_for_notification_s __user *uargs)
{
_mali_uk_wait_for_notification_s kargs;
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010, 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/fs.h> /* file system operations */
#include <asm/uaccess.h> /* user space access */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/fs.h> /* file system operations */
#include <asm/uaccess.h> /* user space access */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010, 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/fs.h> /* file system operations */
#include <asm/uaccess.h> /* user space access */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/fs.h> /* file system operations */
#include <asm/uaccess.h> /* user space access */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/fs.h> /* file system operations */
#include <asm/uaccess.h> /* user space access */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/fs.h> /* file system operations */
#include <asm/uaccess.h> /* user space access */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2011-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2011-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/fs.h> /* file system operations */
#include <asm/uaccess.h> /* user space access */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
int get_user_settings_wrapper(struct mali_session_data *session_data, _mali_uk_get_user_settings_s __user *uargs);
int post_notification_wrapper(struct mali_session_data *session_data, _mali_uk_post_notification_s __user *uargs);
int request_high_priority_wrapper(struct mali_session_data *session_data, _mali_uk_request_high_priority_s __user *uargs);
+int get_mali_version_in_rk30_wrapper(struct mali_session_data *session_data, _mali_uk_get_mali_version_in_rk30_s __user *uargs);
int mem_write_safe_wrapper(struct mali_session_data *session_data, _mali_uk_mem_write_safe_s __user *uargs);
int mem_map_ext_wrapper(struct mali_session_data *session_data, _mali_uk_map_external_mem_s __user *argument);
int mem_unmap_ext_wrapper(struct mali_session_data *session_data, _mali_uk_unmap_external_mem_s __user *argument);
int mem_query_mmu_page_table_dump_size_wrapper(struct mali_session_data *session_data, _mali_uk_query_mmu_page_table_dump_size_s __user *uargs);
int mem_dump_mmu_page_table_wrapper(struct mali_session_data *session_data, _mali_uk_dump_mmu_page_table_s __user *uargs);
-int get_mali_version_in_rk30_wrapper(struct mali_session_data *session_data, _mali_uk_get_mali_version_in_rk30_s __user *uargs)
-;
+
int timeline_get_latest_point_wrapper(struct mali_session_data *session, _mali_uk_timeline_get_latest_point_s __user *uargs);
int timeline_wait_wrapper(struct mali_session_data *session, _mali_uk_timeline_wait_s __user *uargs);
int timeline_create_sync_fence_wrapper(struct mali_session_data *session, _mali_uk_timeline_create_sync_fence_s __user *uargs);
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2010, 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
#include <linux/moduleparam.h>
#include "arm_core_scaling.h"
-#include "mali_pp_scheduler.h"
+#include "mali_executor.h"
-static void mali_platform_device_release(struct device *device);
+
+static int mali_core_scaling_enable = 0;
+
+void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data);
static u32 mali_read_phys(u32 phys_addr);
#if defined(CONFIG_ARCH_REALVIEW)
static void mali_write_phys(u32 phys_addr, u32 value);
#endif
-static int mali_core_scaling_enable = 1;
-
-void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data);
+#ifndef CONFIG_MALI_DT
+static void mali_platform_device_release(struct device *device);
#if defined(CONFIG_ARCH_VEXPRESS)
#if defined(CONFIG_ARM64)
-static struct resource mali_gpu_resources_m450_mp4[] = {
- MALI_GPU_RESOURCES_MALI450_MP4_PMU(0x2F040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68)
+/* Juno + Mali-450 MP6 in V7 FPGA */
+static struct resource mali_gpu_resources_m450_mp6[] = {
+ MALI_GPU_RESOURCES_MALI450_MP6_PMU(0x6F040000, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200, 200)
};
+
#else
static struct resource mali_gpu_resources_m450_mp8[] = {
MALI_GPU_RESOURCES_MALI450_MP8_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68)
};
+static struct resource mali_gpu_resources_m450_mp6[] = {
+ MALI_GPU_RESOURCES_MALI450_MP6_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68)
+};
+
static struct resource mali_gpu_resources_m450_mp4[] = {
MALI_GPU_RESOURCES_MALI450_MP4_PMU(0xFC040000, -1, 70, 70, 70, 70, 70, 70, 70, 70, 70, 68)
};
MALI_GPU_RESOURCES_MALI400_MP2_PMU(0xC0000000, -1, -1, -1, -1, -1, -1)
};
+#endif
#endif
static struct mali_gpu_device_data mali_gpu_data = {
+#ifndef CONFIG_MALI_DT
+ .pmu_switch_delay = 0xFF, /* do not have to be this high on FPGA, but it is good for testing to have a delay */
+ .max_job_runtime = 60000, /* 60 seconds */
#if defined(CONFIG_ARCH_VEXPRESS)
.shared_mem_size = 256 * 1024 * 1024, /* 256MB */
-#elif defined(CONFIG_ARCH_REALVIEW)
+#endif
+#endif
+
+#if defined(CONFIG_ARCH_REALVIEW)
.dedicated_mem_start = 0x80000000, /* Physical start address (use 0xD0000000 for old indirect setup) */
.dedicated_mem_size = 0x10000000, /* 256MB */
#endif
#if defined(CONFIG_ARM64)
.fb_start = 0x5f000000,
+ .fb_size = 0x91000000,
#else
.fb_start = 0xe0000000,
-#endif
.fb_size = 0x01000000,
- .max_job_runtime = 60000, /* 60 seconds */
- .utilization_interval = 1000, /* 1000ms */
+#endif
+ .control_interval = 1000, /* 1000ms */
.utilization_callback = mali_gpu_utilization_callback,
- .pmu_switch_delay = 0xFF, /* do not have to be this high on FPGA, but it is good for testing to have a delay */
- .pmu_domain_config = {0x1, 0x2, 0x4, 0x4, 0x4, 0x8, 0x8, 0x8, 0x8, 0x1, 0x2, 0x8},
+ .get_clock_info = NULL,
+ .get_freq = NULL,
+ .set_freq = NULL,
};
+#ifndef CONFIG_MALI_DT
static struct platform_device mali_gpu_device = {
.name = MALI_GPU_NAME_UTGARD,
.id = 0,
#if defined(CONFIG_ARCH_VEXPRESS)
#if defined(CONFIG_ARM64)
- if (mali_read_phys(0x2F000000) == 0x40104450) {
- MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP4 device\n"));
- num_pp_cores = 4;
- mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp4);
- mali_gpu_device.resource = mali_gpu_resources_m450_mp4;
+ if (mali_read_phys(0x6F000000) == 0x40601450) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP6 device\n"));
+ num_pp_cores = 6;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp6);
+ mali_gpu_device.resource = mali_gpu_resources_m450_mp6;
}
#else
if (mali_read_phys(0xFC000000) == 0x00000450) {
num_pp_cores = 8;
mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp8);
mali_gpu_device.resource = mali_gpu_resources_m450_mp8;
+ } else if (mali_read_phys(0xFC000000) == 0x40600450) {
+ MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP6 device\n"));
+ num_pp_cores = 6;
+ mali_gpu_device.num_resources = ARRAY_SIZE(mali_gpu_resources_m450_mp6);
+ mali_gpu_device.resource = mali_gpu_resources_m450_mp6;
} else if (mali_read_phys(0xFC000000) == 0x40400450) {
MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP4 device\n"));
num_pp_cores = 4;
MALI_DEBUG_PRINT(4, ("mali_platform_device_release() called\n"));
}
+#else /* CONFIG_MALI_DT */
+/* Probe-time setup used with device-tree builds (CONFIG_MALI_DT): detect the
+ * Mali variant present, attach mali_gpu_data to the platform device, enable
+ * runtime PM and initialize the core scaling policy.
+ * Returns 0 on success or the error from platform_device_add_data().
+ */
+int mali_platform_device_init(struct platform_device *device)
+{
+	int num_pp_cores;
+	int err = -1;
+#if defined(CONFIG_ARCH_REALVIEW)
+	u32 m400_gp_version;
+#endif
+
+	/* Detect present Mali GPU and connect the correct resources to the device */
+#if defined(CONFIG_ARCH_VEXPRESS)
+
+#if defined(CONFIG_ARM64)
+	if (mali_read_phys(0x6F000000) == 0x40601450) {
+		MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP6 device\n"));
+		num_pp_cores = 6;
+	}
+#else
+	if (mali_read_phys(0xFC000000) == 0x00000450) {
+		MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP8 device\n"));
+		num_pp_cores = 8;
+	} else if (mali_read_phys(0xFC000000) == 0x40400450) {
+		MALI_DEBUG_PRINT(4, ("Registering Mali-450 MP4 device\n"));
+		num_pp_cores = 4;
+	}
+#endif
+
+#elif defined(CONFIG_ARCH_REALVIEW)
+
+	/* GP core version register identifies Mali-300 vs Mali-400 on the FPGA. */
+	m400_gp_version = mali_read_phys(0xC000006C);
+	if ((m400_gp_version & 0xFFFF0000) == 0x0C070000) {
+		MALI_DEBUG_PRINT(4, ("Registering Mali-300 device\n"));
+		num_pp_cores = 1;
+		mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+	} else if ((m400_gp_version & 0xFFFF0000) == 0x0B070000) {
+		u32 fpga_fw_version = mali_read_phys(0xC0010000);
+		if (fpga_fw_version == 0x130C008F || fpga_fw_version == 0x110C008F) {
+			/* Mali-400 MP1 r1p0 or r1p1 */
+			MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP1 device\n"));
+			num_pp_cores = 1;
+			mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+		} else if (fpga_fw_version == 0x130C000F) {
+			/* Mali-400 MP2 r1p1 */
+			MALI_DEBUG_PRINT(4, ("Registering Mali-400 MP2 device\n"));
+			num_pp_cores = 2;
+			mali_write_phys(0xC0010020, 0xA); /* Enable direct memory mapping for FPGA */
+		}
+	}
+#endif
+
+	err = platform_device_add_data(device, &mali_gpu_data, sizeof(mali_gpu_data));
+
+	if (0 == err) {
+#ifdef CONFIG_PM_RUNTIME
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37))
+		/* Let the GPU idle for 1000 ms before runtime-suspending it. */
+		pm_runtime_set_autosuspend_delay(&(device->dev), 1000);
+		pm_runtime_use_autosuspend(&(device->dev));
+#endif
+		pm_runtime_enable(&(device->dev));
+#endif
+		/* NOTE(review): num_pp_cores is left uninitialized when none of the
+		 * detection branches above matched (or on platforms other than
+		 * VEXPRESS/REALVIEW), so this assert reads an indeterminate value —
+		 * consider initializing it to 0. */
+		MALI_DEBUG_ASSERT(0 < num_pp_cores);
+		mali_core_scaling_init(num_pp_cores);
+	}
+
+	return err;
+}
+
+/* Device-tree counterpart of the release hook: stop the core scaling policy
+ * and (on RealView FPGA) restore the legacy memory mapping. Always returns 0.
+ */
+int mali_platform_device_deinit(struct platform_device *device)
+{
+	MALI_IGNORE(device);
+
+	MALI_DEBUG_PRINT(4, ("mali_platform_device_deinit() called\n"));
+
+	mali_core_scaling_term();
+
+#if defined(CONFIG_ARCH_REALVIEW)
+	mali_write_phys(0xC0010020, 0x9); /* Restore default (legacy) memory mapping */
+#endif
+
+	return 0;
+}
+
+#endif /* CONFIG_MALI_DT */
+
static u32 mali_read_phys(u32 phys_addr)
{
u32 phys_addr_page = phys_addr & 0xFFFFE000;
int ret = param_set_int(val, kp);
if (1 == mali_core_scaling_enable) {
- mali_core_scaling_sync(mali_pp_scheduler_get_num_cores_enabled());
+ mali_core_scaling_sync(mali_executor_get_num_cores_enabled());
}
return ret;
}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
--- /dev/null
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file arm_core_scaling.c
+ * Example core scaling policy.
+ */
+
+#include "arm_core_scaling.h"
+
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+
+#include <linux/workqueue.h>
+
+static int num_cores_total;
+static int num_cores_enabled;
+
+static struct work_struct wq_work;
+
+/* Workqueue handler: push the current value of num_cores_enabled into the
+ * driver. Deferred to a workqueue because the utilization callback that
+ * drives scaling normally runs in timer context (see mali_core_scaling_update).
+ */
+static void set_num_cores(struct work_struct *work)
+{
+	int err = mali_perf_set_num_pp_cores(num_cores_enabled);
+	MALI_DEBUG_ASSERT(0 == err);
+	MALI_IGNORE(err);
+}
+
+/* Enable one more PP core if any are still disabled; the actual change is
+ * applied asynchronously through wq_work. */
+static void enable_one_core(void)
+{
+	if (num_cores_enabled < num_cores_total) {
+		++num_cores_enabled;
+		schedule_work(&wq_work);
+		MALI_DEBUG_PRINT(3, ("Core scaling: Enabling one more core\n"));
+	}
+
+	MALI_DEBUG_ASSERT(1 <= num_cores_enabled);
+	MALI_DEBUG_ASSERT(num_cores_total >= num_cores_enabled);
+}
+
+/* Disable one PP core, but never drop below one enabled core; the actual
+ * change is applied asynchronously through wq_work. */
+static void disable_one_core(void)
+{
+	if (1 < num_cores_enabled) {
+		--num_cores_enabled;
+		schedule_work(&wq_work);
+		MALI_DEBUG_PRINT(3, ("Core scaling: Disabling one core\n"));
+	}
+
+	MALI_DEBUG_ASSERT(1 <= num_cores_enabled);
+	MALI_DEBUG_ASSERT(num_cores_total >= num_cores_enabled);
+}
+
+/* Enable all PP cores at once; used by mali_core_scaling_update() when PP
+ * utilization exceeds the top threshold. */
+static void enable_max_num_cores(void)
+{
+	if (num_cores_enabled < num_cores_total) {
+		num_cores_enabled = num_cores_total;
+		schedule_work(&wq_work);
+		MALI_DEBUG_PRINT(3, ("Core scaling: Enabling maximum number of cores\n"));
+	}
+
+	MALI_DEBUG_ASSERT(num_cores_total == num_cores_enabled);
+}
+
+/* Initialize the core scaling policy.
+ * @param num_pp_cores Total number of PP cores; all are treated as enabled
+ *                     initially.
+ */
+void mali_core_scaling_init(int num_pp_cores)
+{
+	INIT_WORK(&wq_work, set_num_cores);
+
+	num_cores_total = num_pp_cores;
+	num_cores_enabled = num_pp_cores;
+
+	/* NOTE: Mali is not fully initialized at this point. */
+}
+
+/* Re-synchronize the policy's bookkeeping with the number of cores actually
+ * enabled — used when the core count is changed outside this policy (see the
+ * param_set_int handler that calls mali_core_scaling_sync). */
+void mali_core_scaling_sync(int num_cores)
+{
+	num_cores_enabled = num_cores;
+}
+
+/* Terminate the policy: flush any pending core-count change before teardown. */
+void mali_core_scaling_term(void)
+{
+	flush_scheduled_work();
+}
+
+/* PERCENT_OF(p, max): p percent of max, rounded to nearest (uses 100.0, i.e.
+ * floating point). The comparisons below evaluate it against 256, which
+ * appears to be the full-scale value of utilization_pp — TODO confirm against
+ * the mali_gpu_utilization_data documentation. */
+#define PERCENT_OF(percent, max) ((int) ((percent)*(max)/100.0 + 0.5))
+
+void mali_core_scaling_update(struct mali_gpu_utilization_data *data)
+{
+	/*
+	 * This function implements a very trivial PP core scaling algorithm.
+	 *
+	 * It is _NOT_ of production quality.
+	 * The only intention behind this algorithm is to exercise and test the
+	 * core scaling functionality of the driver.
+	 * It is _NOT_ tuned for neither power saving nor performance!
+	 *
+	 * Other metrics than PP utilization need to be considered as well
+	 * in order to make a good core scaling algorithm.
+	 */
+
+	MALI_DEBUG_PRINT(3, ("Utilization: (%3d, %3d, %3d), cores enabled: %d/%d\n", data->utilization_gpu, data->utilization_gp, data->utilization_pp, num_cores_enabled, num_cores_total));
+
+	/* NOTE: this function is normally called directly from the utilization callback which is in
+	 * timer context. */
+
+	/* >90%: all cores; >50%: one more core; 40-50%: hold; >0..40%: one
+	 * fewer core; exactly 0: hold. */
+	if (PERCENT_OF(90, 256) < data->utilization_pp) {
+		enable_max_num_cores();
+	} else if (PERCENT_OF(50, 256) < data->utilization_pp) {
+		enable_one_core();
+	} else if (PERCENT_OF(40, 256) < data->utilization_pp) {
+		/* do nothing */
+	} else if (PERCENT_OF(0, 256) < data->utilization_pp) {
+		disable_one_core();
+	} else {
+		/* do nothing */
+	}
+}
--- /dev/null
+/*
+ * Copyright (C) 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file arm_core_scaling.h
+ * Example core scaling policy.
+ */
+
+#ifndef __ARM_CORE_SCALING_H__
+#define __ARM_CORE_SCALING_H__
+
+struct mali_gpu_utilization_data;
+
+/**
+ * Initialize core scaling policy.
+ *
+ * @note The core scaling policy will assume that all PP cores are on initially.
+ *
+ * @param num_pp_cores Total number of PP cores.
+ */
+void mali_core_scaling_init(int num_pp_cores);
+
+/**
+ * Terminate core scaling policy.
+ */
+void mali_core_scaling_term(void);
+
+/**
+ * Update core scaling policy with new utilization data.
+ *
+ * @param data Utilization data.
+ */
+void mali_core_scaling_update(struct mali_gpu_utilization_data *data);
+
+void mali_core_scaling_sync(int num_cores);
+
+#endif /* __ARM_CORE_SCALING_H__ */
--- /dev/null
+/* --------------------------------------------------------------------------------------------------------\r
+ * File: custom_log.h \r
+ *\r
+ * Desc: Customized log output implementation preferred by ChenZhen. \r
+ *\r
+ * -----------------------------------------------------------------------------------\r
+ * < ϰÓï ºÍ ËõÂÔÓï > : \r
+ *\r
+ * -----------------------------------------------------------------------------------\r
+ * Usage: \r
+ *\r
+ * Note:\r
+ *\r
+ * Author: ChenZhen\r
+ *\r
+ * --------------------------------------------------------------------------------------------------------\r
+ * Version:\r
+ * v1.0\r
+ * --------------------------------------------------------------------------------------------------------\r
+ * Log:\r
+ ----Fri Nov 19 15:20:28 2010 v1.0\r
+ * \r
+ * --------------------------------------------------------------------------------------------------------\r
+ */\r
+\r
+\r
+#ifndef __CUSTOM_LOG_H__\r
+#define __CUSTOM_LOG_H__\r
+\r
+#ifdef __cplusplus\r
+extern "C" {\r
+#endif\r
+\r
+/* ---------------------------------------------------------------------------------------------------------\r
+ * Include Files\r
+ * ---------------------------------------------------------------------------------------------------------\r
+ */\r
+#include <linux/kernel.h>\r
+\r
+\r
+/* ---------------------------------------------------------------------------------------------------------\r
+ * Macros Definition \r
+ * ---------------------------------------------------------------------------------------------------------\r
+ */\r
+ \r
+/** Log output is enabled only when the following macro is defined. */\r
+#define ENABLE_DEBUG_LOG\r
+\r
+/** NOTE: to disable the D log globally, enable the code below. */\r
+/*\r
+#undef ENABLE_DEBUG_LOG\r
+#warning "custom debug log is disabled globally!"\r
+*/\r
+\r
+#define LOGD(fmt, args...) \\r
+ printk(KERN_DEBUG fmt "\n", ## args)\r
+\r
+/*---------------------------------------------------------------------------*/\r
+ \r
+#ifdef ENABLE_VERBOSE_LOG\r
+/** Verbose log. */\r
+#define V(fmt, args...) \\r
+ { printk(KERN_DEBUG "V : [File] : %s; [Line] : %d; [Func] : %s(); " fmt "\n", __FILE__, __LINE__, __FUNCTION__, ## args); }\r
+#else\r
+#define V(...) ((void)0)\r
+#endif\r
+\r
+\r
+#ifdef ENABLE_DEBUG_LOG\r
+/** Debug log. */\r
+#define D(fmt, args...) \\r
+ { printk(KERN_DEBUG "D : [File] : %s; [Line] : %d; [Func] : %s(); " fmt "\n", __FILE__, __LINE__, __FUNCTION__, ## args); }\r
+#else\r
+#define D(...) ((void)0)\r
+#endif\r
+\r
+#define I(fmt, args...) \\r
+ { printk(KERN_INFO "I : [File] : %s; [Line] : %d; [Func] : %s(); " fmt "\n", __FILE__, __LINE__, __FUNCTION__, ## args); }\r
+\r
+#define W(fmt, args...) \\r
+ { printk(KERN_WARNING "W : [File] : %s; [Line] : %d; [Func] : %s(); " fmt "\n", __FILE__, __LINE__, __FUNCTION__, ## args); }\r
+\r
+#define E(fmt, args...) \\r
+ { printk(KERN_ERR "E : [File] : %s; [Line] : %d; [Func] : %s(); " fmt "\n", __FILE__, __LINE__, __FUNCTION__, ## args); }\r
+\r
+/*-------------------------------------------------------*/\r
+\r
+/** ʹÓà D(), ÒÔÊ®½øÖƵÄÐÎʽ´òÓ¡±äÁ¿ 'var' µÄ value. */\r
+#define D_DEC(var) D(#var " = %d.", var);\r
+\r
+#define E_DEC(var) E(#var " = %d.", var);\r
+\r
+/** ʹÓà D(), ÒÔÊ®Áù½øÖƵÄÐÎʽ´òÓ¡±äÁ¿ 'var' µÄ value. */\r
+#define D_HEX(var) D(#var " = 0x%x.", var);\r
+\r
+#define E_HEX(var) E(#var " = 0x%x.", var);\r
+\r
+/** ʹÓà D(), ÒÔÊ®Áù½øÖƵÄÐÎʽ ´òÓ¡Ö¸ÕëÀàÐͱäÁ¿ 'ptr' µÄ value. */\r
+#define D_PTR(ptr) D(#ptr " = %p.", ptr);\r
+\r
+#define E_PTR(ptr) E(#ptr " = %p.", ptr);\r
+\r
+/** ʹÓà D(), ´òÓ¡ char ×Ö´®. */\r
+#define D_STR(pStr) \\r
+{\\r
+ if ( NULL == pStr )\\r
+ {\\r
+ D(#pStr" = NULL.");\\r
+ }\\r
+ else\\r
+ {\\r
+ D(#pStr" = '%s'.", pStr);\\r
+ }\\r
+}\r
+\r
+#define E_STR(pStr) \\r
+{\\r
+ if ( NULL == pStr )\\r
+ {\\r
+ E(#pStr" = NULL.");\\r
+ }\\r
+ else\\r
+ {\\r
+ E(#pStr" = '%s'.", pStr);\\r
+ }\\r
+}\r
+\r
+#ifdef ENABLE_DEBUG_LOG\r
+/**\r
+ * log ´Ó 'pStart' µØÖ·¿ªÊ¼µÄ 'len' ¸ö×Ö½ÚµÄÊý¾Ý. \r
+ */\r
+#define D_MEM(pStart, len) \\r
+ {\\r
+ int i = 0;\\r
+ char* p = (char*)pStart;\\r
+ D("dump memory from addr of '" #pStart "', from %p, length %d' : ", pStart, len); \\r
+ printk("\t\t");\\r
+ for ( i = 0; i < len ; i++ )\\r
+ {\\r
+ printk("0x%02x, ", p[i] );\\r
+ }\\r
+ printk("\n");\\r
+ }\r
+#else\r
+#define D_MEM(...) ((void)0)\r
+#endif\r
+\r
+/*-------------------------------------------------------*/\r
+\r
+#define EXIT_FOR_DEBUG \\r
+{\\r
+ E("To exit for debug.");\\r
+ return 1;\\r
+}\r
+\r
+/*-------------------------------------------------------*/\r
+\r
+/**\r
+ * µ÷Óú¯Êý, ²¢¼ì²é·µ»ØÖµ, ¸ù¾Ý·µ»ØÖµ¾ö¶¨ÊÇ·ñÌø×ªµ½Ö¸¶¨µÄ´íÎó´¦Àí´úÂë. \r
+ * @param functionCall\r
+ * ¶ÔÌØ¶¨º¯ÊýµÄµ÷ÓÃ, ¸Ãº¯ÊýµÄ·µ»ØÖµ±ØÐëÊÇ ±íÕ÷ ³É¹¦ or err µÄ ÕûÐÍÊý. \r
+ * ÕâÀï, ±»µ÷Óú¯Êý "±ØÐë" ÊDZ»¶¨ÒåΪ "·µ»Ø 0 ±íʾ²Ù×÷³É¹¦". \r
+ * @param result\r
+ * ÓÃÓڼǼº¯Êý·µ»ØµÄ error code µÄ ÕûÐͱäÁ¿, ͨ³£ÊÇ "ret" or "result" µÈ.\r
+ * @param label\r
+ * Èôº¯Êý·µ»Ø´íÎó, ³ÌÐò½«ÒªÌø×ªµ½µÄ ´íÎó´¦Àí´¦µÄ ±êºÅ, ͨ³£¾ÍÊÇ "EXIT". \r
+ */\r
+#define CHECK_FUNC_CALL(functionCall, result, label) \\r
+{\\r
+ if ( 0 != ( (result) = (functionCall) ) )\\r
+ {\\r
+ E("Function call returned error : " #result " = %d.", result);\\r
+ goto label;\\r
+ }\\r
+}\r
+\r
+/**\r
+ * ÔÚÌØ¶¨Ìõ¼þÏÂ, Åж¨ error ·¢Éú, ¶Ô±äÁ¿ 'retVar' ÉèÖà 'errCode', \r
+ * Log Êä³ö¶ÔÓ¦µÄ Error Caution, È»ºóÌø×ª 'label' Ö¸¶¨µÄ´úÂë´¦Ö´ÐÐ. \r
+ * @param msg\r
+ * ´¿×Ö´®ÐÎʽµÄÌáʾÐÅÏ¢. \r
+ * @param retVar\r
+ * ±êʶº¯ÊýÖ´ÐÐ״̬»òÕß½á¹ûµÄ±äÁ¿, ½«±»ÉèÖþßÌåµÄ Error Code. \r
+ * ͨ³£ÊÇ 'ret' or 'result'. \r
+ * @param errCode\r
+ * ±íÕ÷ÌØ¶¨ error µÄ³£Êý±êʶ, ͨ³£ÊÇ ºêµÄÐÎ̬. \r
+ * @param label\r
+ * ³ÌÐò½«ÒªÌø×ªµ½µÄ´íÎó´¦Àí´úÂëµÄ±êºÅ, ͨ³£¾ÍÊÇ 'EXIT'. \r
+ * @param args...\r
+ * ¶ÔÓ¦ 'msgFmt' ʵ²ÎÖÐ '%s', '%d', ... µÈ ת»»ËµÃ÷·û µÄ¾ßÌå¿É±ä³¤Êµ²Î. \r
+ */\r
+#define SET_ERROR_AND_JUMP(msgFmt, retVar, errCode, label, args...) \\r
+{\\r
+ E("To set '" #retVar "' to %d('" #errCode "'), because : " msgFmt, (errCode), ## args);\\r
+ (retVar) = (errCode);\\r
+ goto label;\\r
+}\r
+\r
+\r
+/* ---------------------------------------------------------------------------------------------------------\r
+ * Types and Structures Definition\r
+ * ---------------------------------------------------------------------------------------------------------\r
+ */\r
+\r
+\r
+/* ---------------------------------------------------------------------------------------------------------\r
+ * Global Functions' Prototype\r
+ * ---------------------------------------------------------------------------------------------------------\r
+ */\r
+\r
+\r
+/* ---------------------------------------------------------------------------------------------------------\r
+ * Inline Functions Implementation \r
+ * ---------------------------------------------------------------------------------------------------------\r
+ */\r
+\r
+#ifdef __cplusplus\r
+}\r
+#endif\r
+\r
+#endif /* __CUSTOM_LOG_H__ */\r
+\r
--- /dev/null
+/*\r
+ * Rockchip SoC Mali-450 DVFS driver\r
+ *\r
+ * This program is free software; you can redistribute it and/or modify\r
+ * it under the terms of the GNU General Public License version 2 as\r
+ * published by the Free Software Foundation.\r
+ */\r
+\r
+#include "mali_platform.h"\r
+#include "mali_dvfs.h"\r
+\r
+#define level0_min 0\r
+#define level0_max 70\r
+#define levelf_max 100\r
+\r
+#define mali_dividend 7\r
+#define mali_fix_float(a) ((((a)*mali_dividend)%10)?((((a)*mali_dividend)/10)+1):(((a)*mali_dividend)/10))\r
+\r
+#define work_to_dvfs(w) container_of(w, struct mali_dvfs, work)\r
+#define dvfs_to_drv_data(dvfs) container_of(dvfs, struct mali_platform_drv_data, dvfs)\r
+\r
+/* DVFS worker: map the most recent utilisation sample to an OPP level and\r
+ * apply it. Scheduled from mali_dvfs_event(); runs in process context, so it\r
+ * may block on the clock-set mutex inside mali_set_level(). */\r
+static void mali_dvfs_event_proc(struct work_struct *w)\r
+{\r
+	struct mali_dvfs *dvfs = work_to_dvfs(w);\r
+	struct mali_platform_drv_data *drv_data = dvfs_to_drv_data(dvfs);\r
+	unsigned int utilisation = dvfs->utilisation;\r
+	unsigned int level = dvfs->current_level;\r
+	const struct mali_fv_info *threshold = &drv_data->fv_info[level];\r
+	int ret;\r
+\r
+	/* Convert the raw 0..256 sample to a percentage. */\r
+	utilisation = utilisation * 100 / 256;\r
+\r
+	// dev_dbg(drv_data->dev, "utilisation percent = %d\n", utilisation);\r
+\r
+	/* NOTE(review): the "- 1 - 1" bound stops one level short of the last\r
+	 * entry in fv_info[]; if the highest OPP is meant to be reachable this\r
+	 * is an off-by-one — confirm against the DT operating points. */\r
+	if (utilisation > threshold->max &&\r
+	    level < drv_data->fv_info_length - 1 - 1)\r
+		level += 1;\r
+	else if (level > 0 && utilisation < threshold->min)\r
+		level -= 1;\r
+	else\r
+		return;\r
+\r
+	dev_dbg(drv_data->dev, "Setting dvfs level %u: freq = %lu Hz\n",\r
+		level, drv_data->fv_info[level].freq);\r
+\r
+	ret = mali_set_level(drv_data->dev, level);\r
+	if (ret) {\r
+		dev_err(drv_data->dev, "set freq error, %d", ret);\r
+		return;\r
+	}\r
+}\r
+\r
+/* Return whether the DVFS worker is currently allowed to run. */\r
+bool mali_dvfs_is_enabled(struct device *dev)\r
+{\r
+	struct mali_platform_drv_data *drv_data = dev_get_drvdata(dev);\r
+	struct mali_dvfs *dvfs = &drv_data->dvfs;\r
+\r
+	return dvfs->enabled;\r
+}\r
+\r
+/* Allow future utilisation events to schedule the DVFS worker. */\r
+void mali_dvfs_enable(struct device *dev)\r
+{\r
+	struct mali_platform_drv_data *drv_data = dev_get_drvdata(dev);\r
+	struct mali_dvfs *dvfs = &drv_data->dvfs;\r
+\r
+	dvfs->enabled = true;\r
+}\r
+\r
+/* Stop DVFS: block new work and wait for any in-flight worker to finish. */\r
+void mali_dvfs_disable(struct device *dev)\r
+{\r
+	struct mali_platform_drv_data *drv_data = dev_get_drvdata(dev);\r
+	struct mali_dvfs *dvfs = &drv_data->dvfs;\r
+\r
+	dvfs->enabled = false;\r
+	cancel_work_sync(&dvfs->work);\r
+}\r
+\r
+/* Return the last utilisation sample stored by mali_dvfs_event() (raw 0..256\r
+ * scale; converted to percent inside mali_dvfs_event_proc). */\r
+unsigned int mali_dvfs_utilisation(struct device *dev)\r
+{\r
+	struct mali_platform_drv_data *drv_data = dev_get_drvdata(dev);\r
+	struct mali_dvfs *dvfs = &drv_data->dvfs;\r
+\r
+	return dvfs->utilisation;\r
+}\r
+\r
+/* Utilisation-event entry point: store the sample and, when DVFS is enabled,\r
+ * defer processing to the workqueue. Always returns MALI_TRUE. */\r
+int mali_dvfs_event(struct device *dev, u32 utilisation)\r
+{\r
+	struct mali_platform_drv_data *drv_data = dev_get_drvdata(dev);\r
+	struct mali_dvfs *dvfs = &drv_data->dvfs;\r
+\r
+	dvfs->utilisation = utilisation;\r
+\r
+	if (dvfs->enabled)\r
+		schedule_work(&dvfs->work);\r
+\r
+	return MALI_TRUE;\r
+}\r
+/* Precompute each level's utilisation window [min, max] (in percent):\r
+ * exceeding max moves one level up, dropping below min moves one level down.\r
+ * @param div      percentage step between successive levels' max thresholds\r
+ * @param drv_data driver data whose fv_info[] table is filled in\r
+ */\r
+static void mali_dvfs_threshold(u32 div, struct mali_platform_drv_data *drv_data)\r
+{\r
+	int length = drv_data->fv_info_length;\r
+	u32 pre_level;\r
+	u32 tmp;\r
+	int level;\r
+\r
+	for (level = 0; level < length; level++) {\r
+		if (level == 0) {\r
+			drv_data->fv_info[level].min = level0_min;\r
+			if (length == 1)\r
+				drv_data->fv_info[level].max = levelf_max;\r
+			else\r
+				drv_data->fv_info[level].max = level0_max;\r
+		} else {\r
+			pre_level = level - 1;\r
+			if (level == length - 1)\r
+				drv_data->fv_info[level].max = levelf_max;\r
+			else\r
+				drv_data->fv_info[level].max = drv_data->fv_info[pre_level].max + div;\r
+\r
+			/* min = previous level's max rescaled to this level's clock\r
+			 * (same absolute load), then raised by ~7/10 of the window\r
+			 * (mali_fix_float) as hysteresis. */\r
+			drv_data->fv_info[level].min = drv_data->fv_info[pre_level].max *\r
+				drv_data->fv_info[pre_level].freq / drv_data->fv_info[level].freq;\r
+\r
+			tmp = drv_data->fv_info[level].max - drv_data->fv_info[level].min;\r
+			drv_data->fv_info[level].min += mali_fix_float(tmp);\r
+		}\r
+\r
+		dev_info(drv_data->dev, "freq: %lu, min_threshold: %d, max_threshold: %d\n",\r
+			 drv_data->fv_info[level].freq,\r
+			 drv_data->fv_info[level].min,\r
+			 drv_data->fv_info[level].max);\r
+	}\r
+}\r
+\r
+/* Build the frequency/threshold table from the device-tree dvfs table, set\r
+ * the initial (lowest) frequency and arm the DVFS worker.\r
+ * Returns 0 on success, -1 when no DT table is found, -ENOMEM on allocation\r
+ * failure, or the error from dvfs_clk_set_rate().\r
+ */\r
+int mali_dvfs_init(struct device *dev)\r
+{\r
+	struct mali_platform_drv_data *drv_data = dev_get_drvdata(dev);\r
+	struct mali_dvfs *dvfs = &drv_data->dvfs;\r
+	struct cpufreq_frequency_table *freq_table;\r
+	int i = 0;\r
+	int div_dvfs;\r
+	int ret;\r
+\r
+	freq_table = dvfs_get_freq_volt_table(drv_data->clk);\r
+	if (!freq_table) {\r
+		dev_err(dev, "Can't find dvfs table in dts\n");\r
+		return -1;\r
+	}\r
+\r
+	/* Count entries up to the CPUFREQ_TABLE_END sentinel. */\r
+	while (freq_table[i].frequency != CPUFREQ_TABLE_END) {\r
+		drv_data->fv_info_length++;\r
+		i++;\r
+	}\r
+\r
+	drv_data->fv_info = devm_kcalloc(dev, drv_data->fv_info_length,\r
+					 sizeof(*drv_data->fv_info),\r
+					 GFP_KERNEL);\r
+	if (!drv_data->fv_info)\r
+		return -ENOMEM;\r
+\r
+	/* Table stores kHz; fv_info keeps Hz. */\r
+	for (i = 0; i < drv_data->fv_info_length; i++)\r
+		drv_data->fv_info[i].freq = freq_table[i].frequency * 1000;\r
+\r
+	if(drv_data->fv_info_length > 1)\r
+		div_dvfs = round_up(((levelf_max - level0_max) /\r
+				     (drv_data->fv_info_length-1)), 1);\r
+\r
+	/* NOTE(review): div_dvfs is only assigned when fv_info_length > 1; for\r
+	 * a single-entry table it is passed uninitialized (benign only because\r
+	 * the length==1 branch in mali_dvfs_threshold() ignores it — confirm). */\r
+	mali_dvfs_threshold(div_dvfs, drv_data);\r
+\r
+	ret = dvfs_clk_set_rate(drv_data->clk, drv_data->fv_info[0].freq);\r
+	if (ret)\r
+		return ret;\r
+\r
+	drv_data->dvfs.current_level = 0;\r
+\r
+	dev_info(dev, "initial freq = %lu\n",\r
+		 dvfs_clk_get_rate(drv_data->clk));\r
+\r
+	INIT_WORK(&dvfs->work, mali_dvfs_event_proc);\r
+	dvfs->enabled = true;\r
+\r
+	return 0;\r
+}\r
+\r
+/* Tear down DVFS: disable it and flush the worker (same steps as\r
+ * mali_dvfs_disable()). */\r
+void mali_dvfs_term(struct device *dev)\r
+{\r
+	struct mali_platform_drv_data *drv_data = dev_get_drvdata(dev);\r
+	struct mali_dvfs *dvfs = &drv_data->dvfs;\r
+\r
+	dvfs->enabled = false;\r
+	cancel_work_sync(&dvfs->work);\r
+}\r
--- /dev/null
+/*\r
+ * Rockchip SoC Mali-450 DVFS driver\r
+ *\r
+ * This program is free software; you can redistribute it and/or modify\r
+ * it under the terms of the GNU General Public License version 2 as\r
+ * published by the Free Software Foundation.\r
+ */\r
+#include <linux/workqueue.h>\r
+#include <linux/types.h>\r
+#include <linux/device.h>\r
+\r
+#ifndef _MALI_DVFS_H_\r
+#define _MALI_DVFS_H_\r
+\r
+/* Per-device DVFS state (embedded in mali_platform_drv_data). */\r
+struct mali_dvfs {\r
+	struct work_struct work;	/* worker that applies a level change */\r
+	unsigned int utilisation;	/* last sample, raw 0..256 scale */\r
+	unsigned int current_level;	/* current index into fv_info[] */\r
+	bool enabled;			/* gates scheduling of 'work' */\r
+};\r
+\r
+int mali_dvfs_init(struct device *dev);\r
+void mali_dvfs_term(struct device *dev);\r
+void mali_set_dvfs(struct device *dev, bool enable);\r
+bool mali_dvfs_is_enabled(struct device *dev);\r
+void mali_dvfs_enable(struct device *dev);\r
+void mali_dvfs_disable(struct device *dev);\r
+unsigned int mali_dvfs_utilisation(struct device *dev);\r
+int mali_dvfs_event(struct device *dev, u32 utilisation);\r
+#endif /*_MALI_DVFS_H_*/\r
* @file mali_platform.c
* Platform specific Mali driver functions for a default platform
*/
-#include "mali_kernel_common.h"
-#include "mali_osk.h"
-#include "mali_platform.h"
#include <linux/workqueue.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/clk.h>
#include <linux/device.h>
-#ifdef CONFIG_HAS_EARLYSUSPEND
-#include <linux/earlysuspend.h>
-#endif
-
+#include <linux/regulator/driver.h>
#include <linux/miscdevice.h>
#include <asm/uaccess.h>
-#include <linux/module.h>
#include <linux/cpufreq.h>
+#include <linux/of.h>
-#include <linux/rockchip/cpu.h>
-#include <linux/rockchip/dvfs.h>
-#define GPUCLK_NAME "clk_gpu"
-#define GPUCLK_PD_NAME "pd_gpu"
-#define GPU_MHZ 1000000
-static struct dvfs_node *mali_clock = 0;
-static struct clk *mali_clock_pd = 0;
-static struct clk *audis_gpu_clk = 0;
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "arm_core_scaling.h"
+#include "mali_platform.h"
-#define MALI_DVFS_DEFAULT_STEP 0 // 50Mhz default
-u32 mali_dvfs[] = {50, 100, 133, 160, 200, 266, 400};
-int num_clock;
-u32 mali_init_clock = 50;
-static int minuend = 0;
+static int mali_core_scaling_enable;
-static struct cpufreq_frequency_table *freq_table = NULL;
+u32 mali_group_error;
-module_param_array(mali_dvfs, int, &num_clock,S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(mali_dvfs,"mali clock table");
+struct device *mali_dev;
-module_param(mali_init_clock, int,S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(mali_init_clock,"mali init clock value");
-u32 mali_group_error = 0;
-u32 scale_enable = 1;
-u32 gpu_power_state = 0;
-static u32 utilization_global = 0;
+int mali_set_level(struct device *dev, int level)
+{
+ struct mali_platform_drv_data *drv_data = dev_get_drvdata(dev);
+ unsigned long freq;
+ int ret;
+ unsigned int current_level;
-u32 mali_utilization_timeout = 10;
-u32 sampling_enable = 1;
-#define mali_freq_workqueue_name "mali_freq_workqueue"
-#define mali_freq_work_name "mali_freq_work"
-struct mali_freq_data {
- struct workqueue_struct *wq;
- struct work_struct work;
- u32 freq;
-}*mali_freq_data;
+ _mali_osk_mutex_wait(drv_data->clockSetlock);
-typedef struct mali_dvfs_tableTag{
- u32 clock;
- u32 vol;
-}mali_dvfs_table;
+ current_level = drv_data->dvfs.current_level;
+ freq = drv_data->fv_info[level].freq;
-typedef struct mali_dvfs_statusTag{
- int currentStep;
- mali_dvfs_table * pCurrentDvfs;
+ if (level == current_level) {
+ _mali_osk_mutex_signal(drv_data->clockSetlock);
+ return 0;
+ }
-}mali_dvfs_status;
+ ret = dvfs_clk_set_rate(drv_data->clk, freq);
+ if (ret) {
+ _mali_osk_mutex_signal(drv_data->clockSetlock);
+ return ret;
+ }
-mali_dvfs_status maliDvfsStatus;
+ dev_dbg(dev, "set freq %lu\n", freq);
-#define GPU_DVFS_UP_THRESHOLD ((int)((255*50)/100))
-#define GPU_DVFS_DOWN_THRESHOLD ((int)((255*35)/100))
+ drv_data->dvfs.current_level = level;
-_mali_osk_mutex_t *clockSetlock;
+ _mali_osk_mutex_signal(drv_data->clockSetlock);
-struct clk* mali_clk_get(unsigned char *name)
-{
- struct clk *clk;
- clk = clk_get(NULL,name);
- return clk;
-}
-unsigned long mali_clk_get_rate(struct dvfs_node *clk)
-{
- return dvfs_clk_get_rate(clk);
+ return 0;
}
-void mali_clk_set_rate(struct dvfs_node *clk, u32 value)
+static int mali_clock_init(struct device *dev)
{
- unsigned long rate = (unsigned long)value * GPU_MHZ;
- dvfs_clk_set_rate(clk, rate);
- rate = mali_clk_get_rate(clk);
-}
+ int ret;
-static struct kobject *mali400_utility_object;
-static struct kobject *rk_gpu;
+ struct mali_platform_drv_data *drv_data = dev_get_drvdata(dev);
-static u32 get_mali_dvfs_status(void)
-{
- return maliDvfsStatus.currentStep;
-}
-static void set_mali_dvfs_step(u32 value)
-{
- maliDvfsStatus.currentStep = value;
-}
+ drv_data->pd = devm_clk_get(dev, "pd_gpu");
+ if (IS_ERR(drv_data->pd)) {
+ ret = PTR_ERR(drv_data->pd);
+ dev_err(dev, "get pd_clk failed, %d\n", ret);
+ return ret;
+ }
-static void scale_enable_set(u32 value)
-{
- scale_enable = value;
-}
-static u32 mali_dvfs_search(u32 value)
-{
- u32 i;
- u32 clock = value;
- for (i=0;i<num_clock;i++) {
- if (clock == mali_dvfs[i]) {
- _mali_osk_mutex_wait(clockSetlock);
- mali_clk_set_rate(mali_clock,clock);
- _mali_osk_mutex_signal(clockSetlock);
- set_mali_dvfs_step(i);
- scale_enable_set(0);
- return 0;
- }
- if(i>=7)
- MALI_DEBUG_PRINT(2,("USER set clock not in the mali_dvfs table\r\n"));
+ ret = clk_prepare_enable(drv_data->pd);
+ if (ret) {
+ dev_err(dev, "prepare pd_clk failed, %d\n", ret);
+ return ret;
}
- return 1;
-}
-static int mali400_utility_show(struct device *dev,struct device_attribute *attr, char *buf)
-{
- return sprintf(buf, "%d\n", utilization_global);
+ drv_data->clk = clk_get_dvfs_node("clk_gpu");
+ if (IS_ERR(drv_data->clk)) {
+ ret = PTR_ERR(drv_data->clk);
+ dev_err(dev, "prepare clk gpu failed, %d\n", ret);
+ return ret;
+ }
+
+ ret = dvfs_clk_prepare_enable(drv_data->clk);
+ if (ret) {
+ dev_err(dev, "prepare clk failed, %d\n", ret);
+ return ret;
+ }
+
+ drv_data->power_state = true;
+
+ return 0;
}
-static int mali400_clock_set(struct device *dev,struct device_attribute *attr, const char *buf,u32 count)
+
+static void mali_clock_term(struct device *dev)
{
- u32 clock;
- u32 currentStep;
- u64 timeValue;
- clock = simple_strtoul(buf, NULL, 10);
- currentStep = get_mali_dvfs_status();
- timeValue = _mali_osk_time_get_ns();
- /*MALI_PRINT(("USER SET CLOCK,%d\r\n",clock));*/
- if(!clock) {
- scale_enable_set(1);
- } else {
- mali_dvfs_search(clock);
- }
- return count;
+ struct mali_platform_drv_data *drv_data = dev_get_drvdata(dev);
+
+ dvfs_clk_disable_unprepare(drv_data->clk);
+ clk_disable_unprepare(drv_data->pd);
+ drv_data->power_state = false;
}
-static int clock_show(struct device *dev,struct device_attribute *attr, char *buf)
+
+static ssize_t show_available_frequencies(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
{
+ struct mali_platform_drv_data *drv_data = dev_get_drvdata(dev);
+ ssize_t ret = 0;
u32 i;
- char *pos = buf;
- pos += snprintf(pos,PAGE_SIZE,"%d,",num_clock);
- for(i=0;i<(num_clock-1);i++) {
- pos += snprintf(pos,PAGE_SIZE,"%d,",mali_dvfs[i]);
- }
- pos +=snprintf(pos,PAGE_SIZE,"%d\n",mali_dvfs[i]);
- return pos - buf;
+
+ for (i = 0; i < drv_data->fv_info_length; i++)
+ ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%lu\n",
+ drv_data->fv_info[i].freq);
+
+ return ret;
}
-static int sampling_timeout_show(struct device *dev,struct device_attribute *attr, char *buf)
+
+static ssize_t show_clock(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "mali_utilization_timeout = %d\n", mali_utilization_timeout);
+ struct mali_platform_drv_data *drv_data = dev_get_drvdata(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%lu\n", dvfs_clk_get_rate(drv_data->clk));
}
-static int sampling_timeout_set(struct device *dev,struct device_attribute *attr,
- const char *buf,u32 count)
-{
- u32 sampling;
- sampling = simple_strtoul(buf, NULL, 10);
-
- if (sampling == 0 ) {
- sampling_enable = 0;
- MALI_PRINT(("disable mali clock frequency scalling\r\n"));
- } else {
- mali_utilization_timeout = sampling;
- sampling_enable = 1;
- MALI_PRINT(("enable mali clock frequency scalling ,mali_utilization_timeout : %dms\r\n",
- mali_utilization_timeout));
+
+static ssize_t set_clock(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct mali_platform_drv_data *drv_data = dev_get_drvdata(dev);
+ unsigned long freq;
+ ssize_t ret;
+ u32 level;
+
+ ret = kstrtoul(buf, 10, &freq);
+ if (ret)
+ return ret;
+
+ for (level = drv_data->fv_info_length - 1; level > 0; level--) {
+ unsigned long tmp = drv_data->fv_info[level].freq;
+ if (tmp <= freq)
+ break;
}
+
+ dev_info(dev, "Using fv_info table %d: for %lu Hz\n", level, freq);
+
+ ret = mali_set_level(dev, level);
+ if (ret)
+ return ret;
+
return count;
}
-static int error_count_show(struct device *dev,struct device_attribute *attr, char *buf)
+
+static ssize_t show_dvfs_enable(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%d\n", mali_group_error);
+ return scnprintf(buf, PAGE_SIZE, "%u\n", mali_dvfs_is_enabled(dev));
}
-static DEVICE_ATTR(utility, 0644, mali400_utility_show, mali400_clock_set);
-static DEVICE_ATTR(param, 0644, clock_show, NULL);
-static DEVICE_ATTR(sampling_timeout, 0644, sampling_timeout_show,sampling_timeout_set);
-static DEVICE_ATTR(error_count, 0644, error_count_show, NULL);
+static ssize_t set_dvfs_enable(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ unsigned long enable;
+ ssize_t ret;
+ ret = kstrtoul(buf, 0, &enable);
+ if (ret)
+ return ret;
-static mali_bool mali400_utility_sysfs_init(void)
-{
- u32 ret ;
+ if (enable == 1)
+ mali_dvfs_enable(dev);
+ else if (enable == 0)
+ mali_dvfs_disable(dev);
+ else
+ return -EINVAL;
- mali400_utility_object = kobject_create_and_add("mali400_utility", NULL);
- if (mali400_utility_object == NULL) {
- return -1;
- }
- rk_gpu = kobject_create_and_add("rk_gpu", NULL);
- if (!rk_gpu)
- return -1;
- ret = sysfs_create_file(mali400_utility_object, &dev_attr_utility.attr);
- if (ret) {
- return -1;
- }
- ret = sysfs_create_file(mali400_utility_object, &dev_attr_param.attr);
- if (ret) {
- return -1;
- }
- ret = sysfs_create_file(mali400_utility_object, &dev_attr_sampling_timeout.attr);
- if(ret){
- return -1;
- }
- ret = sysfs_create_file(rk_gpu, &dev_attr_error_count.attr);
- if(ret){
- return -1;
- }
- return 0 ;
-}
-static unsigned int decideNextStatus(unsigned int utilization)
-{
- u32 level=0;
-
- if(utilization > GPU_DVFS_UP_THRESHOLD &&
- maliDvfsStatus.currentStep == 0 &&
- maliDvfsStatus.currentStep < (num_clock-minuend))
- level = 1;
- else if (utilization > GPU_DVFS_UP_THRESHOLD &&
- maliDvfsStatus.currentStep == 1 &&
- maliDvfsStatus.currentStep < (num_clock-minuend))
- level = 2;
- else if (utilization > GPU_DVFS_UP_THRESHOLD &&
- maliDvfsStatus.currentStep == 2 &&
- maliDvfsStatus.currentStep < (num_clock-minuend))
- level = 3;
- else if (utilization > GPU_DVFS_UP_THRESHOLD &&
- maliDvfsStatus.currentStep == 3 &&
- maliDvfsStatus.currentStep < (num_clock-minuend))
- level = 4;
- else if (utilization > GPU_DVFS_UP_THRESHOLD &&
- maliDvfsStatus.currentStep == 4 &&
- maliDvfsStatus.currentStep < (num_clock-minuend))
- level = 5;
- else if (utilization > GPU_DVFS_UP_THRESHOLD &&
- maliDvfsStatus.currentStep == 5 &&
- maliDvfsStatus.currentStep < (num_clock-minuend))
- level = 6;
- /*
- determined by minuend to up to level 6
- */
- else if(utilization < GPU_DVFS_DOWN_THRESHOLD &&
- maliDvfsStatus.currentStep == 6)
- level = 5;
- else if(utilization < GPU_DVFS_DOWN_THRESHOLD &&
- maliDvfsStatus.currentStep == 5)
- level = 4;
- else if(utilization < GPU_DVFS_DOWN_THRESHOLD &&
- maliDvfsStatus.currentStep == 4)
- level = 3;
- else if(utilization < GPU_DVFS_DOWN_THRESHOLD &&
- maliDvfsStatus.currentStep == 3)
- level = 2;
- else if(utilization < GPU_DVFS_DOWN_THRESHOLD &&
- maliDvfsStatus.currentStep == 2)
- level = 1;
- else if(utilization < GPU_DVFS_DOWN_THRESHOLD &&
- maliDvfsStatus.currentStep == 1)
- level = 0;
- else
- level = maliDvfsStatus.currentStep;
- return level;
+ return count;
}
-static mali_bool set_mali_dvfs_status(u32 step)
+static ssize_t show_utilisation(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
- u32 validatedStep=step;
-
- _mali_osk_mutex_wait(clockSetlock);
- mali_clk_set_rate(mali_clock, mali_dvfs[validatedStep]);
- _mali_osk_mutex_signal(clockSetlock);
- set_mali_dvfs_step(validatedStep);
-
- return MALI_TRUE;
+ return scnprintf(buf, PAGE_SIZE, "%u\n", mali_dvfs_utilisation(dev));
}
-static mali_bool change_mali_dvfs_status(u32 step)
+static int error_count_show(struct device *dev,struct device_attribute *attr, char *buf)
{
- if(!set_mali_dvfs_status(step)) {
- MALI_DEBUG_PRINT(2,("error on set_mali_dvfs_status: %d\n",step));
- return MALI_FALSE;
- }
-
- return MALI_TRUE;
+ return sprintf(buf, "%d\n", mali_group_error);
}
-static void mali_freq_scale_work(struct work_struct *work)
-{
+DEVICE_ATTR(available_frequencies, S_IRUGO, show_available_frequencies, NULL);
+DEVICE_ATTR(clock, S_IRUGO | S_IWUSR, show_clock, set_clock);
+DEVICE_ATTR(dvfs_enable, S_IRUGO | S_IWUSR, show_dvfs_enable, set_dvfs_enable);
+DEVICE_ATTR(utilisation, S_IRUGO, show_utilisation, NULL);
+DEVICE_ATTR(error_count, 0644, error_count_show, NULL);
+
+static struct attribute *mali_sysfs_entries[] = {
+ &dev_attr_available_frequencies.attr,
+ &dev_attr_clock.attr,
+ &dev_attr_dvfs_enable.attr,
+ &dev_attr_utilisation.attr,
+ &dev_attr_error_count.attr,
+ NULL,
+};
- u32 nextStatus = 0;
- u32 curStatus = 0;
+static const struct attribute_group mali_attr_group = {
+ .attrs = mali_sysfs_entries,
+};
- curStatus = get_mali_dvfs_status();
- nextStatus = decideNextStatus(utilization_global);
-
- if (curStatus!=nextStatus) {
- if (!change_mali_dvfs_status(nextStatus)) {
- MALI_DEBUG_PRINT(1, ("error on change_mali_dvfs_status \n"));
- }
- }
-}
-static mali_bool init_mali_clock(void)
+static int mali_create_sysfs(struct device *dev)
{
- mali_bool ret = MALI_TRUE;
- int i;
-
- if (mali_clock != 0 || mali_clock_pd != 0)
- return ret;
-#if 1
- mali_clock_pd = clk_get(NULL,GPUCLK_PD_NAME);
- if (IS_ERR(mali_clock_pd)) {
- MALI_PRINT( ("MALI Error : failed to get source mali pd\n"));
- ret = MALI_FALSE;
- goto err_gpu_clk;
- }
- clk_prepare_enable(mali_clock_pd);
-#endif
- mali_clock = clk_get_dvfs_node(GPUCLK_NAME);
- if (IS_ERR(mali_clock)) {
- MALI_PRINT( ("MALI Error : failed to get source mali clock\n"));
- ret = MALI_FALSE;
- goto err_gpu_clk;
- }
- dvfs_clk_prepare_enable(mali_clock);
- freq_table = dvfs_get_freq_volt_table(mali_clock);
- if (!freq_table) {
- MALI_PRINT(("Stop,dvfs table should be set in dts\n"));
- return MALI_FALSE;
- }
- for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
- mali_dvfs[i] = freq_table[i].frequency/1000;
- }
- mali_init_clock = mali_dvfs[0];
- num_clock = i;
- minuend = 2;
- MALI_PRINT(("Mali400 inside of rk3126\r\n"));
-
- mali_clk_set_rate(mali_clock, mali_init_clock);
- gpu_power_state = 1;
-
- return MALI_TRUE;
-
-err_gpu_clk:
- MALI_PRINT(("::clk_put:: %s mali_clock\n", __FUNCTION__));
- gpu_power_state = 0;
-#if 1
- clk_disable_unprepare(mali_clock_pd);
-#endif
- dvfs_clk_disable_unprepare(mali_clock);
- mali_clock = 0;
- mali_clock_pd = 0;
+ int ret;
+
+ ret = sysfs_create_group(&dev->kobj, &mali_attr_group);
+ if (ret)
+ dev_err(dev, "create sysfs group error, %d\n", ret);
return ret;
}
-static mali_bool deinit_mali_clock(void)
+void mali_remove_sysfs(struct device *dev)
{
- if (mali_clock == 0 && mali_clock_pd == 0)
- return MALI_TRUE;
- dvfs_clk_disable_unprepare(mali_clock);
-#if 1
- clk_disable_unprepare(mali_clock_pd);
-#endif
- mali_clock = 0;
- mali_clock_pd = 0;
- if(gpu_power_state)
- gpu_power_state = 0;
- return MALI_TRUE;
+ sysfs_remove_group(&dev->kobj, &mali_attr_group);
}
-mali_bool init_mali_dvfs_status(int step)
+_mali_osk_errcode_t mali_platform_init(struct platform_device *pdev)
{
- set_mali_dvfs_step(step);
- return MALI_TRUE;
-}
+ struct device *dev = &pdev->dev;
+ struct mali_platform_drv_data *mali_drv_data;
+ int ret;
+
+ mali_drv_data = devm_kzalloc(dev, sizeof(*mali_drv_data), GFP_KERNEL);
+ if (!mali_drv_data) {
+ dev_err(dev, "no mem\n");
+ return _MALI_OSK_ERR_NOMEM;
+ }
-#ifdef CONFIG_HAS_EARLYSUSPEND
-static void mali_pm_early_suspend(struct early_suspend *mali_dev)
-{
- /*do nothing*/
-}
-static void mali_pm_late_resume(struct early_suspend *mali_dev)
-{
- /*do nothing*/
-}
-static struct early_suspend mali_dev_early_suspend = {
- .suspend = mali_pm_early_suspend,
- .resume = mali_pm_late_resume,
- .level = EARLY_SUSPEND_LEVEL_DISABLE_FB,
-};
-#endif /* CONFIG_HAS_EARLYSUSPEND */
+ dev_set_drvdata(dev, mali_drv_data);
-_mali_osk_errcode_t mali_platform_init(void)
-{
- if (cpu_is_rk3036()) {
- audis_gpu_clk = clk_get(NULL,"clk_gpu");
+ mali_drv_data->dev = dev;
- if (IS_ERR(audis_gpu_clk)) {
- MALI_PRINT( ("MALI Error : failed to get audis mali clk\n"));
- return MALI_FALSE;
-
- }
+ mali_dev = dev;
- clk_prepare_enable(audis_gpu_clk);
+ ret = mali_clock_init(dev);
+ if (ret)
+ goto err_init;
- MALI_SUCCESS;
- }
- MALI_CHECK(init_mali_clock(), _MALI_OSK_ERR_FAULT);
-
- clockSetlock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED,_MALI_OSK_LOCK_ORDER_UTILIZATION);
- if(!init_mali_dvfs_status(MALI_DVFS_DEFAULT_STEP))
- MALI_DEBUG_PRINT(1, ("init_mali_dvfs_status failed\n"));
-
- if(mali400_utility_sysfs_init())
- MALI_PRINT(("mali400_utility_sysfs_init error\r\n"));
-
- mali_freq_data = kmalloc(sizeof(struct mali_freq_data), GFP_KERNEL);
- if(!mali_freq_data) {
- MALI_PRINT(("kmalloc error\r\n"));
- MALI_ERROR(-1);
- }
- mali_freq_data->wq = create_workqueue(mali_freq_workqueue_name);
- if(!mali_freq_data->wq)
- MALI_ERROR(-1);
- INIT_WORK(&mali_freq_data->work,mali_freq_scale_work);
-
-#ifdef CONFIG_HAS_EARLYSUSPEND
- register_early_suspend(&mali_dev_early_suspend);
-#endif
+ ret = mali_dvfs_init(dev);
+ if (ret)
+ goto err_init;
+
+ ret = mali_create_sysfs(dev);
+ if (ret)
+ goto term_clk;
- MALI_SUCCESS;
+ mali_drv_data->clockSetlock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_ORDERED,
+ _MALI_OSK_LOCK_ORDER_UTILIZATION);
+ mali_core_scaling_enable = 1;
+
+ return 0;
+term_clk:
+ mali_clock_term(dev);
+err_init:
+ return _MALI_OSK_ERR_FAULT;
}
-_mali_osk_errcode_t mali_platform_deinit(void)
+_mali_osk_errcode_t mali_platform_deinit(struct platform_device *pdev)
{
- if (cpu_is_rk3036()) {
- clk_disable_unprepare(audis_gpu_clk);
- MALI_SUCCESS;
- }
+ struct device *dev = &pdev->dev;
+ struct mali_platform_drv_data *drv_data = dev_get_drvdata(dev);
- deinit_mali_clock();
- _mali_osk_mutex_term(clockSetlock);
+ mali_core_scaling_term();
+ mali_clock_term(dev);
+ _mali_osk_mutex_term(drv_data->clockSetlock);
- MALI_SUCCESS;
+ return 0;
}
+
_mali_osk_errcode_t mali_power_domain_control(u32 bpower_off)
{
- if (!bpower_off) {
- if (!gpu_power_state) {
- if (cpu_is_rk3036()) {
- clk_prepare_enable(audis_gpu_clk);
- } else {
- #if 1
- clk_prepare_enable(mali_clock_pd);
- #endif
- dvfs_clk_prepare_enable(mali_clock);
- }
- gpu_power_state = 1 ;
- }
- } else if (bpower_off == 2) {
- ;
+ struct mali_platform_drv_data *drv_data = dev_get_drvdata(mali_dev);
+
+ if (bpower_off == 0) {
+ if (!drv_data->power_state) {
+ dvfs_clk_prepare_enable(drv_data->clk);
+ clk_prepare_enable(drv_data->pd);
+ drv_data->power_state = true;
+ }
} else if (bpower_off == 1) {
- if(gpu_power_state) {
- if (cpu_is_rk3036()) {
- clk_disable_unprepare(audis_gpu_clk);
- } else {
- dvfs_clk_disable_unprepare(mali_clock);
- #if 1
- clk_disable_unprepare(mali_clock_pd);
- #endif
- }
- gpu_power_state = 0;
+ if (drv_data->power_state) {
+ dvfs_clk_disable_unprepare(drv_data->clk);
+ clk_disable_unprepare(drv_data->pd);
+ drv_data->power_state = false;
}
}
- MALI_SUCCESS;
+
+ return 0;
}
_mali_osk_errcode_t mali_platform_power_mode_change(mali_power_mode power_mode)
{
-#if 1
switch(power_mode) {
case MALI_POWER_MODE_ON:
- MALI_DEBUG_PRINT(2,("MALI_POWER_MODE_ON\r\n"));
+ MALI_DEBUG_PRINT(2, ("MALI_POWER_MODE_ON\r\n"));
mali_power_domain_control(MALI_POWER_MODE_ON);
break;
case MALI_POWER_MODE_LIGHT_SLEEP:
- MALI_DEBUG_PRINT(2,("MALI_POWER_MODE_LIGHT_SLEEP\r\n"));
+ MALI_DEBUG_PRINT(2, ("MALI_POWER_MODE_LIGHT_SLEEP\r\n"));
mali_power_domain_control(MALI_POWER_MODE_LIGHT_SLEEP);
break;
case MALI_POWER_MODE_DEEP_SLEEP:
- MALI_DEBUG_PRINT(2,("MALI_POWER_MODE_DEEP_SLEEP\r\n"));
+ MALI_DEBUG_PRINT(2, ("MALI_POWER_MODE_DEEP_SLEEP\r\n"));
mali_power_domain_control(MALI_POWER_MODE_DEEP_SLEEP);
break;
default:
- MALI_DEBUG_PRINT(2,("mali_platform_power_mode_change:power_mode(%d) not support \r\n",power_mode));
+ MALI_DEBUG_PRINT(2, ("mali_platform_power_mode_change:power_mode(%d) not support \r\n",
+ power_mode));
}
-#endif
- MALI_SUCCESS;
+
+ return 0;
}
void mali_gpu_utilization_handler(struct mali_gpu_utilization_data *data)
{
- if (cpu_is_rk3036())
- return;
-
if(data->utilization_pp > 256)
return;
- utilization_global = data->utilization_pp;
-
- //MALI_PRINT(("utilization_global = %d\r\n",utilization_global));
- if(scale_enable && sampling_enable)
- queue_work(mali_freq_data->wq,&mali_freq_data->work);
-
- return ;
+ if (mali_core_scaling_enable)
+ mali_core_scaling_update(data);
+
+ // dev_dbg(mali_dev, "utilization:%d\r\n", data->utilization_pp);
+
+ mali_dvfs_event(mali_dev, data->utilization_pp);
}
#ifndef __MALI_PLATFORM_H__
#define __MALI_PLATFORM_H__
+#include "mali_dvfs.h"
#include "mali_osk.h"
#include <linux/mali/mali_utgard.h>
+#include <linux/rockchip/dvfs.h>
+#include <linux/cpufreq.h>
+
#ifdef __cplusplus
extern "C" {
#endif
MALI_POWER_MODE_DEEP_SLEEP, /**< Mali has been idle for a long time, or OS suspend */
} mali_power_mode;
+struct mali_fv_info {
+ unsigned long freq;
+ unsigned int min;
+ unsigned int max;
+};
+
+struct mali_platform_drv_data {
+ struct dvfs_node *clk;
+ struct clk *pd;
+ struct mali_fv_info *fv_info;
+ unsigned int fv_info_length;
+ struct mali_dvfs dvfs;
+ struct device *dev;
+ bool power_state;
+ _mali_osk_mutex_t *clockSetlock;
+};
+
/** @brief Platform specific setup and initialisation of MALI
*
* This is called from the entrypoint of the driver to initialize the platform
*
* @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
*/
-_mali_osk_errcode_t mali_platform_init(void);
+_mali_osk_errcode_t mali_platform_init(struct platform_device *pdev);
/** @brief Platform specific deinitialisation of MALI
*
*
* @return _MALI_OSK_ERR_OK on success otherwise, a suitable _mali_osk_errcode_t error.
*/
-_mali_osk_errcode_t mali_platform_deinit(void);
+_mali_osk_errcode_t mali_platform_deinit(struct platform_device *pdev);
/** @brief Platform specific powerdown sequence of MALI
*
* @param utilization The workload utilization of the Mali GPU. 0 = no utilization, 256 = full utilization.
*/
void mali_gpu_utilization_handler(struct mali_gpu_utilization_data *data);
-
-/** @brief Setting the power domain of MALI
- *
- * This function sets the power domain of MALI if Linux run time power management is enabled
- *
- * @param dev Reference to struct platform_device (defined in linux) used by MALI GPU
- */
-void set_mali_parent_power_domain(void* dev);
+int mali_set_level(struct device *dev, int level);
#ifdef __cplusplus
}
* copies and copies may only be made to the extent permitted
* by a licensing agreement from ARM Limited.
*/
-
-/*author by xxm 2012-12-1*/
-
+
#include <linux/platform_device.h>
#include <linux/version.h>
#include <linux/pm.h>
#ifdef CONFIG_PM_RUNTIME
#include <linux/pm_runtime.h>
#endif
-#include <linux/mali/mali_utgard.h>
-#include "mali_kernel_common.h"
-
-#include "mali_platform.h"
-
#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/rockchip/cpu.h>
-static int num_cores_total;
-static int num_cores_enabled;
-static void mali_platform_device_release(struct device *device);
-static int mali_os_suspend(struct device *device);
-static int mali_os_resume(struct device *device);
-static int mali_os_freeze(struct device *device);
-static int mali_os_thaw(struct device *device);
-#ifdef CONFIG_PM_RUNTIME
-static int mali_runtime_suspend(struct device *device);
-static int mali_runtime_resume(struct device *device);
-static int mali_runtime_idle(struct device *device);
-#endif
-
-/*#include "arm_core_scaling.h"*/
-void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data);
-
-static struct work_struct wq_work;
+#include <linux/mali/mali_utgard.h>
+#include "mali_kernel_common.h"
+#include "mali_platform.h"
+#include "arm_core_scaling.h"
-static struct dev_pm_ops mali_gpu_device_type_pm_ops = {
- .suspend = mali_os_suspend,
- .resume = mali_os_resume,
- .freeze = mali_os_freeze,
- .thaw = mali_os_thaw,
#ifdef CONFIG_PM_RUNTIME
- .runtime_suspend = mali_runtime_suspend,
- .runtime_resume = mali_runtime_resume,
- .runtime_idle = mali_runtime_idle,
-#endif
-};
-
-static struct device_type mali_gpu_device_device_type = {
- .pm = &mali_gpu_device_type_pm_ops,
-};
-
-static u64 dma_dmamask = DMA_BIT_MASK(32);
-
-static struct mali_gpu_device_data mali_gpu_data = {
- .shared_mem_size = 1024* 1024 * 1024, /* 1GB */
- .fb_start = 0x40000000,
- .fb_size = 0xb1000000,
- .utilization_interval = 0, /* 0ms */
- .utilization_callback = mali_gpu_utilization_handler,
-};
-static void mali_platform_device_add_config(struct platform_device *pdev)
-{
- if (cpu_is_rk3036())
- mali_gpu_device_device_type.pm = NULL;
- pdev->dev.id = 0;
- pdev->dev.release = mali_platform_device_release;
- pdev->dev.type = &mali_gpu_device_device_type;
- pdev->dev.dma_mask = &dma_dmamask;
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
-}
-static void set_num_cores(struct work_struct *work)
-{
- int err = mali_perf_set_num_pp_cores(num_cores_enabled);
- MALI_DEBUG_ASSERT(0 == err);
- MALI_IGNORE(err);
-}
-static void enable_one_core(void)
+static int mali_runtime_suspend(struct device *device)
{
- if (num_cores_enabled < num_cores_total) {
- ++num_cores_enabled;
- schedule_work(&wq_work);
- MALI_DEBUG_PRINT(3, ("Core scaling: Enabling one more core\n"));
- }
+ int ret = 0;
+ MALI_DEBUG_PRINT(4, ("mali_runtime_suspend() called\n"));
- MALI_DEBUG_ASSERT(1 <= num_cores_enabled);
- MALI_DEBUG_ASSERT(num_cores_total >= num_cores_enabled);
-}
-static void disable_one_core(void)
-{
- if (1 < num_cores_enabled) {
- --num_cores_enabled;
- schedule_work(&wq_work);
- MALI_DEBUG_PRINT(3, ("Core scaling: Disabling one core\n"));
+ if (NULL != device->driver &&
+ NULL != device->driver->pm &&
+ NULL != device->driver->pm->runtime_suspend) {
+ /* Need to notify Mali driver about this event */
+ ret = device->driver->pm->runtime_suspend(device);
}
- MALI_DEBUG_ASSERT(1 <= num_cores_enabled);
- MALI_DEBUG_ASSERT(num_cores_total >= num_cores_enabled);
-}
-static void enable_max_num_cores(void)
-{
- if (num_cores_enabled < num_cores_total) {
- num_cores_enabled = num_cores_total;
- schedule_work(&wq_work);
- MALI_DEBUG_PRINT(3, ("Core scaling: Enabling maximum number of cores\n"));
- }
+ mali_platform_power_mode_change(MALI_POWER_MODE_LIGHT_SLEEP);
- MALI_DEBUG_ASSERT(num_cores_total == num_cores_enabled);
+ return ret;
}
-void mali_core_scaling_init(int num_pp_cores)
+
+static int mali_runtime_resume(struct device *device)
{
- INIT_WORK(&wq_work, set_num_cores);
+ int ret = 0;
+ MALI_DEBUG_PRINT(4, ("mali_runtime_resume() called\n"));
- num_cores_total = num_pp_cores;
- num_cores_enabled = num_pp_cores;
+ mali_platform_power_mode_change(MALI_POWER_MODE_ON);
- /* NOTE: Mali is not fully initialized at this point. */
-}
-void mali_core_scaling_term(void)
-{
- flush_scheduled_work();
-}
-#define PERCENT_OF(percent, max) ((int) ((percent)*(max)/100.0 + 0.5))
-void mali_core_scaling_update(struct mali_gpu_utilization_data *data)
-{
- /*
- * This function implements a very trivial PP core scaling algorithm.
- *
- * It is _NOT_ of production quality.
- * The only intention behind this algorithm is to exercise and test the
- * core scaling functionality of the driver.
- * It is _NOT_ tuned for neither power saving nor performance!
- *
- * Other metrics than PP utilization need to be considered as well
- * in order to make a good core scaling algorithm.
- */
-
- MALI_DEBUG_PRINT(3, ("Utilization: (%3d, %3d, %3d), cores enabled: %d/%d\n",
- data->utilization_gpu, data->utilization_gp,
- data->utilization_pp, num_cores_enabled, num_cores_total));
-
- /* NOTE: this function is normally called directly from the utilization callback which is in
- * timer context. */
-
- if (PERCENT_OF(90, 256) < data->utilization_pp) {
- enable_max_num_cores();
- } else if (PERCENT_OF(50, 256) < data->utilization_pp) {
- enable_one_core();
- } else if (PERCENT_OF(40, 256) < data->utilization_pp) {
- /* do nothing */
- } else if (PERCENT_OF( 0, 256) < data->utilization_pp) {
- disable_one_core();
- } else {
- /* do nothing */
+ if (NULL != device->driver &&
+ NULL != device->driver->pm &&
+ NULL != device->driver->pm->runtime_resume) {
+ /* Need to notify Mali driver about this event */
+ ret = device->driver->pm->runtime_resume(device);
}
+
+ return ret;
}
-int mali_platform_device_register(struct platform_device *pdev)
+
+static int mali_runtime_idle(struct device *device)
{
- int err = 0;
- int num_pp_cores = 0;
- MALI_PRINT(("mali_platform_device_register() called\n"));
-
- num_pp_cores = 1;
-
- mali_platform_device_add_config(pdev);
+ int ret = 0;
+ MALI_DEBUG_PRINT(4, ("mali_runtime_idle() called\n"));
- err = platform_device_add_data(pdev, &mali_gpu_data, sizeof(mali_gpu_data));
-
- if (0 == err) {
- err = mali_platform_init();
- if(0 == err) {
-#ifdef CONFIG_PM_RUNTIME
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
- pm_runtime_set_autosuspend_delay(&(pdev->dev), 1000);
- pm_runtime_use_autosuspend(&(pdev->dev));
-#endif
- pm_runtime_enable(&(pdev->dev));
-#endif
- mali_core_scaling_init(num_pp_cores);
- return 0;
- }
+ if (NULL != device->driver &&
+ NULL != device->driver->pm &&
+ NULL != device->driver->pm->runtime_idle) {
+ /* Need to notify Mali driver about this event */
+ ret = device->driver->pm->runtime_idle(device);
+ if (0 != ret)
+ return ret;
}
- return err;
-}
-void mali_platform_device_unregister(struct platform_device *pdev)
-{
- MALI_DEBUG_PRINT(4, ("mali_platform_device_unregister() called\n"));
- mali_platform_deinit();
- mali_core_scaling_term();
-}
+ pm_runtime_suspend(device);
-static void mali_platform_device_release(struct device *device)
-{
- MALI_DEBUG_PRINT(4, ("mali_platform_device_release() called\n"));
+ return 0;
}
+#endif
+
static int mali_os_suspend(struct device *device)
{
int ret = 0;
- MALI_DEBUG_PRINT(2, ("mali_os_suspend() called\n"));
+ MALI_DEBUG_PRINT(4, ("mali_os_suspend() called\n"));
if (NULL != device->driver &&
NULL != device->driver->pm &&
{
int ret = 0;
- MALI_DEBUG_PRINT(2, ("mali_os_resume() called\n"));
+ MALI_DEBUG_PRINT(4, ("mali_os_resume() called\n"));
mali_platform_power_mode_change(MALI_POWER_MODE_ON);
return ret;
}
-#ifdef CONFIG_PM_RUNTIME
-static int mali_runtime_suspend(struct device *device)
+static struct dev_pm_ops mali_gpu_device_type_pm_ops =
{
- int ret = 0;
- MALI_DEBUG_PRINT(4, ("mali_runtime_suspend() called\n"));
+ .suspend = mali_os_suspend,
+ .resume = mali_os_resume,
+ .freeze = mali_os_freeze,
+ .thaw = mali_os_thaw,
+#ifdef CONFIG_PM_RUNTIME
+ .runtime_suspend = mali_runtime_suspend,
+ .runtime_resume = mali_runtime_resume,
+ .runtime_idle = mali_runtime_idle,
+#endif
+};
- if (NULL != device->driver &&
- NULL != device->driver->pm &&
- NULL != device->driver->pm->runtime_suspend) {
- /* Need to notify Mali driver about this event */
- ret = device->driver->pm->runtime_suspend(device);
- }
+static struct device_type mali_gpu_device_device_type =
+{
+ .pm = &mali_gpu_device_type_pm_ops,
+};
- mali_platform_power_mode_change(MALI_POWER_MODE_LIGHT_SLEEP);
+static struct mali_gpu_device_data mali_gpu_data =
+{
+ .shared_mem_size = 1024* 1024 * 1024, /* 1GB */
+ .fb_start = 0x40000000,
+ .fb_size = 0xb1000000,
+ .max_job_runtime = 60000, /* 60 seconds */
+ //.utilization_interval = 0, /* 0ms */
+ .utilization_callback = mali_gpu_utilization_handler,
+};
- return ret;
+static void mali_platform_device_add_config(struct platform_device *pdev)
+{
+ pdev->name = MALI_GPU_NAME_UTGARD,
+ pdev->id = 0;
+ pdev->dev.type = &mali_gpu_device_device_type;
+ pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask,
+ pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
}
-static int mali_runtime_resume(struct device *device)
+int mali_platform_device_init(struct platform_device *pdev)
{
- int ret = 0;
- MALI_DEBUG_PRINT(4, ("mali_runtime_resume() called\n"));
-
- mali_platform_power_mode_change(MALI_POWER_MODE_ON);
+ int err = 0;
+ int num_pp_cores = 0;
+ MALI_DEBUG_PRINT(2,("mali_platform_device_register() called\n"));
- if (NULL != device->driver &&
- NULL != device->driver->pm &&
- NULL != device->driver->pm->runtime_resume) {
- /* Need to notify Mali driver about this event */
- ret = device->driver->pm->runtime_resume(device);
- }
+ if (cpu_is_rk312x())
+ num_pp_cores = 2;
+ else if (cpu_is_rk3036())
+ num_pp_cores = 1;
+ else if (cpu_is_rk3188())
+ num_pp_cores = 4;
- return ret;
-}
+ mali_platform_device_add_config(pdev);
-static int mali_runtime_idle(struct device *device)
-{
- int ret = 0;
- MALI_DEBUG_PRINT(4, ("mali_runtime_idle() called\n"));
+ err = platform_device_add_data(pdev, &mali_gpu_data,
+ sizeof(mali_gpu_data));
+ if (err == 0) {
+ err = mali_platform_init(pdev);
+ if (err == 0) {
- if (NULL != device->driver &&
- NULL != device->driver->pm &&
- NULL != device->driver->pm->runtime_idle) {
- /* Need to notify Mali driver about this event */
- ret = device->driver->pm->runtime_idle(device);
- if (0 != ret)
- return ret;
+#ifdef CONFIG_PM_RUNTIME
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
+ pm_runtime_set_autosuspend_delay(&(pdev->dev), 1000);
+ pm_runtime_use_autosuspend(&(pdev->dev));
+#endif
+ pm_runtime_enable(&(pdev->dev));
+#endif
+ MALI_DEBUG_ASSERT(0 < num_pp_cores);
+ mali_core_scaling_init(num_pp_cores);
+ return 0;
+ }
}
- pm_runtime_suspend(device);
-
- return 0;
+ return err;
}
-#endif
-void mali_gpu_utilization_callback(struct mali_gpu_utilization_data *data)
+
+void mali_platform_device_deinit(struct platform_device *pdev)
{
- mali_core_scaling_update(data);
-}
+ MALI_DEBUG_PRINT(4, ("mali_platform_device_unregister() called\n"));
+ mali_platform_deinit(pdev);
+}
--- /dev/null
+Building the Mali Device Driver for Linux
+-----------------------------------------
+
+Build the Mali Device Driver for Linux by running the following make command:
+
+KDIR=<kdir_path> USING_UMP=<ump_option> BUILD=<build_option> make
+
+where
+ kdir_path: Path to your Linux Kernel directory
+ ump_option: 1 = enable UMP support (*)
+ 0 = disable UMP support
+ build_option: debug = debug build of driver
+ release = release build of driver
+
+(*) For newer Linux Kernels, the Module.symvers file for the UMP device driver
+ must be available. The UMP_SYMVERS_FILE variable in the Makefile should
+ point to this file. This file is generated when the UMP driver is built.
+
+The result will be a mali.ko file, which can be loaded into the Linux kernel
+by using the insmod command.
+
+Use of UMP is not recommended. The dma-buf API in the Linux kernel has
+replaced UMP. The Mali Device Driver will be built with dma-buf support if
+dma-buf support is enabled in the kernel configuration.
+
+The kernel needs to be provided with a platform_device struct for the Mali GPU
+device. See the mali_utgard.h header file for how to set up the Mali GPU
+resources.
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2007-2010, 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef _MALI200_REGS_H_
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2007-2010, 2012-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef _MALIGP2_CONROL_REGS_H_
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2011, 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_timestamp.h"
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2011, 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_TIMESTAMP_H__
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2011, 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_timestamp.h"
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2011, 2013-2014 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __MALI_TIMESTAMP_H__
MALI_STATIC_INLINE u64 _mali_timestamp_get(void)
{
- return _mali_osk_time_get_ns();
+ return _mali_osk_boot_time_get_ns();
}
#endif /* __MALI_TIMESTAMP_H__ */
--- /dev/null
+
+r5p0-01rel0-1-x@0
+ 对 arm_release_ver r5p0-01rel0 的定制集成.
+ r5p0-01rel0 对 gpu 的 dts 有大修改, 但这里出于兼容考虑, 仍旧使用 dts_for_mali_ko_befor_r5p0-01rel0.
+
#
-# This confidential and proprietary software may be used only as
-# authorised by a licensing agreement from ARM Limited
-# (C) COPYRIGHT 2008-2012 ARM Limited
-# ALL RIGHTS RESERVED
-# The entire notice above must be reproduced on all authorised
-# copies and copies may only be made to the extent permitted
-# by a licensing agreement from ARM Limited.
+# Copyright (C) 2010-2012 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# Set default configuration to use, if Makefile didn't provide one.
# Change this to use a different config.h
-CONFIG ?= pb-virtex5-m400-4
+CONFIG ?= default
# Validate selected config
ifneq ($(shell [ -d $(src)/arch-$(CONFIG) ] && [ -f $(src)/arch-$(CONFIG)/config.h ] && echo "OK"), OK)
ccflags-y += -DSVN_REV=$(SVN_REV)
ccflags-y += -DSVN_REV_STRING=\"$(DRIVER_REV)\"
-ccflags-y += -I$(src) -I$(src)/common -I$(src)/linux -I$(src)/../mali/common -I$(src)/../mali/linux -I$(src)/../include/ump
+ccflags-y += -I$(src) -I$(src)/common -I$(src)/linux -I$(src)/../mali/common -I$(src)/../mali/linux -I$(src)/../../ump/include/ump
ccflags-y += -DMALI_STATE_TRACKING=0
ccflags-y += -DMALI_ENABLE_CPU_CYCLES=0
ccflags-$(CONFIG_UMP_DEBUG) += -DDEBUG
linux/ump_ukk_ref_wrappers.o \
linux/ump_osk_atomics.o \
linux/ump_osk_low_level_mem.o \
- linux/ump_osk_misc.o
-ifeq ($(CONFIG_UMP),m)
-ump-y += $(UDD_FILE_PREFIX)linux/mali_osk_atomics.o
-ump-y += $(UDD_FILE_PREFIX)linux/mali_osk_locks.o
-ump-y += $(UDD_FILE_PREFIX)linux/mali_osk_memory.o
-ump-y += $(UDD_FILE_PREFIX)linux/mali_osk_math.o
-ump-y += $(UDD_FILE_PREFIX)linux/mali_osk_misc.o
-endif
+ linux/ump_osk_misc.o \
+ linux/ump_kernel_random_mapping.o \
+ $(UDD_FILE_PREFIX)linux/mali_osk_atomics.o \
+ $(UDD_FILE_PREFIX)linux/mali_osk_locks.o \
+ $(UDD_FILE_PREFIX)linux/mali_osk_memory.o \
+ $(UDD_FILE_PREFIX)linux/mali_osk_math.o \
+ $(UDD_FILE_PREFIX)linux/mali_osk_misc.o
+
obj-$(CONFIG_UMP) := ump.o
--- /dev/null
+#
+# Copyright (C) 2010-2012, 2014 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+# For each arch check: CROSS_COMPILE , KDIR , CFLAGS += -DARCH
+
+export ARCH ?= arm
+BUILD ?= debug
+
+check_cc2 = \
+ $(shell if $(1) -S -o /dev/null -xc /dev/null > /dev/null 2>&1; \
+ then \
+ echo "$(2)"; \
+ else \
+ echo "$(3)"; \
+ fi ;)
+
+# Check that required parameters are supplied.
+ifeq ($(CONFIG),)
+CONFIG := default
+endif
+ifeq ($(CPU)$(KDIR),)
+$(error "KDIR or CPU must be specified.")
+endif
+
+# Get any user defined KDIR-<names> or maybe even a hardcoded KDIR
+-include KDIR_CONFIGURATION
+
+# Define host system directory
+KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build
+
+ifeq ($(ARCH), arm)
+# when compiling for ARM we're cross compiling
+export CROSS_COMPILE ?= $(call check_cc2, arm-linux-gnueabi-gcc, arm-linux-gnueabi-, arm-none-linux-gnueabi-)
+endif
+
+# look up KDIR based om CPU selection
+KDIR ?= $(KDIR-$(CPU))
+
+export CONFIG
+
+export CONFIG_UMP := m
+ifeq ($(BUILD),debug)
+export CONFIG_UMP_DEBUG := y
+else
+export CONFIG_UMP_DEBUG := n
+endif
+
+ifeq ($(KDIR),)
+$(error No KDIR found for platform $(CPU))
+endif
+
+all:
+ $(MAKE) -C $(KDIR) M=$(CURDIR) modules
+
+kernelrelease:
+ $(MAKE) -C $(KDIR) kernelrelease
+
+clean:
+ $(MAKE) -C $(KDIR) M=$(CURDIR) clean
+ $(MAKE) -C $(KDIR) M=$(CURDIR)/../mali clean
#
-# This confidential and proprietary software may be used only as
-# authorised by a licensing agreement from ARM Limited
-# (C) COPYRIGHT 2008-2011, 2013 ARM Limited
-# ALL RIGHTS RESERVED
-# The entire notice above must be reproduced on all authorised
-# copies and copies may only be made to the extent permitted
-# by a licensing agreement from ARM Limited.
+# Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
SRC = $(UMP_FILE_PREFIX)common/ump_kernel_common.c \
-arch-pb-virtex5-m400-4
\ No newline at end of file
+arch-default
\ No newline at end of file
--- /dev/null
+/*
+ * Copyright (C) 2010, 2012, 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+/* Use OS memory. */
+#define ARCH_UMP_BACKEND_DEFAULT 1
+
+/* OS memory won't need a base address. */
+#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT 0x00000000
+
+/* 512 MB maximum limit for UMP allocations. */
+#define ARCH_UMP_MEMORY_SIZE_DEFAULT 512UL * 1024UL * 1024UL
+
+
+#endif /* __ARCH_CONFIG_H__ */
--- /dev/null
+/*
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __ARCH_CONFIG_H__
+#define __ARCH_CONFIG_H__
+
+#define ARCH_UMP_BACKEND_DEFAULT 0
+#define ARCH_UMP_MEMORY_ADDRESS_DEFAULT 0xE1000000
+#define ARCH_UMP_MEMORY_SIZE_DEFAULT 16UL * 1024UL * 1024UL
+
+#endif /* __ARCH_CONFIG_H__ */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_osk.h"
#include "ump_uk_types.h"
#include "ump_kernel_interface.h"
#include "ump_kernel_common.h"
+#include "ump_kernel_random_mapping.h"
UMP_KERNEL_API_EXPORT ump_secure_id ump_dd_secure_id_get(ump_dd_handle memh)
{
- ump_dd_mem * mem = (ump_dd_mem *)memh;
+ ump_dd_mem *mem = (ump_dd_mem *)memh;
DEBUG_ASSERT_POINTER(mem);
UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_secure_id(ump_secure_id secure_id)
{
- ump_dd_mem * mem;
-
- _mali_osk_mutex_wait(device.secure_id_map_lock);
+ ump_dd_mem *mem;
DBG_MSG(5, ("Getting handle from secure ID. ID: %u\n", secure_id));
- if (0 != ump_descriptor_mapping_get(device.secure_id_map, (int)secure_id, (void**)&mem)) {
- _mali_osk_mutex_signal(device.secure_id_map_lock);
+ mem = ump_random_mapping_get(device.secure_id_map, (int)secure_id);
+ if (NULL == mem) {
DBG_MSG(1, ("Secure ID not found. ID: %u\n", secure_id));
return UMP_DD_HANDLE_INVALID;
}
- ump_dd_reference_add(mem);
-
- _mali_osk_mutex_signal(device.secure_id_map_lock);
+ /* Keep the reference taken in ump_random_mapping_get() */
return (ump_dd_handle)mem;
}
UMP_KERNEL_API_EXPORT unsigned long ump_dd_phys_block_count_get(ump_dd_handle memh)
{
- ump_dd_mem * mem = (ump_dd_mem*) memh;
+ ump_dd_mem *mem = (ump_dd_mem *) memh;
DEBUG_ASSERT_POINTER(mem);
-UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle memh, ump_dd_physical_block * blocks, unsigned long num_blocks)
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_blocks_get(ump_dd_handle memh, ump_dd_physical_block *blocks, unsigned long num_blocks)
{
- ump_dd_mem * mem = (ump_dd_mem *)memh;
+ ump_dd_mem *mem = (ump_dd_mem *)memh;
DEBUG_ASSERT_POINTER(mem);
-UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle memh, unsigned long index, ump_dd_physical_block * block)
+UMP_KERNEL_API_EXPORT ump_dd_status_code ump_dd_phys_block_get(ump_dd_handle memh, unsigned long index, ump_dd_physical_block *block)
{
- ump_dd_mem * mem = (ump_dd_mem *)memh;
+ ump_dd_mem *mem = (ump_dd_mem *)memh;
DEBUG_ASSERT_POINTER(mem);
UMP_KERNEL_API_EXPORT unsigned long ump_dd_size_get(ump_dd_handle memh)
{
- ump_dd_mem * mem = (ump_dd_mem*)memh;
+ ump_dd_mem *mem = (ump_dd_mem *)memh;
DEBUG_ASSERT_POINTER(mem);
UMP_KERNEL_API_EXPORT void ump_dd_reference_add(ump_dd_handle memh)
{
- ump_dd_mem * mem = (ump_dd_mem*)memh;
+ ump_dd_mem *mem = (ump_dd_mem *)memh;
int new_ref;
DEBUG_ASSERT_POINTER(mem);
UMP_KERNEL_API_EXPORT void ump_dd_reference_release(ump_dd_handle memh)
{
- int new_ref;
- ump_dd_mem * mem = (ump_dd_mem*)memh;
+ ump_dd_mem *mem = (ump_dd_mem *)memh;
DEBUG_ASSERT_POINTER(mem);
- /* We must hold this mutex while doing the atomic_dec_and_read, to protect
- that elements in the ump_descriptor_mapping table is always valid. If they
- are not, userspace may accidently map in this secure_ids right before its freed
- giving a mapped backdoor into unallocated memory.*/
- _mali_osk_mutex_wait(device.secure_id_map_lock);
-
- new_ref = _ump_osk_atomic_dec_and_read(&mem->ref_count);
-
- DBG_MSG(5, ("Memory reference decremented. ID: %u, new value: %d\n", mem->secure_id, new_ref));
-
- if (0 == new_ref) {
- DBG_MSG(3, ("Final release of memory. ID: %u\n", mem->secure_id));
-
- ump_descriptor_mapping_free(device.secure_id_map, (int)mem->secure_id);
-
- _mali_osk_mutex_signal(device.secure_id_map_lock);
- mem->release_func(mem->ctx, mem);
- _mali_osk_free(mem);
- } else {
- _mali_osk_mutex_signal(device.secure_id_map_lock);
- }
+ ump_random_mapping_put(mem);
}
/* --------------- Handling of user space requests follows --------------- */
-_mali_osk_errcode_t _ump_uku_get_api_version( _ump_uk_api_version_s *args )
+_mali_osk_errcode_t _ump_uku_get_api_version(_ump_uk_api_version_s *args)
{
- ump_session_data * session_data;
+ ump_session_data *session_data;
- DEBUG_ASSERT_POINTER( args );
- DEBUG_ASSERT_POINTER( args->ctx );
+ DEBUG_ASSERT_POINTER(args);
+ DEBUG_ASSERT_POINTER(args->ctx);
session_data = (ump_session_data *)args->ctx;
/* check compatability */
if (args->version == UMP_IOCTL_API_VERSION) {
- DBG_MSG(3, ("API version set to newest %d (compatible)\n", GET_VERSION(args->version)));
- args->compatible = 1;
- session_data->api_version = args->version;
- } else if (args->version == MAKE_VERSION_ID(1)) {
- DBG_MSG(2, ("API version set to depricated: %d (compatible)\n", GET_VERSION(args->version)));
+ DBG_MSG(3, ("API version set to newest %d (compatible)\n",
+ GET_VERSION(args->version)));
args->compatible = 1;
session_data->api_version = args->version;
} else {
- DBG_MSG(2, ("API version set to %d (incompatible with client version %d)\n", GET_VERSION(UMP_IOCTL_API_VERSION), GET_VERSION(args->version)));
+ DBG_MSG(2, ("API version set to %d (incompatible with client version %d)\n",
+ GET_VERSION(UMP_IOCTL_API_VERSION), GET_VERSION(args->version)));
args->compatible = 0;
args->version = UMP_IOCTL_API_VERSION; /* report our version */
}
}
-_mali_osk_errcode_t _ump_ukk_release( _ump_uk_release_s *release_info )
+_mali_osk_errcode_t _ump_ukk_release(_ump_uk_release_s *release_info)
{
- ump_session_memory_list_element * session_memory_element;
- ump_session_memory_list_element * tmp;
- ump_session_data * session_data;
+ ump_session_memory_list_element *session_memory_element;
+ ump_session_memory_list_element *tmp;
+ ump_session_data *session_data;
_mali_osk_errcode_t ret = _MALI_OSK_ERR_INVALID_FUNC;
int secure_id;
- DEBUG_ASSERT_POINTER( release_info );
- DEBUG_ASSERT_POINTER( release_info->ctx );
+ DEBUG_ASSERT_POINTER(release_info);
+ DEBUG_ASSERT_POINTER(release_info->ctx);
/* Retreive the session data */
- session_data = (ump_session_data*)release_info->ctx;
+ session_data = (ump_session_data *)release_info->ctx;
/* If there are many items in the memory session list we
* could be de-referencing this pointer a lot so keep a local copy
/* Iterate through the memory list looking for the requested secure ID */
_mali_osk_mutex_wait(session_data->lock);
_MALI_OSK_LIST_FOREACHENTRY(session_memory_element, tmp, &session_data->list_head_session_memory_list, ump_session_memory_list_element, list) {
- if ( session_memory_element->mem->secure_id == secure_id) {
+ if (session_memory_element->mem->secure_id == secure_id) {
ump_dd_mem *release_mem;
release_mem = session_memory_element->mem;
return ret;
}
-_mali_osk_errcode_t _ump_ukk_size_get( _ump_uk_size_get_s *user_interaction )
+_mali_osk_errcode_t _ump_ukk_size_get(_ump_uk_size_get_s *user_interaction)
{
- ump_dd_mem * mem;
+ ump_dd_mem *mem;
_mali_osk_errcode_t ret = _MALI_OSK_ERR_FAULT;
- DEBUG_ASSERT_POINTER( user_interaction );
+ DEBUG_ASSERT_POINTER(user_interaction);
/* We lock the mappings so things don't get removed while we are looking for the memory */
- _mali_osk_mutex_wait(device.secure_id_map_lock);
- if (0 == ump_descriptor_mapping_get(device.secure_id_map, (int)user_interaction->secure_id, (void**)&mem)) {
+ mem = ump_random_mapping_get(device.secure_id_map, user_interaction->secure_id);
+ if (NULL != mem) {
user_interaction->size = mem->size_bytes;
- DBG_MSG(4, ("Returning size. ID: %u, size: %lu ", (ump_secure_id)user_interaction->secure_id, (unsigned long)user_interaction->size));
+ DBG_MSG(4, ("Returning size. ID: %u, size: %lu ",
+ (ump_secure_id)user_interaction->secure_id,
+ (unsigned long)user_interaction->size));
+ ump_random_mapping_put(mem);
ret = _MALI_OSK_ERR_OK;
} else {
user_interaction->size = 0;
- DBG_MSG(1, ("Failed to look up mapping in ump_ioctl_size_get(). ID: %u\n", (ump_secure_id)user_interaction->secure_id));
+ DBG_MSG(1, ("Failed to look up mapping in ump_ioctl_size_get(). ID: %u\n",
+ (ump_secure_id)user_interaction->secure_id));
}
- _mali_osk_mutex_signal(device.secure_id_map_lock);
return ret;
}
-void _ump_ukk_msync( _ump_uk_msync_s *args )
+void _ump_ukk_msync(_ump_uk_msync_s *args)
{
- ump_dd_mem * mem = NULL;
+ ump_dd_mem *mem = NULL;
void *virtual = NULL;
u32 size = 0;
u32 offset = 0;
- _mali_osk_mutex_wait(device.secure_id_map_lock);
- ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
-
+ mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
if (NULL == mem) {
- _mali_osk_mutex_signal(device.secure_id_map_lock);
- DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_msync(). ID: %u\n", (ump_secure_id)args->secure_id));
+ DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_msync(). ID: %u\n",
+ (ump_secure_id)args->secure_id));
return;
}
- /* Ensure the memory doesn't dissapear when we are flushing it. */
- ump_dd_reference_add(mem);
- _mali_osk_mutex_signal(device.secure_id_map_lock);
/* Returns the cache settings back to Userspace */
- args->is_cached=mem->is_cached;
+ args->is_cached = mem->is_cached;
/* If this flag is the only one set, we should not do the actual flush, only the readout */
- if ( _UMP_UK_MSYNC_READOUT_CACHE_ENABLED==args->op ) {
+ if (_UMP_UK_MSYNC_READOUT_CACHE_ENABLED == args->op) {
DBG_MSG(3, ("_ump_ukk_msync READOUT ID: %u Enabled: %d\n", (ump_secure_id)args->secure_id, mem->is_cached));
goto msync_release_and_return;
}
/* Nothing to do if the memory is not caches */
- if ( 0==mem->is_cached ) {
+ if (0 == mem->is_cached) {
DBG_MSG(3, ("_ump_ukk_msync IGNORING ID: %u Enabled: %d OP: %d\n", (ump_secure_id)args->secure_id, mem->is_cached, args->op));
goto msync_release_and_return;
}
DBG_MSG(3, ("UMP[%02u] _ump_ukk_msync Flush OP: %d Address: 0x%08x Mapping: 0x%08x\n",
- (ump_secure_id)args->secure_id, args->op, args->address, args->mapping));
+ (ump_secure_id)args->secure_id, args->op, args->address, args->mapping));
- if ( args->address ) {
+ if (args->address) {
virtual = (void *)((u32)args->address);
offset = (u32)((args->address) - (args->mapping));
} else {
/* Flush entire mapping when no address is specified. */
virtual = args->mapping;
}
- if ( args->size ) {
+ if (args->size) {
size = args->size;
} else {
/* Flush entire mapping when no size is specified. */
size = mem->size_bytes - offset;
}
- if ( (offset + size) > mem->size_bytes ) {
+ if ((offset + size) > mem->size_bytes) {
DBG_MSG(1, ("Trying to flush more than the entire UMP allocation: offset: %u + size: %u > %u\n", offset, size, mem->size_bytes));
goto msync_release_and_return;
}
/* The actual cache flush - Implemented for each OS*/
- _ump_osk_msync( mem, virtual, offset, size, args->op, NULL);
+ _ump_osk_msync(mem, virtual, offset, size, args->op, NULL);
msync_release_and_return:
- ump_dd_reference_release(mem);
+ ump_random_mapping_put(mem);
return;
}
-void _ump_ukk_cache_operations_control(_ump_uk_cache_operations_control_s* args)
+void _ump_ukk_cache_operations_control(_ump_uk_cache_operations_control_s *args)
{
- ump_session_data * session_data;
+ ump_session_data *session_data;
ump_uk_cache_op_control op;
- DEBUG_ASSERT_POINTER( args );
- DEBUG_ASSERT_POINTER( args->ctx );
+ DEBUG_ASSERT_POINTER(args);
+ DEBUG_ASSERT_POINTER(args->ctx);
op = args->op;
session_data = (ump_session_data *)args->ctx;
_mali_osk_mutex_wait(session_data->lock);
- if ( op== _UMP_UK_CACHE_OP_START ) {
+ if (op == _UMP_UK_CACHE_OP_START) {
session_data->cache_operations_ongoing++;
- DBG_MSG(4, ("Cache ops start\n" ));
- if ( session_data->cache_operations_ongoing != 1 ) {
- DBG_MSG(2, ("UMP: Number of simultanious cache control ops: %d\n", session_data->cache_operations_ongoing) );
+ DBG_MSG(4, ("Cache ops start\n"));
+ if (session_data->cache_operations_ongoing != 1) {
+ DBG_MSG(2, ("UMP: Number of simultanious cache control ops: %d\n", session_data->cache_operations_ongoing));
}
- } else if ( op== _UMP_UK_CACHE_OP_FINISH ) {
+ } else if (op == _UMP_UK_CACHE_OP_FINISH) {
DBG_MSG(4, ("Cache ops finish\n"));
session_data->cache_operations_ongoing--;
#if 0
- if ( session_data->has_pending_level1_cache_flush) {
+ if (session_data->has_pending_level1_cache_flush) {
/* This function will set has_pending_level1_cache_flush=0 */
- _ump_osk_msync( NULL, NULL, 0, 0, _UMP_UK_MSYNC_FLUSH_L1, session_data);
+ _ump_osk_msync(NULL, NULL, 0, 0, _UMP_UK_MSYNC_FLUSH_L1, session_data);
}
#endif
/* to be on the safe side: always flush l1 cache when cache operations are done */
- _ump_osk_msync( NULL, NULL, 0, 0, _UMP_UK_MSYNC_FLUSH_L1, session_data);
- DBG_MSG(4, ("Cache ops finish end\n" ));
+ _ump_osk_msync(NULL, NULL, 0, 0, _UMP_UK_MSYNC_FLUSH_L1, session_data);
+ DBG_MSG(4, ("Cache ops finish end\n"));
} else {
DBG_MSG(1, ("Illegal call to %s at line %d\n", __FUNCTION__, __LINE__));
}
}
-void _ump_ukk_switch_hw_usage(_ump_uk_switch_hw_usage_s *args )
+void _ump_ukk_switch_hw_usage(_ump_uk_switch_hw_usage_s *args)
{
- ump_dd_mem * mem = NULL;
+ ump_dd_mem *mem = NULL;
ump_uk_user old_user;
ump_uk_msync_op cache_op = _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE;
ump_session_data *session_data;
- DEBUG_ASSERT_POINTER( args );
- DEBUG_ASSERT_POINTER( args->ctx );
+ DEBUG_ASSERT_POINTER(args);
+ DEBUG_ASSERT_POINTER(args->ctx);
session_data = (ump_session_data *)args->ctx;
- _mali_osk_mutex_wait(device.secure_id_map_lock);
- ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
-
+ mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
if (NULL == mem) {
- _mali_osk_mutex_signal(device.secure_id_map_lock);
- DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_switch_hw_usage(). ID: %u\n", (ump_secure_id)args->secure_id));
+ DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_switch_hw_usage(). ID: %u\n",
+ (ump_secure_id)args->secure_id));
return;
}
old_user = mem->hw_device;
mem->hw_device = args->new_user;
- DBG_MSG(3, ("UMP[%02u] Switch usage Start New: %s Prev: %s.\n", (ump_secure_id)args->secure_id, args->new_user?"MALI":"CPU",old_user?"MALI":"CPU"));
+ DBG_MSG(3, ("UMP[%02u] Switch usage Start New: %s Prev: %s.\n",
+ (ump_secure_id)args->secure_id,
+ args->new_user ? "MALI" : "CPU",
+ old_user ? "MALI" : "CPU"));
- if ( ! mem->is_cached ) {
- _mali_osk_mutex_signal(device.secure_id_map_lock);
- DBG_MSG(3, ("UMP[%02u] Changing owner of uncached memory. Cache flushing not needed.\n", (ump_secure_id)args->secure_id));
- return;
+ if (!mem->is_cached) {
+ DBG_MSG(3, ("UMP[%02u] Changing owner of uncached memory. Cache flushing not needed.\n",
+ (ump_secure_id)args->secure_id));
+ goto out;
}
- if ( old_user == args->new_user) {
- _mali_osk_mutex_signal(device.secure_id_map_lock);
- DBG_MSG(4, ("UMP[%02u] Setting the new_user equal to previous for. Cache flushing not needed.\n", (ump_secure_id)args->secure_id));
- return;
+ if (old_user == args->new_user) {
+ DBG_MSG(4, ("UMP[%02u] Setting the new_user equal to previous for. Cache flushing not needed.\n",
+ (ump_secure_id)args->secure_id));
+ goto out;
}
if (
- /* Previous AND new is both different from CPU */
- (old_user != _UMP_UK_USED_BY_CPU) && (args->new_user != _UMP_UK_USED_BY_CPU )
+ /* Previous AND new is both different from CPU */
+ (old_user != _UMP_UK_USED_BY_CPU) && (args->new_user != _UMP_UK_USED_BY_CPU)
) {
- _mali_osk_mutex_signal(device.secure_id_map_lock);
- DBG_MSG(4, ("UMP[%02u] Previous and new user is not CPU. Cache flushing not needed.\n", (ump_secure_id)args->secure_id));
- return;
+ DBG_MSG(4, ("UMP[%02u] Previous and new user is not CPU. Cache flushing not needed.\n",
+ (ump_secure_id)args->secure_id));
+ goto out;
}
- if ( (old_user != _UMP_UK_USED_BY_CPU ) && (args->new_user==_UMP_UK_USED_BY_CPU) ) {
- cache_op =_UMP_UK_MSYNC_INVALIDATE;
+ if ((old_user != _UMP_UK_USED_BY_CPU) && (args->new_user == _UMP_UK_USED_BY_CPU)) {
+ cache_op = _UMP_UK_MSYNC_INVALIDATE;
DBG_MSG(4, ("UMP[%02u] Cache invalidation needed\n", (ump_secure_id)args->secure_id));
#ifdef UMP_SKIP_INVALIDATION
#error
- _mali_osk_mutex_signal(device.secure_id_map_lock);
DBG_MSG(4, ("UMP[%02u] Performing Cache invalidation SKIPPED\n", (ump_secure_id)args->secure_id));
- return;
+ goto out;
#endif
}
- /* Ensure the memory doesn't dissapear when we are flushing it. */
- ump_dd_reference_add(mem);
- _mali_osk_mutex_signal(device.secure_id_map_lock);
/* Take lock to protect: session->cache_operations_ongoing and session->has_pending_level1_cache_flush */
_mali_osk_mutex_wait(session_data->lock);
/* Actual cache flush */
- _ump_osk_msync( mem, NULL, 0, mem->size_bytes, cache_op, session_data);
+ _ump_osk_msync(mem, NULL, 0, mem->size_bytes, cache_op, session_data);
_mali_osk_mutex_signal(session_data->lock);
- ump_dd_reference_release(mem);
+out:
+ ump_random_mapping_put(mem);
DBG_MSG(4, ("UMP[%02u] Switch usage Finish\n", (ump_secure_id)args->secure_id));
return;
}
-void _ump_ukk_lock(_ump_uk_lock_s *args )
+void _ump_ukk_lock(_ump_uk_lock_s *args)
{
- ump_dd_mem * mem = NULL;
-
- _mali_osk_mutex_wait(device.secure_id_map_lock);
- ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
+ ump_dd_mem *mem = NULL;
+ mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
if (NULL == mem) {
- _mali_osk_mutex_signal(device.secure_id_map_lock);
- DBG_MSG(1, ("UMP[%02u] Failed to look up mapping in _ump_ukk_lock(). ID: %u\n", (ump_secure_id)args->secure_id));
+ DBG_MSG(1, ("UMP[%02u] Failed to look up mapping in _ump_ukk_lock(). ID: %u\n",
+ (ump_secure_id)args->secure_id));
return;
}
- ump_dd_reference_add(mem);
- _mali_osk_mutex_signal(device.secure_id_map_lock);
- DBG_MSG(1, ("UMP[%02u] Lock. New lock flag: %d. Old Lock flag:\n", (u32)args->secure_id, (u32)args->lock_usage, (u32) mem->lock_usage ));
+ DBG_MSG(1, ("UMP[%02u] Lock. New lock flag: %d. Old Lock flag:\n", (u32)args->secure_id, (u32)args->lock_usage, (u32) mem->lock_usage));
mem->lock_usage = (ump_lock_usage) args->lock_usage;
- ump_dd_reference_release(mem);
+ ump_random_mapping_put(mem);
}
-void _ump_ukk_unlock(_ump_uk_unlock_s *args )
+void _ump_ukk_unlock(_ump_uk_unlock_s *args)
{
- ump_dd_mem * mem = NULL;
-
- _mali_osk_mutex_wait(device.secure_id_map_lock);
- ump_descriptor_mapping_get(device.secure_id_map, (int)args->secure_id, (void**)&mem);
+ ump_dd_mem *mem = NULL;
+ mem = ump_random_mapping_get(device.secure_id_map, (int)args->secure_id);
if (NULL == mem) {
- _mali_osk_mutex_signal(device.secure_id_map_lock);
- DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_unlock(). ID: %u\n", (ump_secure_id)args->secure_id));
+ DBG_MSG(1, ("Failed to look up mapping in _ump_ukk_unlock(). ID: %u\n",
+ (ump_secure_id)args->secure_id));
return;
}
- ump_dd_reference_add(mem);
- _mali_osk_mutex_signal(device.secure_id_map_lock);
- DBG_MSG(1, ("UMP[%02u] Unlocking. Old Lock flag:\n", (u32)args->secure_id, (u32) mem->lock_usage ));
+ DBG_MSG(1, ("UMP[%02u] Unlocking. Old Lock flag:\n",
+ (u32)args->secure_id, (u32) mem->lock_usage));
mem->lock_usage = (ump_lock_usage) UMP_NOT_LOCKED;
- ump_dd_reference_release(mem);
+ ump_random_mapping_put(mem);
}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_kernel_common.h"
/* Perform OS Specific initialization */
err = _ump_osk_init();
- if( _MALI_OSK_ERR_OK != err ) {
+ if (_MALI_OSK_ERR_OK != err) {
		MSG_ERR(("Failed to initialize the UMP Device Driver"));
return err;
}
/* Init the global device */
- _mali_osk_memset(&device, 0, sizeof(device) );
+ _mali_osk_memset(&device, 0, sizeof(device));
/* Create the descriptor map, which will be used for mapping secure ID to ump_dd_mem structs */
- device.secure_id_map_lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_UNORDERED, 0);
- if (NULL == device.secure_id_map_lock) {
- MSG_ERR(("Failed to create OSK lock for secure id lookup table\n"));
- return _MALI_OSK_ERR_NOMEM;
- }
-
- device.secure_id_map = ump_descriptor_mapping_create(UMP_SECURE_ID_TABLE_ENTRIES_INITIAL, UMP_SECURE_ID_TABLE_ENTRIES_MAXIMUM);
+ device.secure_id_map = ump_random_mapping_create();
if (NULL == device.secure_id_map) {
- _mali_osk_mutex_term(device.secure_id_map_lock);
MSG_ERR(("Failed to create secure id lookup table\n"));
return _MALI_OSK_ERR_NOMEM;
}
device.backend = ump_memory_backend_create();
if (NULL == device.backend) {
MSG_ERR(("Failed to create memory backend\n"));
- _mali_osk_mutex_term(device.secure_id_map_lock);
- ump_descriptor_mapping_destroy(device.secure_id_map);
+ ump_random_mapping_destroy(device.secure_id_map);
return _MALI_OSK_ERR_NOMEM;
}
void ump_kernel_destructor(void)
{
DEBUG_ASSERT_POINTER(device.secure_id_map);
- DEBUG_ASSERT_POINTER(device.secure_id_map_lock);
-
- _mali_osk_mutex_term(device.secure_id_map_lock);
- device.secure_id_map_lock = NULL;
- ump_descriptor_mapping_destroy(device.secure_id_map);
+ ump_random_mapping_destroy(device.secure_id_map);
device.secure_id_map = NULL;
device.backend->shutdown(device.backend);
/** Creates a new UMP session
*/
-_mali_osk_errcode_t _ump_ukk_open( void** context )
+_mali_osk_errcode_t _ump_ukk_open(void **context)
{
- struct ump_session_data * session_data;
+ struct ump_session_data *session_data;
/* allocated struct to track this session */
session_data = (struct ump_session_data *)_mali_osk_malloc(sizeof(struct ump_session_data));
}
session_data->lock = _mali_osk_mutex_init(_MALI_OSK_LOCKFLAG_UNORDERED, 0);
- if( NULL == session_data->lock ) {
+ if (NULL == session_data->lock) {
MSG_ERR(("Failed to initialize lock for ump_session_data in ump_file_open()\n"));
_mali_osk_free(session_data);
return _MALI_OSK_ERR_NOMEM;
}
- session_data->cookies_map = ump_descriptor_mapping_create( UMP_COOKIES_PER_SESSION_INITIAL, UMP_COOKIES_PER_SESSION_MAXIMUM );
+ session_data->cookies_map = ump_descriptor_mapping_create(
+ UMP_COOKIES_PER_SESSION_INITIAL,
+ UMP_COOKIES_PER_SESSION_MAXIMUM);
- if ( NULL == session_data->cookies_map ) {
+ if (NULL == session_data->cookies_map) {
MSG_ERR(("Failed to create descriptor mapping for _ump_ukk_map_mem cookies\n"));
_mali_osk_mutex_term(session_data->lock);
- _mali_osk_free( session_data );
+ _mali_osk_free(session_data);
return _MALI_OSK_ERR_NOMEM;
}
to the correct one.*/
session_data->api_version = MAKE_VERSION_ID(1);
- *context = (void*)session_data;
+ *context = (void *)session_data;
session_data->cache_operations_ongoing = 0 ;
session_data->has_pending_level1_cache_flush = 0;
return _MALI_OSK_ERR_OK;
}
-_mali_osk_errcode_t _ump_ukk_close( void** context )
+_mali_osk_errcode_t _ump_ukk_close(void **context)
{
- struct ump_session_data * session_data;
- ump_session_memory_list_element * item;
- ump_session_memory_list_element * tmp;
+ struct ump_session_data *session_data;
+ ump_session_memory_list_element *item;
+ ump_session_memory_list_element *tmp;
session_data = (struct ump_session_data *)*context;
if (NULL == session_data) {
_MALI_OSK_LIST_FOREACHENTRY(descriptor, temp, &session_data->list_head_session_memory_mappings_list, ump_memory_allocation, list) {
_ump_uk_unmap_mem_s unmap_args;
DBG_MSG(4, ("Freeing block with phys address 0x%x size 0x%x mapped in user space at 0x%x\n",
- descriptor->phys_addr, descriptor->size, descriptor->mapping));
- unmap_args.ctx = (void*)session_data;
+ descriptor->phys_addr, descriptor->size, descriptor->mapping));
+ unmap_args.ctx = (void *)session_data;
unmap_args.mapping = descriptor->mapping;
unmap_args.size = descriptor->size;
unmap_args._ukk_private = NULL; /* NOTE: unused */
unmap_args.cookie = descriptor->cookie;
/* NOTE: This modifies the list_head_session_memory_mappings_list */
- _ump_ukk_unmap_mem( &unmap_args );
+ _ump_ukk_unmap_mem(&unmap_args);
}
}
/* ASSERT that we really did free everything, because _ump_ukk_unmap_mem()
* can fail silently. */
- DEBUG_ASSERT( _mali_osk_list_empty(&session_data->list_head_session_memory_mappings_list) );
+ DEBUG_ASSERT(_mali_osk_list_empty(&session_data->list_head_session_memory_mappings_list));
_MALI_OSK_LIST_FOREACHENTRY(item, tmp, &session_data->list_head_session_memory_list, ump_session_memory_list_element, list) {
_mali_osk_list_del(&item->list);
_mali_osk_free(item);
}
- ump_descriptor_mapping_destroy( session_data->cookies_map );
+ ump_descriptor_mapping_destroy(session_data->cookies_map);
_mali_osk_mutex_term(session_data->lock);
_mali_osk_free(session_data);
return _MALI_OSK_ERR_OK;
}
-_mali_osk_errcode_t _ump_ukk_map_mem( _ump_uk_map_mem_s *args )
+_mali_osk_errcode_t _ump_ukk_map_mem(_ump_uk_map_mem_s *args)
{
- struct ump_session_data * session_data;
- ump_memory_allocation * descriptor; /* Describes current mapping of memory */
+ struct ump_session_data *session_data;
+ ump_memory_allocation *descriptor; /* Describes current mapping of memory */
_mali_osk_errcode_t err;
unsigned long offset = 0;
unsigned long left;
ump_dd_handle handle; /* The real UMP handle for this memory. Its real datatype is ump_dd_mem* */
- ump_dd_mem * mem; /* The real UMP memory. It is equal to the handle, but with exposed struct */
+ ump_dd_mem *mem; /* The real UMP memory. It is equal to the handle, but with exposed struct */
u32 block;
int map_id;
session_data = (ump_session_data *)args->ctx;
- if( NULL == session_data ) {
+ if (NULL == session_data) {
MSG_ERR(("Session data is NULL in _ump_ukk_map_mem()\n"));
return _MALI_OSK_ERR_INVALID_ARGS;
}
- descriptor = (ump_memory_allocation*) _mali_osk_calloc( 1, sizeof(ump_memory_allocation));
+ descriptor = (ump_memory_allocation *) _mali_osk_calloc(1, sizeof(ump_memory_allocation));
if (NULL == descriptor) {
MSG_ERR(("ump_ukk_map_mem: descriptor allocation failed\n"));
return _MALI_OSK_ERR_NOMEM;
}
handle = ump_dd_handle_create_from_secure_id(args->secure_id);
- if ( UMP_DD_HANDLE_INVALID == handle) {
+ if (UMP_DD_HANDLE_INVALID == handle) {
_mali_osk_free(descriptor);
DBG_MSG(1, ("Trying to map unknown secure ID %u\n", args->secure_id));
return _MALI_OSK_ERR_FAULT;
}
- mem = (ump_dd_mem*)handle;
+ mem = (ump_dd_mem *)handle;
DEBUG_ASSERT(mem);
if (mem->size_bytes != args->size) {
_mali_osk_free(descriptor);
return _MALI_OSK_ERR_FAULT;
}
- map_id = ump_descriptor_mapping_allocate_mapping( session_data->cookies_map, (void*) descriptor );
+ map_id = ump_descriptor_mapping_allocate_mapping(session_data->cookies_map, (void *) descriptor);
if (map_id < 0) {
_mali_osk_free(descriptor);
descriptor->ump_session = session_data;
descriptor->cookie = (u32)map_id;
- if ( mem->is_cached ) {
+ if (mem->is_cached) {
descriptor->is_cached = 1;
args->is_cached = 1;
DBG_MSG(3, ("Mapping UMP secure_id: %d as cached.\n", args->secure_id));
DBG_MSG(3, ("Mapping UMP secure_id: %d as Uncached.\n", args->secure_id));
}
- _mali_osk_list_init( &descriptor->list );
+ _mali_osk_list_init(&descriptor->list);
- err = _ump_osk_mem_mapregion_init( descriptor );
- if( _MALI_OSK_ERR_OK != err ) {
+ err = _ump_osk_mem_mapregion_init(descriptor);
+ if (_MALI_OSK_ERR_OK != err) {
DBG_MSG(1, ("Failed to initialize memory mapping in _ump_ukk_map_mem(). ID: %u\n", args->secure_id));
- ump_descriptor_mapping_free( session_data->cookies_map, map_id );
+ ump_descriptor_mapping_free(session_data->cookies_map, map_id);
_mali_osk_free(descriptor);
ump_dd_reference_release(mem);
return err;
}
DBG_MSG(4, ("Mapping virtual to physical memory: ID: %u, size:%lu, first physical addr: 0x%08lx, number of regions: %lu\n",
- mem->secure_id,
- mem->size_bytes,
- ((NULL != mem->block_array) ? mem->block_array->addr : 0),
- mem->nr_blocks));
+ mem->secure_id,
+ mem->size_bytes,
+ ((NULL != mem->block_array) ? mem->block_array->addr : 0),
+ mem->nr_blocks));
left = descriptor->size;
/* loop over all blocks and map them in */
size_to_map = left;
}
- if (_MALI_OSK_ERR_OK != _ump_osk_mem_mapregion_map(descriptor, offset, (u32 *)&(mem->block_array[block].addr), size_to_map ) ) {
+ if (_MALI_OSK_ERR_OK != _ump_osk_mem_mapregion_map(descriptor, offset, (u32 *) & (mem->block_array[block].addr), size_to_map)) {
DBG_MSG(1, ("WARNING: _ump_ukk_map_mem failed to map memory into userspace\n"));
- ump_descriptor_mapping_free( session_data->cookies_map, map_id );
+ ump_descriptor_mapping_free(session_data->cookies_map, map_id);
ump_dd_reference_release(mem);
- _ump_osk_mem_mapregion_term( descriptor );
+ _ump_osk_mem_mapregion_term(descriptor);
_mali_osk_free(descriptor);
return _MALI_OSK_ERR_FAULT;
}
/* Add to the ump_memory_allocation tracking list */
_mali_osk_mutex_wait(session_data->lock);
- _mali_osk_list_add( &descriptor->list, &session_data->list_head_session_memory_mappings_list );
+ _mali_osk_list_add(&descriptor->list, &session_data->list_head_session_memory_mappings_list);
_mali_osk_mutex_signal(session_data->lock);
args->mapping = descriptor->mapping;
return _MALI_OSK_ERR_OK;
}
-void _ump_ukk_unmap_mem( _ump_uk_unmap_mem_s *args )
+void _ump_ukk_unmap_mem(_ump_uk_unmap_mem_s *args)
{
- struct ump_session_data * session_data;
- ump_memory_allocation * descriptor;
+ struct ump_session_data *session_data;
+ ump_memory_allocation *descriptor;
ump_dd_handle handle;
session_data = (ump_session_data *)args->ctx;
- if( NULL == session_data ) {
+ if (NULL == session_data) {
MSG_ERR(("Session data is NULL in _ump_ukk_map_mem()\n"));
return;
}
- if (0 != ump_descriptor_mapping_get( session_data->cookies_map, (int)args->cookie, (void**)&descriptor) ) {
- MSG_ERR(("_ump_ukk_map_mem: cookie 0x%X not found for this session\n", args->cookie ));
+ if (0 != ump_descriptor_mapping_get(session_data->cookies_map, (int)args->cookie, (void **)&descriptor)) {
+ MSG_ERR(("_ump_ukk_map_mem: cookie 0x%X not found for this session\n", args->cookie));
return;
}
DEBUG_ASSERT_POINTER(descriptor);
handle = descriptor->handle;
- if ( UMP_DD_HANDLE_INVALID == handle) {
+ if (UMP_DD_HANDLE_INVALID == handle) {
DBG_MSG(1, ("WARNING: Trying to unmap unknown handle: UNKNOWN\n"));
return;
}
/* Remove the ump_memory_allocation from the list of tracked mappings */
_mali_osk_mutex_wait(session_data->lock);
- _mali_osk_list_del( &descriptor->list );
+ _mali_osk_list_del(&descriptor->list);
_mali_osk_mutex_signal(session_data->lock);
- ump_descriptor_mapping_free( session_data->cookies_map, (int)args->cookie );
+ ump_descriptor_mapping_free(session_data->cookies_map, (int)args->cookie);
ump_dd_reference_release(handle);
- _ump_osk_mem_mapregion_term( descriptor );
+ _ump_osk_mem_mapregion_term(descriptor);
_mali_osk_free(descriptor);
}
-u32 _ump_ukk_report_memory_usage( void )
+u32 _ump_ukk_report_memory_usage(void)
{
- if(device.backend->stat)
+ if (device.backend->stat)
return device.backend->stat(device.backend);
else
return 0;
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __UMP_KERNEL_COMMON_H__
#include "ump_kernel_types.h"
#include "ump_kernel_interface.h"
#include "ump_kernel_descriptor_mapping.h"
+#include "ump_kernel_random_mapping.h"
#include "ump_kernel_memory_backend.h"
((level) <= ump_debug_level)?\
UMP_DEBUG_PRINT(("UMP<" #level ">: ")), \
UMP_DEBUG_PRINT(args):0; \
- } while (0)
+ } while (0)
#define DBG_MSG_IF(level,condition,args) /* args should be in brackets */ \
- if((condition)&&((level) <= ump_debug_level)) {\
+ if((condition)&&((level) <= ump_debug_level)) {\
UMP_DEBUG_PRINT(("UMP<" #level ">: ")); \
UMP_DEBUG_PRINT(args); \
- }
+ }
#define DBG_MSG_ELSE(level,args) /* args should be in brackets */ \
- else if((level) <= ump_debug_level) { \
+ else if((level) <= ump_debug_level) { \
UMP_DEBUG_PRINT(("UMP<" #level ">: ")); \
UMP_DEBUG_PRINT(args); \
- }
+ }
#define DEBUG_ASSERT_POINTER(pointer) do {if( (pointer)== NULL) MSG_ERR(("NULL pointer " #pointer)); } while(0)
#define DEBUG_ASSERT(condition) do {if(!(condition)) MSG_ERR(("ASSERT failed: " #condition)); } while(0)
#endif /* DEBUG */
#define MSG_ERR(args) do{ /* args should be in brackets */ \
- _mali_osk_dbgmsg("UMP: ERR: %s\n" ,__FILE__); \
- _mali_osk_dbgmsg( " %s()%4d\n", __FUNCTION__, __LINE__) ; \
- _mali_osk_dbgmsg args ; \
- _mali_osk_dbgmsg("\n"); \
+ _mali_osk_dbgmsg("UMP: ERR: %s\n" ,__FILE__); \
+ _mali_osk_dbgmsg( " %s()%4d\n", __FUNCTION__, __LINE__) ; \
+ _mali_osk_dbgmsg args ; \
+ _mali_osk_dbgmsg("\n"); \
} while(0)
#define MSG(args) do{ /* args should be in brackets */ \
- _mali_osk_dbgmsg("UMP: "); \
- _mali_osk_dbgmsg args; \
- } while (0)
+ _mali_osk_dbgmsg("UMP: "); \
+ _mali_osk_dbgmsg args; \
+ } while (0)
_mali_osk_list_t list_head_session_memory_mappings_list; /**< List of ump_memory_allocations mapped in */
int api_version;
_mali_osk_mutex_t *lock;
- ump_descriptor_mapping * cookies_map; /**< Secure mapping of cookies from _ump_ukk_map_mem() */
+ ump_descriptor_mapping *cookies_map; /**< Secure mapping of cookies from _ump_ukk_map_mem() */
int cache_operations_ongoing;
int has_pending_level1_cache_flush;
} ump_session_data;
* which don't do it themself (e.g. due to a crash or premature termination).
*/
typedef struct ump_session_memory_list_element {
- struct ump_dd_mem * mem;
+ struct ump_dd_mem *mem;
_mali_osk_list_t list;
} ump_session_memory_list_element;
* Device specific data, created when device driver is loaded, and then kept as the global variable device.
*/
typedef struct ump_dev {
- _mali_osk_mutex_t *secure_id_map_lock;
- ump_descriptor_mapping * secure_id_map;
- ump_memory_backend * backend;
+ ump_random_mapping *secure_id_map;
+ ump_memory_backend *backend;
} ump_dev;
_mali_osk_errcode_t ump_kernel_constructor(void);
void ump_kernel_destructor(void);
-int map_errcode( _mali_osk_errcode_t err );
+int map_errcode(_mali_osk_errcode_t err);
/**
* variables from user space cannot be dereferenced from kernel space; tagging them
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2011, 2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_kernel_common.h"
* @param count Number of mappings in the table
* @return Pointer to a new table, NULL on error
*/
-static ump_descriptor_table * descriptor_table_alloc(int count);
+static ump_descriptor_table *descriptor_table_alloc(int count);
/**
* Free a descriptor table
* @param table The table to free
*/
-static void descriptor_table_free(ump_descriptor_table * table);
+static void descriptor_table_free(ump_descriptor_table *table);
-ump_descriptor_mapping * ump_descriptor_mapping_create(int init_entries, int max_entries)
+ump_descriptor_mapping *ump_descriptor_mapping_create(int init_entries, int max_entries)
{
- ump_descriptor_mapping * map = _mali_osk_calloc(1, sizeof(ump_descriptor_mapping) );
+ ump_descriptor_mapping *map = _mali_osk_calloc(1, sizeof(ump_descriptor_mapping));
init_entries = MALI_PAD_INT(init_entries);
max_entries = MALI_PAD_INT(max_entries);
map->table = descriptor_table_alloc(init_entries);
if (NULL != map->table) {
map->lock = _mali_osk_mutex_rw_init(_MALI_OSK_LOCKFLAG_UNORDERED, 0);
- if ( NULL != map->lock ) {
+ if (NULL != map->lock) {
_mali_osk_set_nonatomic_bit(0, map->table->usage); /* reserve bit 0 to prevent NULL/zero logic to kick in */
map->max_nr_mappings_allowed = max_entries;
map->current_nr_mappings = init_entries;
return NULL;
}
-void ump_descriptor_mapping_destroy(ump_descriptor_mapping * map)
+void ump_descriptor_mapping_destroy(ump_descriptor_mapping *map)
{
descriptor_table_free(map->table);
_mali_osk_mutex_rw_term(map->lock);
_mali_osk_free(map);
}
-int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping * map, void * target)
+int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping *map, void *target)
{
int descriptor = -1;/*-EFAULT;*/
_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
if (descriptor == map->current_nr_mappings) {
int nr_mappings_new;
/* no free descriptor, try to expand the table */
- ump_descriptor_table * new_table;
- ump_descriptor_table * old_table = map->table;
- nr_mappings_new= map->current_nr_mappings *2;
+ ump_descriptor_table *new_table;
+ ump_descriptor_table *old_table = map->table;
+ nr_mappings_new = map->current_nr_mappings * 2;
if (map->current_nr_mappings >= map->max_nr_mappings_allowed) {
descriptor = -1;
}
_mali_osk_memcpy(new_table->usage, old_table->usage, (sizeof(unsigned long)*map->current_nr_mappings) / BITS_PER_LONG);
- _mali_osk_memcpy(new_table->mappings, old_table->mappings, map->current_nr_mappings * sizeof(void*));
+ _mali_osk_memcpy(new_table->mappings, old_table->mappings, map->current_nr_mappings * sizeof(void *));
map->table = new_table;
map->current_nr_mappings = nr_mappings_new;
descriptor_table_free(old_table);
return descriptor;
}
-int ump_descriptor_mapping_get(ump_descriptor_mapping * map, int descriptor, void** target)
+int ump_descriptor_mapping_get(ump_descriptor_mapping *map, int descriptor, void **target)
{
int result = -1;/*-EFAULT;*/
DEBUG_ASSERT(map);
return result;
}
-int ump_descriptor_mapping_set(ump_descriptor_mapping * map, int descriptor, void * target)
+int ump_descriptor_mapping_set(ump_descriptor_mapping *map, int descriptor, void *target)
{
int result = -1;/*-EFAULT;*/
_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
return result;
}
-void ump_descriptor_mapping_free(ump_descriptor_mapping * map, int descriptor)
+void ump_descriptor_mapping_free(ump_descriptor_mapping *map, int descriptor)
{
_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
if ((descriptor > 0) && (descriptor < map->current_nr_mappings) && _mali_osk_test_bit(descriptor, map->table->usage)) {
_mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
}
-static ump_descriptor_table * descriptor_table_alloc(int count)
+static ump_descriptor_table *descriptor_table_alloc(int count)
{
- ump_descriptor_table * table;
+ ump_descriptor_table *table;
- table = _mali_osk_calloc(1, sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG) + (sizeof(void*) * count) );
+ table = _mali_osk_calloc(1, sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count) / BITS_PER_LONG) + (sizeof(void *) * count));
if (NULL != table) {
- table->usage = (u32*)((u8*)table + sizeof(ump_descriptor_table));
- table->mappings = (void**)((u8*)table + sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count)/BITS_PER_LONG));
+ table->usage = (u32 *)((u8 *)table + sizeof(ump_descriptor_table));
+ table->mappings = (void **)((u8 *)table + sizeof(ump_descriptor_table) + ((sizeof(unsigned long) * count) / BITS_PER_LONG));
}
return table;
}
-static void descriptor_table_free(ump_descriptor_table * table)
+static void descriptor_table_free(ump_descriptor_table *table)
{
_mali_osk_free(table);
}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2011, 2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
* The actual descriptor mapping table, never directly accessed by clients
*/
typedef struct ump_descriptor_table {
- u32 * usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used or not */
- void** mappings; /**< Array of the pointers the descriptors map to */
+ u32 *usage; /**< Pointer to bitpattern indicating if a descriptor is valid/used or not */
+ void **mappings; /**< Array of the pointers the descriptors map to */
} ump_descriptor_table;
/**
_mali_osk_mutex_rw_t *lock; /**< Lock protecting access to the mapping object */
int max_nr_mappings_allowed; /**< Max number of mappings to support in this namespace */
int current_nr_mappings; /**< Current number of possible mappings */
- ump_descriptor_table * table; /**< Pointer to the current mapping table */
+ ump_descriptor_table *table; /**< Pointer to the current mapping table */
} ump_descriptor_mapping;
/**
* @param max_entries Number of entries to max support
* @return Pointer to a descriptor mapping object, NULL on failure
*/
-ump_descriptor_mapping * ump_descriptor_mapping_create(int init_entries, int max_entries);
+ump_descriptor_mapping *ump_descriptor_mapping_create(int init_entries, int max_entries);
/**
* Destroy a descriptor mapping object
* @param map The map to free
*/
-void ump_descriptor_mapping_destroy(ump_descriptor_mapping * map);
+void ump_descriptor_mapping_destroy(ump_descriptor_mapping *map);
/**
* Allocate a new mapping entry (descriptor ID)
* @param target The value to map to
* @return The descriptor allocated, a negative value on error
*/
-int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping * map, void * target);
+int ump_descriptor_mapping_allocate_mapping(ump_descriptor_mapping *map, void *target);
/**
* Get the value mapped to by a descriptor ID
* @param target Pointer to a pointer which will receive the stored value
* @return 0 on successful lookup, negative on error
*/
-int ump_descriptor_mapping_get(ump_descriptor_mapping * map, int descriptor, void** target);
+int ump_descriptor_mapping_get(ump_descriptor_mapping *map, int descriptor, void **target);
/**
* Set the value mapped to by a descriptor ID
* @param target Pointer to replace the current value with
* @return 0 on successful lookup, negative on error
*/
-int ump_descriptor_mapping_set(ump_descriptor_mapping * map, int descriptor, void * target);
+int ump_descriptor_mapping_set(ump_descriptor_mapping *map, int descriptor, void *target);
/**
* Free the descriptor ID
* @param map The map to free the descriptor from
* @param descriptor The descriptor ID to free
*/
-void ump_descriptor_mapping_free(ump_descriptor_mapping * map, int descriptor);
+void ump_descriptor_mapping_free(ump_descriptor_mapping *map, int descriptor);
#endif /* __UMP_KERNEL_DESCRIPTOR_MAPPING_H__ */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2011, 2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
typedef struct ump_memory_allocation {
- void * phys_addr;
- void * mapping;
+ void *phys_addr;
+ void *mapping;
unsigned long size;
ump_dd_handle handle;
- void * process_mapping_info;
+ void *process_mapping_info;
u32 cookie; /**< necessary on some U/K interface implementations */
- struct ump_session_data * ump_session; /**< Session that this allocation belongs to */
+ struct ump_session_data *ump_session; /**< Session that this allocation belongs to */
_mali_osk_list_t list; /**< List for linking together memory allocations into the session's memory head */
u32 is_cached;
} ump_memory_allocation;
typedef struct ump_memory_backend {
- int (*allocate)(void* ctx, ump_dd_mem * descriptor);
- void (*release)(void* ctx, ump_dd_mem * descriptor);
- void (*shutdown)(struct ump_memory_backend * backend);
- u32 (*stat)(struct ump_memory_backend *backend);
- int (*pre_allocate_physical_check)(void *ctx, u32 size);
- u32 (*adjust_to_mali_phys)(void *ctx, u32 cpu_phys);
- void * ctx;
+ int (*allocate)(void *ctx, ump_dd_mem *descriptor);
+ void (*release)(void *ctx, ump_dd_mem *descriptor);
+ void (*shutdown)(struct ump_memory_backend *backend);
+	u32 (*stat)(struct ump_memory_backend *backend);
+	int (*pre_allocate_physical_check)(void *ctx, u32 size);
+	u32 (*adjust_to_mali_phys)(void *ctx, u32 cpu_phys);
+ void *ctx;
} ump_memory_backend;
-ump_memory_backend * ump_memory_backend_create ( void );
-void ump_memory_backend_destroy( void );
+ump_memory_backend *ump_memory_backend_create(void);
+void ump_memory_backend_destroy(void);
#endif /*__UMP_KERNEL_MEMORY_BACKEND_H__ */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_osk.h"
#define UMP_MINIMUM_SIZE_MASK (~(UMP_MINIMUM_SIZE-1))
#define UMP_SIZE_ALIGN(x) (((x)+UMP_MINIMUM_SIZE-1)&UMP_MINIMUM_SIZE_MASK)
#define UMP_ADDR_ALIGN_OFFSET(x) ((x)&(UMP_MINIMUM_SIZE-1))
-static void phys_blocks_release(void * ctx, struct ump_dd_mem * descriptor);
+static void phys_blocks_release(void *ctx, struct ump_dd_mem *descriptor);
-UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block * blocks, unsigned long num_blocks)
+UMP_KERNEL_API_EXPORT ump_dd_handle ump_dd_handle_create_from_phys_blocks(ump_dd_physical_block *blocks, unsigned long num_blocks)
{
- ump_dd_mem * mem;
+ ump_dd_mem *mem;
unsigned long size_total = 0;
- int map_id;
+ int ret;
u32 i;
/* Go through the input blocks and verify that they are sane */
- for (i=0; i < num_blocks; i++) {
+ for (i = 0; i < num_blocks; i++) {
unsigned long addr = blocks[i].addr;
unsigned long size = blocks[i].size;
return UMP_DD_HANDLE_INVALID;
}
- /* Find a secure ID for this allocation */
- _mali_osk_mutex_wait(device.secure_id_map_lock);
- map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*) mem);
-
- if (map_id < 0) {
- _mali_osk_mutex_signal(device.secure_id_map_lock);
- _mali_osk_free(mem);
- DBG_MSG(1, ("Failed to allocate secure ID in ump_dd_handle_create_from_phys_blocks()\n"));
- return UMP_DD_HANDLE_INVALID;
- }
-
/* Now, make a copy of the block information supplied by the user */
- mem->block_array = _mali_osk_malloc(sizeof(ump_dd_physical_block)* num_blocks);
+ mem->block_array = _mali_osk_malloc(sizeof(ump_dd_physical_block) * num_blocks);
if (NULL == mem->block_array) {
- ump_descriptor_mapping_free(device.secure_id_map, map_id);
- _mali_osk_mutex_signal(device.secure_id_map_lock);
_mali_osk_free(mem);
DBG_MSG(1, ("Could not allocate a mem handle for function ump_dd_handle_create_from_phys_blocks().\n"));
return UMP_DD_HANDLE_INVALID;
/* And setup the rest of the ump_dd_mem struct */
_mali_osk_atomic_init(&mem->ref_count, 1);
- mem->secure_id = (ump_secure_id)map_id;
mem->size_bytes = size_total;
mem->nr_blocks = num_blocks;
mem->backend_info = NULL;
mem->hw_device = _UMP_UK_USED_BY_CPU;
mem->lock_usage = UMP_NOT_LOCKED;
- _mali_osk_mutex_signal(device.secure_id_map_lock);
+ /* Find a secure ID for this allocation */
+ ret = ump_random_mapping_insert(device.secure_id_map, mem);
+ if (unlikely(ret)) {
+ _mali_osk_free(mem->block_array);
+ _mali_osk_free(mem);
+ DBG_MSG(1, ("Failed to allocate secure ID in ump_dd_handle_create_from_phys_blocks()\n"));
+ return UMP_DD_HANDLE_INVALID;
+ }
+
DBG_MSG(3, ("UMP memory created. ID: %u, size: %lu\n", mem->secure_id, mem->size_bytes));
return (ump_dd_handle)mem;
}
-static void phys_blocks_release(void * ctx, struct ump_dd_mem * descriptor)
+static void phys_blocks_release(void *ctx, struct ump_dd_mem *descriptor)
{
_mali_osk_free(descriptor->block_array);
descriptor->block_array = NULL;
}
-_mali_osk_errcode_t _ump_ukk_allocate( _ump_uk_allocate_s *user_interaction )
+_mali_osk_errcode_t _ump_ukk_allocate(_ump_uk_allocate_s *user_interaction)
{
- ump_session_data * session_data = NULL;
+ ump_session_data *session_data = NULL;
ump_dd_mem *new_allocation = NULL;
- ump_session_memory_list_element * session_memory_element = NULL;
- int map_id;
+ ump_session_memory_list_element *session_memory_element = NULL;
+ int ret;
- DEBUG_ASSERT_POINTER( user_interaction );
- DEBUG_ASSERT_POINTER( user_interaction->ctx );
+ DEBUG_ASSERT_POINTER(user_interaction);
+ DEBUG_ASSERT_POINTER(user_interaction->ctx);
session_data = (ump_session_data *) user_interaction->ctx;
- session_memory_element = _mali_osk_calloc( 1, sizeof(ump_session_memory_list_element));
+ session_memory_element = _mali_osk_calloc(1, sizeof(ump_session_memory_list_element));
if (NULL == session_memory_element) {
DBG_MSG(1, ("Failed to allocate ump_session_memory_list_element in ump_ioctl_allocate()\n"));
return _MALI_OSK_ERR_NOMEM;
}
- new_allocation = _mali_osk_calloc( 1, sizeof(ump_dd_mem));
- if (NULL==new_allocation) {
+ new_allocation = _mali_osk_calloc(1, sizeof(ump_dd_mem));
+ if (NULL == new_allocation) {
_mali_osk_free(session_memory_element);
DBG_MSG(1, ("Failed to allocate ump_dd_mem in _ump_ukk_allocate()\n"));
return _MALI_OSK_ERR_NOMEM;
}
- /* Create a secure ID for this allocation */
- _mali_osk_mutex_wait(device.secure_id_map_lock);
- map_id = ump_descriptor_mapping_allocate_mapping(device.secure_id_map, (void*)new_allocation);
-
- if (map_id < 0) {
- _mali_osk_mutex_signal(device.secure_id_map_lock);
- _mali_osk_free(session_memory_element);
- _mali_osk_free(new_allocation);
- DBG_MSG(1, ("Failed to allocate secure ID in ump_ioctl_allocate()\n"));
- return - _MALI_OSK_ERR_INVALID_FUNC;
- }
-
/* Initialize the part of the new_allocation that we know so for */
- new_allocation->secure_id = (ump_secure_id)map_id;
- _mali_osk_atomic_init(&new_allocation->ref_count,1);
- if ( 0==(UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE & user_interaction->constraints) )
+ _mali_osk_atomic_init(&new_allocation->ref_count, 1);
+ if (0 == (UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE & user_interaction->constraints))
new_allocation->is_cached = 0;
else new_allocation->is_cached = 1;
- /* special case a size of 0, we should try to emulate what malloc does in this case, which is to return a valid pointer that must be freed, but can't be dereferences */
+ /* Special case a size of 0, we should try to emulate what malloc does
+ * in this case, which is to return a valid pointer that must be freed,
+ * but can't be dereferenced */
if (0 == user_interaction->size) {
- user_interaction->size = 1; /* emulate by actually allocating the minimum block size */
+ /* Emulate by actually allocating the minimum block size */
+ user_interaction->size = 1;
}
- new_allocation->size_bytes = UMP_SIZE_ALIGN(user_interaction->size); /* Page align the size */
+ /* Page align the size */
+ new_allocation->size_bytes = UMP_SIZE_ALIGN(user_interaction->size);
new_allocation->lock_usage = UMP_NOT_LOCKED;
/* Now, ask the active memory backend to do the actual memory allocation */
- if (!device.backend->allocate( device.backend->ctx, new_allocation ) ) {
- DBG_MSG(3, ("OOM: No more UMP memory left. Failed to allocate memory in ump_ioctl_allocate(). Size: %lu, requested size: %lu\n", new_allocation->size_bytes, (unsigned long)user_interaction->size));
- ump_descriptor_mapping_free(device.secure_id_map, map_id);
- _mali_osk_mutex_signal(device.secure_id_map_lock);
+ if (!device.backend->allocate(device.backend->ctx, new_allocation)) {
+ DBG_MSG(3, ("OOM: No more UMP memory left. Failed to allocate memory in ump_ioctl_allocate(). Size: %lu, requested size: %lu\n",
+ new_allocation->size_bytes,
+ (unsigned long)user_interaction->size));
_mali_osk_free(new_allocation);
_mali_osk_free(session_memory_element);
return _MALI_OSK_ERR_INVALID_FUNC;
new_allocation->ctx = device.backend->ctx;
new_allocation->release_func = device.backend->release;
- _mali_osk_mutex_signal(device.secure_id_map_lock);
-
/* Initialize the session_memory_element, and add it to the session object */
session_memory_element->mem = new_allocation;
_mali_osk_mutex_wait(session_data->lock);
_mali_osk_list_add(&(session_memory_element->list), &(session_data->list_head_session_memory_list));
_mali_osk_mutex_signal(session_data->lock);
+ /* Create a secure ID for this allocation */
+ ret = ump_random_mapping_insert(device.secure_id_map, new_allocation);
+ if (unlikely(ret)) {
+ new_allocation->release_func(new_allocation->ctx, new_allocation);
+ _mali_osk_free(session_memory_element);
+ _mali_osk_free(new_allocation);
+ DBG_MSG(1, ("Failed to allocate secure ID in ump_ioctl_allocate()\n"));
+ return _MALI_OSK_ERR_INVALID_FUNC;
+ }
+
user_interaction->secure_id = new_allocation->secure_id;
user_interaction->size = new_allocation->size_bytes;
- DBG_MSG(3, ("UMP memory allocated. ID: %u, size: %lu\n", new_allocation->secure_id, new_allocation->size_bytes));
+ DBG_MSG(3, ("UMP memory allocated. ID: %u, size: %lu\n",
+ new_allocation->secure_id,
+ new_allocation->size_bytes));
return _MALI_OSK_ERR_OK;
}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __UMP_KERNEL_TYPES_H__
#include "ump_kernel_interface.h"
#include "mali_osk.h"
+#include <linux/rbtree.h>
typedef enum {
UMP_USED_BY_CPU = 0,
UMP_USED_BY_MALI = 1,
- UMP_USED_BY_UNKNOWN_DEVICE= 100,
+ UMP_USED_BY_UNKNOWN_DEVICE = 100,
} ump_hw_usage;
typedef enum {
UMP_READ_WRITE = 3,
} ump_lock_usage;
-
/*
* This struct is what is "behind" a ump_dd_handle
*/
typedef struct ump_dd_mem {
+ struct rb_node node;
ump_secure_id secure_id;
_mali_osk_atomic_t ref_count;
unsigned long size_bytes;
unsigned long nr_blocks;
- ump_dd_physical_block * block_array;
- void (*release_func)(void * ctx, struct ump_dd_mem * descriptor);
- void * ctx;
- void * backend_info;
+ ump_dd_physical_block *block_array;
+ void (*release_func)(void *ctx, struct ump_dd_mem *descriptor);
+ void *ctx;
+ void *backend_info;
int is_cached;
ump_hw_usage hw_device;
ump_lock_usage lock_usage;
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
extern "C" {
#endif
-_mali_osk_errcode_t _ump_osk_init( void );
+_mali_osk_errcode_t _ump_osk_init(void);
-_mali_osk_errcode_t _ump_osk_term( void );
+_mali_osk_errcode_t _ump_osk_term(void);
-int _ump_osk_atomic_inc_and_read( _mali_osk_atomic_t *atom );
+int _ump_osk_atomic_inc_and_read(_mali_osk_atomic_t *atom);
-int _ump_osk_atomic_dec_and_read( _mali_osk_atomic_t *atom );
+int _ump_osk_atomic_dec_and_read(_mali_osk_atomic_t *atom);
-_mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation *descriptor );
+_mali_osk_errcode_t _ump_osk_mem_mapregion_init(ump_memory_allocation *descriptor);
-_mali_osk_errcode_t _ump_osk_mem_mapregion_map( ump_memory_allocation * descriptor, u32 offset, u32 * phys_addr, unsigned long size );
+_mali_osk_errcode_t _ump_osk_mem_mapregion_map(ump_memory_allocation *descriptor, u32 offset, u32 *phys_addr, unsigned long size);
-void _ump_osk_mem_mapregion_term( ump_memory_allocation * descriptor );
+void _ump_osk_mem_mapregion_term(ump_memory_allocation *descriptor);
-void _ump_osk_msync( ump_dd_mem * mem, void * virt, u32 offset, u32 size, ump_uk_msync_op op, ump_session_data * session_data );
+void _ump_osk_msync(ump_dd_mem *mem, void *virt, u32 offset, u32 size, ump_uk_msync_op op, ump_session_data *session_data);
#ifdef __cplusplus
}
--- /dev/null
+/*
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_uk_types.h
+ * Defines the types and constants used in the user-kernel interface
+ */
+
+#ifndef __UMP_UK_TYPES_H__
+#define __UMP_UK_TYPES_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Helpers for API version handling */
+#define MAKE_VERSION_ID(x) (((x) << 16UL) | (x))
+#define IS_VERSION_ID(x) (((x) & 0xFFFF) == (((x) >> 16UL) & 0xFFFF))
+#define GET_VERSION(x) (((x) >> 16UL) & 0xFFFF)
+#define IS_API_MATCH(x, y) (IS_VERSION_ID((x)) && IS_VERSION_ID((y)) && (GET_VERSION((x)) == GET_VERSION((y))))
+
+/**
+ * API version define.
+ * Indicates the version of the kernel API
+ * The version is a 16bit integer incremented on each API change.
+ * The 16bit integer is stored twice in a 32bit integer
+ * So for version 1 the value would be 0x00010001
+ */
+#define UMP_IOCTL_API_VERSION MAKE_VERSION_ID(3)
+
+typedef enum
+{
+ _UMP_IOC_QUERY_API_VERSION = 1,
+ _UMP_IOC_ALLOCATE,
+ _UMP_IOC_RELEASE,
+ _UMP_IOC_SIZE_GET,
+ _UMP_IOC_MAP_MEM, /* not used in Linux */
+ _UMP_IOC_UNMAP_MEM, /* not used in Linux */
+ _UMP_IOC_MSYNC,
+ _UMP_IOC_CACHE_OPERATIONS_CONTROL,
+ _UMP_IOC_SWITCH_HW_USAGE,
+ _UMP_IOC_LOCK,
+ _UMP_IOC_UNLOCK,
+} _ump_uk_functions;
+
+typedef enum
+{
+ UMP_REF_DRV_UK_CONSTRAINT_NONE = 0,
+ UMP_REF_DRV_UK_CONSTRAINT_PHYSICALLY_LINEAR = 1,
+ UMP_REF_DRV_UK_CONSTRAINT_USE_CACHE = 4,
+} ump_uk_alloc_constraints;
+
+typedef enum
+{
+ _UMP_UK_MSYNC_CLEAN = 0,
+ _UMP_UK_MSYNC_CLEAN_AND_INVALIDATE = 1,
+ _UMP_UK_MSYNC_INVALIDATE = 2,
+ _UMP_UK_MSYNC_FLUSH_L1 = 3,
+ _UMP_UK_MSYNC_READOUT_CACHE_ENABLED = 128,
+} ump_uk_msync_op;
+
+typedef enum
+{
+ _UMP_UK_CACHE_OP_START = 0,
+ _UMP_UK_CACHE_OP_FINISH = 1,
+} ump_uk_cache_op_control;
+
+typedef enum
+{
+ _UMP_UK_READ = 1,
+ _UMP_UK_READ_WRITE = 3,
+} ump_uk_lock_usage;
+
+typedef enum
+{
+ _UMP_UK_USED_BY_CPU = 0,
+ _UMP_UK_USED_BY_MALI = 1,
+ _UMP_UK_USED_BY_UNKNOWN_DEVICE = 100,
+} ump_uk_user;
+
+/**
+ * Get API version ([in,out] u32 api_version, [out] u32 compatible)
+ */
+typedef struct _ump_uk_api_version_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 version; /**< Set to the user space version on entry, stores the device driver version on exit */
+ u32 compatible; /**< Non-null if the device is compatible with the client */
+} _ump_uk_api_version_s;
+
+/**
+ * ALLOCATE ([out] u32 secure_id, [in,out] u32 size, [in] constraints)
+ */
+typedef struct _ump_uk_allocate_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< Return value from DD to Userdriver */
+ u32 size; /**< Input and output. Requested size; input. Returned size; output */
+ ump_uk_alloc_constraints constraints; /**< Only input to Devicedriver */
+} _ump_uk_allocate_s;
+
+/**
+ * SIZE_GET ([in] u32 secure_id, [out] u32 size)
+ */
+typedef struct _ump_uk_size_get_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< Input to DD */
+ u32 size; /**< Returned size; output */
+} _ump_uk_size_get_s;
+
+/**
+ * Release ([in] u32 secure_id)
+ */
+typedef struct _ump_uk_release_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< Input to DD */
+} _ump_uk_release_s;
+
+typedef struct _ump_uk_map_mem_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [out] Returns user-space virtual address for the mapping */
+ void *phys_addr; /**< [in] physical address */
+ unsigned long size; /**< [in] size */
+ u32 secure_id; /**< [in] secure_id to assign to mapping */
+ void *_ukk_private; /**< Only used inside linux port between kernel frontend and common part to store vma */
+ u32 cookie;
+ u32 is_cached; /**< [in,out] caching of CPU mappings */
+} _ump_uk_map_mem_s;
+
+typedef struct _ump_uk_unmap_mem_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping;
+ u32 size;
+ void *_ukk_private;
+ u32 cookie;
+} _ump_uk_unmap_mem_s;
+
+typedef struct _ump_uk_msync_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ void *mapping; /**< [in] mapping addr */
+ void *address; /**< [in] flush start addr */
+ u32 size; /**< [in] size to flush */
+ ump_uk_msync_op op; /**< [in] flush operation */
+ u32 cookie; /**< [in] cookie stored with reference to the kernel mapping internals */
+ u32 secure_id; /**< [in] secure_id that identifies the ump buffer */
+ u32 is_cached; /**< [out] caching of CPU mappings */
+} _ump_uk_msync_s;
+
+typedef struct _ump_uk_cache_operations_control_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ ump_uk_cache_op_control op; /**< [in] cache operations start/stop */
+} _ump_uk_cache_operations_control_s;
+
+
+typedef struct _ump_uk_switch_hw_usage_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< [in] secure_id that identifies the ump buffer */
+	ump_uk_user new_user;          /**< [in] the new device (ump_uk_user) that will use the buffer */
+
+} _ump_uk_switch_hw_usage_s;
+
+typedef struct _ump_uk_lock_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< [in] secure_id that identifies the ump buffer */
+ ump_uk_lock_usage lock_usage;
+} _ump_uk_lock_s;
+
+typedef struct _ump_uk_unlock_s
+{
+ void *ctx; /**< [in,out] user-kernel context (trashed on output) */
+ u32 secure_id; /**< [in] secure_id that identifies the ump buffer */
+} _ump_uk_unlock_s;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMP_UK_TYPES_H__ */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
#endif
-_mali_osk_errcode_t _ump_ukk_open( void** context );
+_mali_osk_errcode_t _ump_ukk_open(void **context);
-_mali_osk_errcode_t _ump_ukk_close( void** context );
+_mali_osk_errcode_t _ump_ukk_close(void **context);
-_mali_osk_errcode_t _ump_ukk_allocate( _ump_uk_allocate_s *user_interaction );
+_mali_osk_errcode_t _ump_ukk_allocate(_ump_uk_allocate_s *user_interaction);
-_mali_osk_errcode_t _ump_ukk_release( _ump_uk_release_s *release_info );
+_mali_osk_errcode_t _ump_ukk_release(_ump_uk_release_s *release_info);
-_mali_osk_errcode_t _ump_ukk_size_get( _ump_uk_size_get_s *user_interaction );
+_mali_osk_errcode_t _ump_ukk_size_get(_ump_uk_size_get_s *user_interaction);
-_mali_osk_errcode_t _ump_ukk_map_mem( _ump_uk_map_mem_s *args );
+_mali_osk_errcode_t _ump_ukk_map_mem(_ump_uk_map_mem_s *args);
-_mali_osk_errcode_t _ump_uku_get_api_version( _ump_uk_api_version_s *args );
+_mali_osk_errcode_t _ump_uku_get_api_version(_ump_uk_api_version_s *args);
-void _ump_ukk_unmap_mem( _ump_uk_unmap_mem_s *args );
+void _ump_ukk_unmap_mem(_ump_uk_unmap_mem_s *args);
-void _ump_ukk_msync( _ump_uk_msync_s *args );
+void _ump_ukk_msync(_ump_uk_msync_s *args);
-void _ump_ukk_cache_operations_control(_ump_uk_cache_operations_control_s* args);
+void _ump_ukk_cache_operations_control(_ump_uk_cache_operations_control_s *args);
-void _ump_ukk_switch_hw_usage(_ump_uk_switch_hw_usage_s *args );
+void _ump_ukk_switch_hw_usage(_ump_uk_switch_hw_usage_s *args);
-void _ump_ukk_lock(_ump_uk_lock_s *args );
+void _ump_ukk_lock(_ump_uk_lock_s *args);
-void _ump_ukk_unlock(_ump_uk_unlock_s *args );
+void _ump_ukk_unlock(_ump_uk_unlock_s *args);
-u32 _ump_ukk_report_memory_usage( void );
+u32 _ump_ukk_report_memory_usage(void);
#ifdef __cplusplus
}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2010-2012 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
#define __UMP_KERNEL_LICENSE_H__
#ifdef __cplusplus
-extern "C"
-{
+extern "C" {
#endif
#define UMP_KERNEL_LINUX_LICENSE "GPL"
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __UMP_IOCTL_H__
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/module.h> /* kernel module definitions */
struct ump_device {
struct cdev cdev;
#if UMP_LICENSE_IS_GPL
- struct class * ump_class;
+ struct class *ump_class;
#endif
};
#else
static int ump_file_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg);
#endif
-static int ump_file_mmap(struct file * filp, struct vm_area_struct * vma);
+static int ump_file_mmap(struct file *filp, struct vm_area_struct *vma);
/* This variable defines the file operations this UMP device driver offer */
if (IS_ERR(ump_device.ump_class)) {
err = PTR_ERR(ump_device.ump_class);
} else {
- struct device * mdev;
+ struct device *mdev;
mdev = device_create(ump_device.ump_class, NULL, dev, NULL, ump_dev_name);
if (!IS_ERR(mdev)) {
return 0;
unregister_chrdev_region(dev, 1);
#if UMP_LICENSE_IS_GPL
- if(ump_debugfs_dir)
+ if (ump_debugfs_dir)
debugfs_remove_recursive(ump_debugfs_dir);
#endif
}
*/
static int ump_file_open(struct inode *inode, struct file *filp)
{
- struct ump_session_data * session_data;
+ struct ump_session_data *session_data;
_mali_osk_errcode_t err;
/* input validation */
}
/* Call the OS-Independent UMP Open function */
- err = _ump_ukk_open((void**) &session_data );
- if( _MALI_OSK_ERR_OK != err ) {
+ err = _ump_ukk_open((void **) &session_data);
+ if (_MALI_OSK_ERR_OK != err) {
MSG_ERR(("Ump failed to open a new session\n"));
- return map_errcode( err );
+ return map_errcode(err);
}
- filp->private_data = (void*)session_data;
+ filp->private_data = (void *)session_data;
filp->f_pos = 0;
return 0; /* success */
{
_mali_osk_errcode_t err;
- err = _ump_ukk_close((void**) &filp->private_data );
- if( _MALI_OSK_ERR_OK != err ) {
- return map_errcode( err );
+ err = _ump_ukk_close((void **) &filp->private_data);
+ if (_MALI_OSK_ERR_OK != err) {
+ return map_errcode(err);
}
return 0; /* success */
#endif
{
int err = -ENOTTY;
- void __user * argument;
- struct ump_session_data * session_data;
+ void __user *argument;
+ struct ump_session_data *session_data;
#ifndef HAVE_UNLOCKED_IOCTL
(void)inode; /* inode not used */
return err;
}
-int map_errcode( _mali_osk_errcode_t err )
+int map_errcode(_mali_osk_errcode_t err)
{
- switch(err) {
+ switch (err) {
case _MALI_OSK_ERR_OK :
return 0;
case _MALI_OSK_ERR_FAULT:
/*
* Handle from OS to map specified virtual memory to specified UMP memory.
*/
-static int ump_file_mmap(struct file * filp, struct vm_area_struct * vma)
+static int ump_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
_ump_uk_map_mem_s args;
_mali_osk_errcode_t err;
- struct ump_session_data * session_data;
+ struct ump_session_data *session_data;
/* Validate the session data */
session_data = (struct ump_session_data *)filp->private_data;
/* By setting this flag, during a process fork; the child process will not have the parent UMP mappings */
vma->vm_flags |= VM_DONTCOPY;
- DBG_MSG(4, ("UMP vma->flags: %x\n", vma->vm_flags ));
+ DBG_MSG(4, ("UMP vma->flags: %x\n", vma->vm_flags));
/* Call the common mmap handler */
- err = _ump_ukk_map_mem( &args );
- if ( _MALI_OSK_ERR_OK != err) {
+ err = _ump_ukk_map_mem(&args);
+ if (_MALI_OSK_ERR_OK != err) {
MSG_ERR(("_ump_ukk_map_mem() failed in function ump_file_mmap()"));
- return map_errcode( err );
+ return map_errcode(err);
}
return 0; /* success */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2007-2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __UMP_KERNEL_LINUX_H__
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2011, 2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/* needed to detect kernel version specific code */
typedef struct block_info {
- struct block_info * next;
+ struct block_info *next;
} block_info;
typedef struct block_allocator {
struct semaphore mutex;
- block_info * all_blocks;
- block_info * first_free;
+ block_info *all_blocks;
+ block_info *first_free;
u32 base;
u32 num_blocks;
u32 num_free;
} block_allocator;
-static void block_allocator_shutdown(ump_memory_backend * backend);
-static int block_allocator_allocate(void* ctx, ump_dd_mem * mem);
-static void block_allocator_release(void * ctx, ump_dd_mem * handle);
-static inline u32 get_phys(block_allocator * allocator, block_info * block);
+static void block_allocator_shutdown(ump_memory_backend *backend);
+static int block_allocator_allocate(void *ctx, ump_dd_mem *mem);
+static void block_allocator_release(void *ctx, ump_dd_mem *handle);
+static inline u32 get_phys(block_allocator *allocator, block_info *block);
static u32 block_allocator_stat(struct ump_memory_backend *backend);
/*
* Create dedicated memory backend
*/
-ump_memory_backend * ump_block_allocator_create(u32 base_address, u32 size)
+ump_memory_backend *ump_block_allocator_create(u32 base_address, u32 size)
{
- ump_memory_backend * backend;
- block_allocator * allocator;
+ ump_memory_backend *backend;
+ block_allocator *allocator;
u32 usable_size;
u32 num_blocks;
/*
* Destroy specified dedicated memory backend
*/
-static void block_allocator_shutdown(ump_memory_backend * backend)
+static void block_allocator_shutdown(ump_memory_backend *backend)
{
- block_allocator * allocator;
+ block_allocator *allocator;
BUG_ON(!backend);
BUG_ON(!backend->ctx);
- allocator = (block_allocator*)backend->ctx;
+ allocator = (block_allocator *)backend->ctx;
DBG_MSG_IF(1, allocator->num_free != allocator->num_blocks, ("%u blocks still in use during shutdown\n", allocator->num_blocks - allocator->num_free));
-static int block_allocator_allocate(void* ctx, ump_dd_mem * mem)
+static int block_allocator_allocate(void *ctx, ump_dd_mem *mem)
{
- block_allocator * allocator;
+ block_allocator *allocator;
u32 left;
- block_info * last_allocated = NULL;
+ block_info *last_allocated = NULL;
int i = 0;
BUG_ON(!ctx);
BUG_ON(!mem);
- allocator = (block_allocator*)ctx;
+ allocator = (block_allocator *)ctx;
left = mem->size_bytes;
BUG_ON(!left);
BUG_ON(!&allocator->mutex);
mem->nr_blocks = ((left + UMP_BLOCK_SIZE - 1) & ~(UMP_BLOCK_SIZE - 1)) / UMP_BLOCK_SIZE;
- mem->block_array = (ump_dd_physical_block*)vmalloc(sizeof(ump_dd_physical_block) * mem->nr_blocks);
+ mem->block_array = (ump_dd_physical_block *)vmalloc(sizeof(ump_dd_physical_block) * mem->nr_blocks);
if (NULL == mem->block_array) {
MSG_ERR(("Failed to allocate block array\n"));
return 0;
mem->size_bytes = 0;
while ((left > 0) && (allocator->first_free)) {
- block_info * block;
+ block_info *block;
block = allocator->first_free;
allocator->first_free = allocator->first_free->next;
}
if (left) {
- block_info * block;
+ block_info *block;
/* release all memory back to the pool */
while (last_allocated) {
block = last_allocated->next;
mem->backend_info = last_allocated;
up(&allocator->mutex);
- mem->is_cached=0;
+ mem->is_cached = 0;
return 1;
}
-static void block_allocator_release(void * ctx, ump_dd_mem * handle)
+static void block_allocator_release(void *ctx, ump_dd_mem *handle)
{
- block_allocator * allocator;
- block_info * block, * next;
+ block_allocator *allocator;
+ block_info *block, * next;
BUG_ON(!ctx);
BUG_ON(!handle);
- allocator = (block_allocator*)ctx;
- block = (block_info*)handle->backend_info;
+ allocator = (block_allocator *)ctx;
+ block = (block_info *)handle->backend_info;
BUG_ON(!block);
if (down_interruptible(&allocator->mutex)) {
while (block) {
next = block->next;
- BUG_ON( (block < allocator->all_blocks) || (block > (allocator->all_blocks + allocator->num_blocks)));
+ BUG_ON((block < allocator->all_blocks) || (block > (allocator->all_blocks + allocator->num_blocks)));
block->next = allocator->first_free;
allocator->first_free = block;
/*
* Helper function for calculating the physical base adderss of a memory block
*/
-static inline u32 get_phys(block_allocator * allocator, block_info * block)
+static inline u32 get_phys(block_allocator *allocator, block_info *block)
{
return allocator->base + ((block - allocator->all_blocks) * UMP_BLOCK_SIZE);
}
{
block_allocator *allocator;
BUG_ON(!backend);
- allocator = (block_allocator*)backend->ctx;
+ allocator = (block_allocator *)backend->ctx;
BUG_ON(!allocator);
- return (allocator->num_blocks - allocator->num_free)* UMP_BLOCK_SIZE;
+ return (allocator->num_blocks - allocator->num_free) * UMP_BLOCK_SIZE;
}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
#include "ump_kernel_memory_backend.h"
-ump_memory_backend * ump_block_allocator_create(u32 base_address, u32 size);
+ump_memory_backend *ump_block_allocator_create(u32 base_address, u32 size);
#endif /* __UMP_KERNEL_MEMORY_BACKEND_DEDICATED_H__ */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2011, 2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/* needed to detect kernel version specific code */
-static void os_free(void* ctx, ump_dd_mem * descriptor);
-static int os_allocate(void* ctx, ump_dd_mem * descriptor);
-static void os_memory_backend_destroy(ump_memory_backend * backend);
+static void os_free(void *ctx, ump_dd_mem *descriptor);
+static int os_allocate(void *ctx, ump_dd_mem *descriptor);
+static void os_memory_backend_destroy(ump_memory_backend *backend);
static u32 os_stat(struct ump_memory_backend *backend);
/*
* Create OS memory backend
*/
-ump_memory_backend * ump_os_memory_backend_create(const int max_allocation)
+ump_memory_backend *ump_os_memory_backend_create(const int max_allocation)
{
- ump_memory_backend * backend;
- os_allocator * info;
+ ump_memory_backend *backend;
+ os_allocator *info;
info = kmalloc(sizeof(os_allocator), GFP_KERNEL);
if (NULL == info) {
/*
* Destroy specified OS memory backend
*/
-static void os_memory_backend_destroy(ump_memory_backend * backend)
+static void os_memory_backend_destroy(ump_memory_backend *backend)
{
- os_allocator * info = (os_allocator*)backend->ctx;
+ os_allocator *info = (os_allocator *)backend->ctx;
DBG_MSG_IF(1, 0 != info->num_pages_allocated, ("%d pages still in use during shutdown\n", info->num_pages_allocated));
/*
* Allocate UMP memory
*/
-static int os_allocate(void* ctx, ump_dd_mem * descriptor)
+static int os_allocate(void *ctx, ump_dd_mem *descriptor)
{
u32 left;
- os_allocator * info;
+ os_allocator *info;
int pages_allocated = 0;
int is_cached;
BUG_ON(!descriptor);
BUG_ON(!ctx);
- info = (os_allocator*)ctx;
+ info = (os_allocator *)ctx;
left = descriptor->size_bytes;
is_cached = descriptor->is_cached;
}
while (left > 0 && ((info->num_pages_allocated + pages_allocated) < info->num_pages_max)) {
- struct page * new_page;
+ struct page *new_page;
if (is_cached) {
new_page = alloc_page(GFP_HIGHUSER | __GFP_ZERO | __GFP_REPEAT | __GFP_NOWARN);
}
/* Ensure page caches are flushed. */
- if ( is_cached ) {
+ if (is_cached) {
descriptor->block_array[pages_allocated].addr = page_to_phys(new_page);
descriptor->block_array[pages_allocated].size = PAGE_SIZE;
} else {
- descriptor->block_array[pages_allocated].addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL );
+ descriptor->block_array[pages_allocated].addr = dma_map_page(NULL, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
descriptor->block_array[pages_allocated].size = PAGE_SIZE;
}
if (left) {
DBG_MSG(1, ("Failed to allocate needed pages\n"));
- while(pages_allocated) {
+ while (pages_allocated) {
pages_allocated--;
- if ( !is_cached ) {
+ if (!is_cached) {
dma_unmap_page(NULL, descriptor->block_array[pages_allocated].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
}
- __free_page(pfn_to_page(descriptor->block_array[pages_allocated].addr >> PAGE_SHIFT) );
+ __free_page(pfn_to_page(descriptor->block_array[pages_allocated].addr >> PAGE_SHIFT));
}
up(&info->mutex);
/*
* Free specified UMP memory
*/
-static void os_free(void* ctx, ump_dd_mem * descriptor)
+static void os_free(void *ctx, ump_dd_mem *descriptor)
{
- os_allocator * info;
+ os_allocator *info;
int i;
BUG_ON(!ctx);
BUG_ON(!descriptor);
- info = (os_allocator*)ctx;
+ info = (os_allocator *)ctx;
BUG_ON(descriptor->nr_blocks > info->num_pages_allocated);
up(&info->mutex);
- for ( i = 0; i < descriptor->nr_blocks; i++) {
+ for (i = 0; i < descriptor->nr_blocks; i++) {
DBG_MSG(6, ("Freeing physical page. Address: 0x%08lx\n", descriptor->block_array[i].addr));
- if ( ! descriptor->is_cached) {
+ if (! descriptor->is_cached) {
dma_unmap_page(NULL, descriptor->block_array[i].addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
}
- __free_page(pfn_to_page(descriptor->block_array[i].addr>>PAGE_SHIFT) );
+ __free_page(pfn_to_page(descriptor->block_array[i].addr >> PAGE_SHIFT));
}
vfree(descriptor->block_array);
static u32 os_stat(struct ump_memory_backend *backend)
{
os_allocator *info;
- info = (os_allocator*)backend->ctx;
+ info = (os_allocator *)backend->ctx;
return info->num_pages_allocated * _MALI_OSK_MALI_PAGE_SIZE;
}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
#include "ump_kernel_memory_backend.h"
-ump_memory_backend * ump_os_memory_backend_create(const int max_allocation);
+ump_memory_backend *ump_os_memory_backend_create(const int max_allocation);
#endif /* __UMP_KERNEL_MEMORY_BACKEND_OS_H__ */
--- /dev/null
+/*
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include "mali_kernel_common.h"
+#include "mali_osk.h"
+#include "ump_osk.h"
+#include "ump_kernel_common.h"
+#include "ump_kernel_types.h"
+#include "ump_kernel_random_mapping.h"
+
+#include <linux/random.h>
+#include <linux/rbtree.h>
+#include <linux/sched.h>
+#include <linux/jiffies.h>
+
+
+/* rb-tree lookup helper: find the ump_dd_mem whose secure_id equals @id in
+ * the tree rooted at @root.  Returns the matching entry, or NULL if @id is
+ * not mapped.  Callers hold map->lock (read or write mode).
+ * NOTE(review): @id is a signed int while secure_id is assigned from a u32
+ * in ump_random_mapping_insert() — confirm the mixed-sign comparison is
+ * intended. */
+static ump_dd_mem *search(struct rb_root *root, int id)
+{
+	struct rb_node *node = root->rb_node;
+
+	while (node) {
+		ump_dd_mem *e = container_of(node, ump_dd_mem, node);
+
+		if (id < e->secure_id) {
+			node = node->rb_left;
+		} else if (id > e->secure_id) {
+			node = node->rb_right;
+		} else {
+			return e;
+		}
+	}
+
+	return NULL;
+}
+
+/* rb-tree insert helper: link @mem into @root keyed by @id.
+ * Returns MALI_TRUE on success; MALI_FALSE (after an error printk) if @id
+ * is already present, in which case the tree is left unchanged.
+ * Callers hold map->lock in write mode. */
+static mali_bool insert(struct rb_root *root, int id, ump_dd_mem *mem)
+{
+	struct rb_node **new = &(root->rb_node);
+	struct rb_node *parent = NULL;
+
+	while (*new) {
+		ump_dd_mem *this = container_of(*new, ump_dd_mem, node);
+
+		parent = *new;
+		if (id < this->secure_id) {
+			new = &((*new)->rb_left);
+		} else if (id > this->secure_id) {
+			new = &((*new)->rb_right);
+		} else {
+			printk(KERN_ERR "UMP: ID already used %x\n", id);
+			return MALI_FALSE;
+		}
+	}
+
+	rb_link_node(&mem->node, parent, new);
+	rb_insert_color(&mem->node, root);
+
+	return MALI_TRUE;
+}
+
+
+/* Create a random-mapping object: an rb-tree of ump_dd_mem entries keyed by
+ * randomly generated secure IDs, protected by an ordered rw-lock.
+ * Returns the new mapping, or NULL if allocation or lock creation fails.
+ * Fix: the original leaked @map when _mali_osk_mutex_rw_init() failed. */
+ump_random_mapping *ump_random_mapping_create(void)
+{
+	ump_random_mapping *map = _mali_osk_calloc(1, sizeof(ump_random_mapping));
+
+	if (NULL == map)
+		return NULL;
+
+	map->lock = _mali_osk_mutex_rw_init(_MALI_OSK_LOCKFLAG_ORDERED,
+					    _MALI_OSK_LOCK_ORDER_DESCRIPTOR_MAP);
+	if (NULL != map->lock) {
+		map->root = RB_ROOT;
+#if UMP_RANDOM_MAP_DELAY
+		map->failed.count = 0;
+		map->failed.timestamp = jiffies;
+#endif
+		return map;
+	}
+
+	/* Lock creation failed: free the map instead of leaking it. */
+	_mali_osk_free(map);
+	return NULL;
+}
+
+/* Destroy a random-mapping object.  Frees the lock and the map structure
+ * itself.  Entries still linked in the tree (if any) are not released
+ * here. */
+void ump_random_mapping_destroy(ump_random_mapping *map)
+{
+	_mali_osk_mutex_rw_term(map->lock);
+	_mali_osk_free(map);
+}
+
+/* Assign a random, unused secure ID to @mem and insert it into @map under
+ * the map's write lock.  Loops until get_random_bytes() yields an ID that is
+ * neither UMP_INVALID_SECURE_ID nor already present in the tree.  On return
+ * @mem->secure_id holds the chosen ID.  Always returns 0: the loop retries
+ * until insertion succeeds. */
+int ump_random_mapping_insert(ump_random_mapping *map, ump_dd_mem *mem)
+{
+	_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+
+	while (1) {
+		u32 id;
+
+		get_random_bytes(&id, sizeof(id));
+
+		/* Try a new random number if id happened to be the invalid
+		 * secure ID (-1). */
+		if (unlikely(id == UMP_INVALID_SECURE_ID))
+			continue;
+
+		/* Insert into the tree. If the id was already in use, get a
+		 * new random id and try again. */
+		if (insert(&map->root, id, mem)) {
+			mem->secure_id = id;
+			break;
+		}
+	}
+	_mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+
+	return 0;
+}
+
+/* Look up @id in @map and return the referenced ump_dd_mem with its
+ * reference count incremented (via ump_dd_reference_add), or NULL if the ID
+ * is unknown.
+ *
+ * When UMP_RANDOM_MAP_DELAY is enabled, repeated failed lookups are
+ * throttled: once more than UMP_FAILED_LOOKUPS_ALLOWED failures occur
+ * within UMP_FAILED_LOOKUP_DELAY * HZ jiffies of each other, the calling
+ * thread is delayed, making it expensive to probe for valid secure IDs.
+ * NOTE(review): UMP_FAILED_LOOKUP_DELAY is scaled by HZ (seconds) for the
+ * timestamp window but passed raw (jiffies) to
+ * schedule_timeout_killable() — confirm the intended unit. */
+ump_dd_mem *ump_random_mapping_get(ump_random_mapping *map, int id)
+{
+	ump_dd_mem *mem = NULL;
+#if UMP_RANDOM_MAP_DELAY
+	int do_delay = 0;
+#endif
+
+	DEBUG_ASSERT(map);
+
+	_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RO);
+	mem = search(&map->root, id);
+
+	if (unlikely(NULL == mem)) {
+#if UMP_RANDOM_MAP_DELAY
+		map->failed.count++;
+
+		if (time_is_before_jiffies(map->failed.timestamp +
+					   UMP_FAILED_LOOKUP_DELAY * HZ)) {
+			/* If it is a long time since last failure, reset
+			 * the counter and skip the delay this time. */
+			map->failed.count = 0;
+		} else if (map->failed.count > UMP_FAILED_LOOKUPS_ALLOWED) {
+			do_delay = 1;
+		}
+
+		map->failed.timestamp = jiffies;
+#endif /* UMP_RANDOM_MAP_DELAY */
+	} else {
+		/* Take a reference while still holding the lock, so the entry
+		 * cannot be released between lookup and return. */
+		ump_dd_reference_add(mem);
+	}
+	_mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RO);
+
+#if UMP_RANDOM_MAP_DELAY
+	if (do_delay) {
+		/* Apply delay */
+		schedule_timeout_killable(UMP_FAILED_LOOKUP_DELAY);
+	}
+#endif /* UMP_RANDOM_MAP_DELAY */
+
+	return mem;
+}
+
+/* Unlink and return the entry with secure ID @id from @map, or NULL if it
+ * is not present.  Caller must hold map->lock in write mode.  The entry's
+ * reference count is not touched. */
+static ump_dd_mem *ump_random_mapping_remove_internal(ump_random_mapping *map, int id)
+{
+	ump_dd_mem *mem = NULL;
+
+	mem = search(&map->root, id);
+
+	if (mem) {
+		rb_erase(&mem->node, &map->root);
+	}
+
+	return mem;
+}
+
+/* Drop one reference on @mem.  When the count reaches zero the entry is
+ * removed from the global device.secure_id_map, its release callback is
+ * invoked and the descriptor is freed.  The whole sequence runs under the
+ * global map's write lock, so a concurrent ump_random_mapping_get() cannot
+ * observe an entry whose count has already hit zero. */
+void ump_random_mapping_put(ump_dd_mem *mem)
+{
+	int new_ref;
+
+	_mali_osk_mutex_rw_wait(device.secure_id_map->lock, _MALI_OSK_LOCKMODE_RW);
+
+	new_ref = _ump_osk_atomic_dec_and_read(&mem->ref_count);
+	DBG_MSG(5, ("Memory reference decremented. ID: %u, new value: %d\n",
+		    mem->secure_id, new_ref));
+
+	if (0 == new_ref) {
+		DBG_MSG(3, ("Final release of memory. ID: %u\n", mem->secure_id));
+
+		ump_random_mapping_remove_internal(device.secure_id_map, mem->secure_id);
+
+		/* Hand the backing storage back to the allocator, then free
+		 * the descriptor itself. */
+		mem->release_func(mem->ctx, mem);
+		_mali_osk_free(mem);
+	}
+
+	_mali_osk_mutex_rw_signal(device.secure_id_map->lock, _MALI_OSK_LOCKMODE_RW);
+}
+
+/* Remove and return the entry mapped to secure ID @descriptor, taking the
+ * map's write lock.  Returns NULL if the ID is not present.  The entry's
+ * reference count is not modified. */
+ump_dd_mem *ump_random_mapping_remove(ump_random_mapping *map, int descriptor)
+{
+	ump_dd_mem *mem;
+
+	_mali_osk_mutex_rw_wait(map->lock, _MALI_OSK_LOCKMODE_RW);
+	mem = ump_random_mapping_remove_internal(map, descriptor);
+	_mali_osk_mutex_rw_signal(map->lock, _MALI_OSK_LOCKMODE_RW);
+
+	return mem;
+}
--- /dev/null
+/*
+ * Copyright (C) 2010-2011, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+/**
+ * @file ump_kernel_random_mapping.h
+ */
+
+#ifndef __UMP_KERNEL_RANDOM_MAPPING_H__
+#define __UMP_KERNEL_RANDOM_MAPPING_H__
+
+#include "mali_osk.h"
+#include <linux/rbtree.h>
+
+/* Enable throttling of repeated failed secure-ID lookups. */
+#define UMP_RANDOM_MAP_DELAY 1
+#define UMP_FAILED_LOOKUP_DELAY 10 /* NOTE(review): documented as ms, but used both scaled by HZ and as raw jiffies in the .c file — confirm unit */
+#define UMP_FAILED_LOOKUPS_ALLOWED 10 /* number of allowed failed lookups */
+
+/**
+ * The random mapping object
+ * Provides a separate namespace where we can map an integer to a pointer
+ */
+typedef struct ump_random_mapping {
+	_mali_osk_mutex_rw_t *lock; /**< Lock protecting access to the mapping object */
+	struct rb_root root; /**< rb-tree of ump_dd_mem entries keyed by secure ID */
+#if UMP_RANDOM_MAP_DELAY
+	struct {
+		unsigned long count; /**< Failed lookups since last reset */
+		unsigned long timestamp; /**< jiffies of the last failed lookup */
+	} failed;
+#endif
+} ump_random_mapping;
+
+/**
+ * Create a random mapping object
+ * Creates an empty mapping from random secure IDs to memory descriptors
+ * @return Pointer to a random mapping object, NULL on failure
+ */
+ump_random_mapping *ump_random_mapping_create(void);
+
+/**
+ * Destroy a random mapping object
+ * @param map The map to free
+ */
+void ump_random_mapping_destroy(ump_random_mapping *map);
+
+/**
+ * Allocate a new mapping entry (random ID)
+ * Assigns an unused random secure ID to @mem and inserts it in the map.
+ * @param map The map to allocate a new entry in
+ * @param mem The memory descriptor to insert; its secure_id field is set
+ * @return 0 on success (the implementation retries until an ID is found)
+ */
+int ump_random_mapping_insert(ump_random_mapping *map, ump_dd_mem *mem);
+
+/**
+ * Get the value mapped to by a random ID
+ *
+ * If the lookup fails, punish the calling thread by applying a delay.
+ *
+ * @param map The map to lookup the random id in
+ * @param id The ID to lookup
+ * @return ump_dd_mem pointer (with its reference count incremented) on
+ *         successful lookup, NULL on error
+ */
+ump_dd_mem *ump_random_mapping_get(ump_random_mapping *map, int id);
+
+/**
+ * Drop one reference to @mem; on the final release the entry is removed
+ * from the device's global map and the descriptor is freed.
+ */
+void ump_random_mapping_put(ump_dd_mem *mem);
+
+/**
+ * Free the random ID
+ * For the random to be reused it has to be freed
+ * @param map The map to free the random from
+ * @param id The ID to free
+ * @return The removed ump_dd_mem, or NULL if the ID was not present
+ */
+ump_dd_mem *ump_random_mapping_remove(ump_random_mapping *map, int id);
+
+#endif /* __UMP_KERNEL_RANDOM_MAPPING_H__ */
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2010, 2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/module.h> /* kernel module definitions */
#include "ump_kernel_common.h"
#include "ump_kernel_memory_backend_os.h"
#include "ump_kernel_memory_backend_dedicated.h"
-extern unsigned long totalram_pages;
/* Configure which dynamic memory allocator to use */
int ump_backend = ARCH_UMP_BACKEND_DEFAULT;
module_param(ump_memory_size, uint, S_IRUGO); /* r--r--r-- */
MODULE_PARM_DESC(ump_memory_size, "The size of fixed memory to map in the dedicated memory backend");
-ump_memory_backend* ump_memory_backend_create ( void )
+ump_memory_backend *ump_memory_backend_create(void)
{
- ump_memory_backend * backend = NULL;
+ ump_memory_backend *backend = NULL;
/* Create the dynamic memory allocator backend */
if (0 == ump_backend) {
}
backend = ump_block_allocator_create(ump_memory_address, ump_memory_size);
} else if (1 == ump_backend) {
- ump_memory_size = totalram_pages * 4 * 1024 / 2;
DBG_MSG(2, ("Using OS memory backend, allocation limit: %d\n", ump_memory_size));
backend = ump_os_memory_backend_create(ump_memory_size);
}
return backend;
}
-void ump_memory_backend_destroy( void )
+void ump_memory_backend_destroy(void)
{
if (0 == ump_backend) {
DBG_MSG(2, ("Releasing dedicated memory: 0x%08x\n", ump_memory_address));
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2010 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
#include "ump_osk.h"
#include <asm/atomic.h>
-int _ump_osk_atomic_dec_and_read( _mali_osk_atomic_t *atom )
+int _ump_osk_atomic_dec_and_read(_mali_osk_atomic_t *atom)
{
return atomic_dec_return((atomic_t *)&atom->u.val);
}
-int _ump_osk_atomic_inc_and_read( _mali_osk_atomic_t *atom )
+int _ump_osk_atomic_inc_and_read(_mali_osk_atomic_t *atom)
{
return atomic_inc_return((atomic_t *)&atom->u.val);
}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2008-2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
#include <linux/slab.h>
#include <asm/memory.h>
-#include <asm/uaccess.h> /* to verify pointers from user space */
+#include <asm/uaccess.h> /* to verify pointers from user space */
#include <asm/cacheflush.h>
#include <linux/dma-mapping.h>
ump_memory_allocation *descriptor;
} ump_vma_usage_tracker;
-static void ump_vma_open(struct vm_area_struct * vma);
-static void ump_vma_close(struct vm_area_struct * vma);
+static void ump_vma_open(struct vm_area_struct *vma);
+static void ump_vma_close(struct vm_area_struct *vma);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf);
#else
-static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address);
+static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct *vma, unsigned long address);
#endif
static struct vm_operations_struct ump_vm_ops = {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
static int ump_cpu_page_fault_handler(struct vm_area_struct *vma, struct vm_fault *vmf)
#else
-static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct * vma, unsigned long address)
+static unsigned long ump_cpu_page_fault_handler(struct vm_area_struct *vma, unsigned long address)
#endif
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
- void __user * address;
+ void __user *address;
address = vmf->virtual_address;
#endif
MSG_ERR(("Page-fault in UMP memory region caused by the CPU\n"));
#endif
}
-static void ump_vma_open(struct vm_area_struct * vma)
+static void ump_vma_open(struct vm_area_struct *vma)
{
- ump_vma_usage_tracker * vma_usage_tracker;
+ ump_vma_usage_tracker *vma_usage_tracker;
int new_val;
- vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
+ vma_usage_tracker = (ump_vma_usage_tracker *)vma->vm_private_data;
BUG_ON(NULL == vma_usage_tracker);
new_val = atomic_inc_return(&vma_usage_tracker->references);
DBG_MSG(4, ("VMA open, VMA reference count incremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));
}
-static void ump_vma_close(struct vm_area_struct * vma)
+static void ump_vma_close(struct vm_area_struct *vma)
{
- ump_vma_usage_tracker * vma_usage_tracker;
+ ump_vma_usage_tracker *vma_usage_tracker;
_ump_uk_unmap_mem_s args;
int new_val;
- vma_usage_tracker = (ump_vma_usage_tracker*)vma->vm_private_data;
+ vma_usage_tracker = (ump_vma_usage_tracker *)vma->vm_private_data;
BUG_ON(NULL == vma_usage_tracker);
new_val = atomic_dec_return(&vma_usage_tracker->references);
DBG_MSG(4, ("VMA close, VMA reference count decremented. VMA: 0x%08lx, reference count: %d\n", (unsigned long)vma, new_val));
if (0 == new_val) {
- ump_memory_allocation * descriptor;
+ ump_memory_allocation *descriptor;
descriptor = vma_usage_tracker->descriptor;
args._ukk_private = NULL; /** @note unused */
DBG_MSG(4, ("No more VMA references left, releasing UMP memory\n"));
- _ump_ukk_unmap_mem( & args );
+ _ump_ukk_unmap_mem(& args);
/* vma_usage_tracker is free()d by _ump_osk_mem_mapregion_term() */
}
}
-_mali_osk_errcode_t _ump_osk_mem_mapregion_init( ump_memory_allocation * descriptor )
+_mali_osk_errcode_t _ump_osk_mem_mapregion_init(ump_memory_allocation *descriptor)
{
- ump_vma_usage_tracker * vma_usage_tracker;
+ ump_vma_usage_tracker *vma_usage_tracker;
struct vm_area_struct *vma;
if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
return -_MALI_OSK_ERR_FAULT;
}
- vma = (struct vm_area_struct*)descriptor->process_mapping_info;
- if (NULL == vma ) {
+ vma = (struct vm_area_struct *)descriptor->process_mapping_info;
+ if (NULL == vma) {
kfree(vma_usage_tracker);
return _MALI_OSK_ERR_FAULT;
}
#endif
- if (0==descriptor->is_cached) {
+ if (0 == descriptor->is_cached) {
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
}
- DBG_MSG(3, ("Mapping with page_prot: 0x%x\n", vma->vm_page_prot ));
+ DBG_MSG(3, ("Mapping with page_prot: 0x%x\n", vma->vm_page_prot));
/* Setup the functions which handle further VMA handling */
vma->vm_ops = &ump_vm_ops;
/* Do the va range allocation - in this case, it was done earlier, so we copy in that information */
- descriptor->mapping = (void __user*)vma->vm_start;
+ descriptor->mapping = (void __user *)vma->vm_start;
atomic_set(&vma_usage_tracker->references, 1); /*this can later be increased if process is forked, see ump_vma_open() */
vma_usage_tracker->descriptor = descriptor;
return _MALI_OSK_ERR_OK;
}
-void _ump_osk_mem_mapregion_term( ump_memory_allocation * descriptor )
+void _ump_osk_mem_mapregion_term(ump_memory_allocation *descriptor)
{
- struct vm_area_struct* vma;
- ump_vma_usage_tracker * vma_usage_tracker;
+ struct vm_area_struct *vma;
+ ump_vma_usage_tracker *vma_usage_tracker;
if (NULL == descriptor) return;
/* Linux does the right thing as part of munmap to remove the mapping
* All that remains is that we remove the vma_usage_tracker setup in init() */
- vma = (struct vm_area_struct*)descriptor->process_mapping_info;
+ vma = (struct vm_area_struct *)descriptor->process_mapping_info;
vma_usage_tracker = vma->vm_private_data;
return;
}
-_mali_osk_errcode_t _ump_osk_mem_mapregion_map( ump_memory_allocation * descriptor, u32 offset, u32 * phys_addr, unsigned long size )
+_mali_osk_errcode_t _ump_osk_mem_mapregion_map(ump_memory_allocation *descriptor, u32 offset, u32 *phys_addr, unsigned long size)
{
struct vm_area_struct *vma;
_mali_osk_errcode_t retval;
if (NULL == descriptor) return _MALI_OSK_ERR_FAULT;
- vma = (struct vm_area_struct*)descriptor->process_mapping_info;
+ vma = (struct vm_area_struct *)descriptor->process_mapping_info;
- if (NULL == vma ) return _MALI_OSK_ERR_FAULT;
+ if (NULL == vma) return _MALI_OSK_ERR_FAULT;
- retval = remap_pfn_range( vma, ((u32)descriptor->mapping) + offset, (*phys_addr) >> PAGE_SHIFT, size, vma->vm_page_prot) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;;
+ retval = remap_pfn_range(vma, ((u32)descriptor->mapping) + offset, (*phys_addr) >> PAGE_SHIFT, size, vma->vm_page_prot) ? _MALI_OSK_ERR_FAULT : _MALI_OSK_ERR_OK;;
DBG_MSG(4, ("Mapping virtual to physical memory. ID: %u, vma: 0x%08lx, virtual addr:0x%08lx, physical addr: 0x%08lx, size:%lu, prot:0x%x, vm_flags:0x%x RETVAL: 0x%x\n",
- ump_dd_secure_id_get(descriptor->handle),
- (unsigned long)vma,
- (unsigned long)(vma->vm_start + offset),
- (unsigned long)*phys_addr,
- size,
- (unsigned int)vma->vm_page_prot, vma->vm_flags, retval));
+ ump_dd_secure_id_get(descriptor->handle),
+ (unsigned long)vma,
+ (unsigned long)(vma->vm_start + offset),
+ (unsigned long)*phys_addr,
+ size,
+ (unsigned int)vma->vm_page_prot, vma->vm_flags, retval));
return retval;
}
__cpuc_flush_kern_all();
}
-void _ump_osk_msync( ump_dd_mem * mem, void * virt, u32 offset, u32 size, ump_uk_msync_op op, ump_session_data * session_data )
+void _ump_osk_msync(ump_dd_mem *mem, void *virt, u32 offset, u32 size, ump_uk_msync_op op, ump_session_data *session_data)
{
int i;
/* Flush L1 using virtual address, the entire range in one go.
* Only flush if user space process has a valid write mapping on given address. */
- if( (mem) && (virt!=NULL) && (access_ok(VERIFY_WRITE, virt, size)) ) {
+ if ((mem) && (virt != NULL) && (access_ok(VERIFY_WRITE, virt, size))) {
__cpuc_flush_dcache_area(virt, size);
DBG_MSG(3, ("UMP[%02u] Flushing CPU L1 Cache. CPU address: %x, size: %x\n", mem->secure_id, virt, size));
} else {
if (session_data) {
- if (op == _UMP_UK_MSYNC_FLUSH_L1 ) {
+ if (op == _UMP_UK_MSYNC_FLUSH_L1) {
DBG_MSG(4, ("UMP Pending L1 cache flushes: %d\n", session_data->has_pending_level1_cache_flush));
session_data->has_pending_level1_cache_flush = 0;
level1_cache_flush_all();
} else {
if (session_data->cache_operations_ongoing) {
session_data->has_pending_level1_cache_flush++;
- DBG_MSG(4, ("UMP[%02u] Defering the L1 flush. Nr pending:%d\n", mem->secure_id, session_data->has_pending_level1_cache_flush) );
+ DBG_MSG(4, ("UMP[%02u] Defering the L1 flush. Nr pending:%d\n", mem->secure_id, session_data->has_pending_level1_cache_flush));
} else {
/* Flushing the L1 cache for each switch_user() if ump_cache_operations_control(START) is not called */
level1_cache_flush_all();
}
}
- if ( NULL == mem ) return;
+ if (NULL == mem) return;
- if ( mem->size_bytes==size) {
- DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache\n",mem->secure_id));
+ if (mem->size_bytes == size) {
+ DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache\n", mem->secure_id));
} else {
DBG_MSG(3, ("UMP[%02u] Flushing CPU L2 Cache. Blocks:%u, TotalSize:%u. FlushSize:%u Offset:0x%x FirstPaddr:0x%08x\n",
- mem->secure_id, mem->nr_blocks, mem->size_bytes, size, offset, mem->block_array[0].addr));
+ mem->secure_id, mem->nr_blocks, mem->size_bytes, size, offset, mem->block_array[0].addr));
}
/* Flush L2 using physical addresses, block for block. */
- for (i=0 ; i < mem->nr_blocks; i++) {
+ for (i = 0 ; i < mem->nr_blocks; i++) {
u32 start_p, end_p;
ump_dd_physical_block *block;
block = &mem->block_array[i];
- if(offset >= block->size) {
+ if (offset >= block->size) {
offset -= block->size;
continue;
}
- if(offset) {
+ if (offset) {
start_p = (u32)block->addr + offset;
/* We'll zero the offset later, after using it to calculate end_p. */
} else {
start_p = (u32)block->addr;
}
- if(size < block->size - offset) {
- end_p = start_p + size - 1;
+ if (size < block->size - offset) {
+ end_p = start_p + size;
size = 0;
} else {
- if(offset) {
- end_p = start_p + (block->size - offset - 1);
+ if (offset) {
+ end_p = start_p + (block->size - offset);
size -= block->size - offset;
offset = 0;
} else {
- end_p = start_p + block->size - 1;
+ end_p = start_p + block->size;
size -= block->size;
}
}
- switch(op) {
+ switch (op) {
case _UMP_UK_MSYNC_CLEAN:
outer_clean_range(start_p, end_p);
break;
break;
}
- if(0 == size) {
+ if (0 == size) {
/* Nothing left to flush. */
break;
}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2010, 2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
#include "ump_kernel_linux.h"
/* is called from ump_kernel_constructor in common code */
-_mali_osk_errcode_t _ump_osk_init( void )
+_mali_osk_errcode_t _ump_osk_init(void)
{
if (0 != ump_kernel_device_initialize()) {
return _MALI_OSK_ERR_FAULT;
return _MALI_OSK_ERR_OK;
}
-_mali_osk_errcode_t _ump_osk_term( void )
+_mali_osk_errcode_t _ump_osk_term(void)
{
ump_kernel_device_terminate();
return _MALI_OSK_ERR_OK;
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2010, 2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
/*
* IOCTL operation; Allocate UMP memory
*/
-int ump_allocate_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+int ump_allocate_wrapper(u32 __user *argument, struct ump_session_data *session_data)
{
_ump_uk_allocate_s user_interaction;
_mali_osk_errcode_t err;
user_interaction.ctx = (void *) session_data;
- err = _ump_ukk_allocate( &user_interaction );
- if( _MALI_OSK_ERR_OK != err ) {
+ err = _ump_ukk_allocate(&user_interaction);
+ if (_MALI_OSK_ERR_OK != err) {
DBG_MSG(1, ("_ump_ukk_allocate() failed in ump_ioctl_allocate()\n"));
return map_errcode(err);
}
release_args.ctx = (void *) session_data;
release_args.secure_id = user_interaction.secure_id;
- err = _ump_ukk_release( &release_args );
- if(_MALI_OSK_ERR_OK != err) {
+ err = _ump_ukk_release(&release_args);
+ if (_MALI_OSK_ERR_OK != err) {
MSG_ERR(("_ump_ukk_release() also failed when trying to release newly allocated memory in ump_ioctl_allocate()\n"));
}
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2010, 2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2013-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
#endif
-int ump_allocate_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+int ump_allocate_wrapper(u32 __user *argument, struct ump_session_data *session_data);
#ifdef __cplusplus
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
/*
* IOCTL operation; Negotiate version of IOCTL API
*/
-int ump_get_api_version_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+int ump_get_api_version_wrapper(u32 __user *argument, struct ump_session_data *session_data)
{
_ump_uk_api_version_s version_info;
_mali_osk_errcode_t err;
return -EFAULT;
}
- version_info.ctx = (void*) session_data;
- err = _ump_uku_get_api_version( &version_info );
- if( _MALI_OSK_ERR_OK != err ) {
+ version_info.ctx = (void *) session_data;
+ err = _ump_uku_get_api_version(&version_info);
+ if (_MALI_OSK_ERR_OK != err) {
MSG_ERR(("_ump_uku_get_api_version() failed in ump_ioctl_get_api_version()\n"));
return map_errcode(err);
}
/*
* IOCTL operation; Release reference to specified UMP memory.
*/
-int ump_release_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+int ump_release_wrapper(u32 __user *argument, struct ump_session_data *session_data)
{
_ump_uk_release_s release_args;
_mali_osk_errcode_t err;
return -EFAULT;
}
- release_args.ctx = (void*) session_data;
- err = _ump_ukk_release( &release_args );
- if( _MALI_OSK_ERR_OK != err ) {
+ release_args.ctx = (void *) session_data;
+ err = _ump_ukk_release(&release_args);
+ if (_MALI_OSK_ERR_OK != err) {
MSG_ERR(("_ump_ukk_release() failed in ump_ioctl_release()\n"));
return map_errcode(err);
}
/*
* IOCTL operation; Return size for specified UMP memory.
*/
-int ump_size_get_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+int ump_size_get_wrapper(u32 __user *argument, struct ump_session_data *session_data)
{
_ump_uk_size_get_s user_interaction;
_mali_osk_errcode_t err;
}
user_interaction.ctx = (void *) session_data;
- err = _ump_ukk_size_get( &user_interaction );
- if( _MALI_OSK_ERR_OK != err ) {
+ err = _ump_ukk_size_get(&user_interaction);
+ if (_MALI_OSK_ERR_OK != err) {
MSG_ERR(("_ump_ukk_size_get() failed in ump_ioctl_size_get()\n"));
return map_errcode(err);
}
/*
* IOCTL operation; Do cache maintenance on specified UMP memory.
*/
-int ump_msync_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+int ump_msync_wrapper(u32 __user *argument, struct ump_session_data *session_data)
{
_ump_uk_msync_s user_interaction;
user_interaction.ctx = (void *) session_data;
- _ump_ukk_msync( &user_interaction );
+ _ump_ukk_msync(&user_interaction);
user_interaction.ctx = NULL;
return 0; /* success */
}
-int ump_cache_operations_control_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+int ump_cache_operations_control_wrapper(u32 __user *argument, struct ump_session_data *session_data)
{
_ump_uk_cache_operations_control_s user_interaction;
user_interaction.ctx = (void *) session_data;
- _ump_ukk_cache_operations_control((_ump_uk_cache_operations_control_s*) &user_interaction );
+ _ump_ukk_cache_operations_control((_ump_uk_cache_operations_control_s *) &user_interaction);
user_interaction.ctx = NULL;
return 0; /* success */
}
-int ump_switch_hw_usage_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+int ump_switch_hw_usage_wrapper(u32 __user *argument, struct ump_session_data *session_data)
{
_ump_uk_switch_hw_usage_s user_interaction;
user_interaction.ctx = (void *) session_data;
- _ump_ukk_switch_hw_usage( &user_interaction );
+ _ump_ukk_switch_hw_usage(&user_interaction);
user_interaction.ctx = NULL;
return 0; /* success */
}
-int ump_lock_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+int ump_lock_wrapper(u32 __user *argument, struct ump_session_data *session_data)
{
_ump_uk_lock_s user_interaction;
user_interaction.ctx = (void *) session_data;
- _ump_ukk_lock( &user_interaction );
+ _ump_ukk_lock(&user_interaction);
user_interaction.ctx = NULL;
return 0; /* success */
}
-int ump_unlock_wrapper(u32 __user * argument, struct ump_session_data * session_data)
+int ump_unlock_wrapper(u32 __user *argument, struct ump_session_data *session_data)
{
_ump_uk_unlock_s user_interaction;
user_interaction.ctx = (void *) session_data;
- _ump_ukk_unlock( &user_interaction );
+ _ump_ukk_unlock(&user_interaction);
user_interaction.ctx = NULL;
/*
- * This confidential and proprietary software may be used only as
- * authorised by a licensing agreement from ARM Limited
- * (C) COPYRIGHT 2009-2010, 2012-2013 ARM Limited
- * ALL RIGHTS RESERVED
- * The entire notice above must be reproduced on all authorised
- * copies and copies may only be made to the extent permitted
- * by a licensing agreement from ARM Limited.
+ * Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
/**
-int ump_get_api_version_wrapper(u32 __user * argument, struct ump_session_data * session_data);
-int ump_release_wrapper(u32 __user * argument, struct ump_session_data * session_data);
-int ump_size_get_wrapper(u32 __user * argument, struct ump_session_data * session_data);
-int ump_msync_wrapper(u32 __user * argument, struct ump_session_data * session_data);
-int ump_cache_operations_control_wrapper(u32 __user * argument, struct ump_session_data * session_data);
-int ump_switch_hw_usage_wrapper(u32 __user * argument, struct ump_session_data * session_data);
-int ump_lock_wrapper(u32 __user * argument, struct ump_session_data * session_data);
-int ump_unlock_wrapper(u32 __user * argument, struct ump_session_data * session_data);
+int ump_get_api_version_wrapper(u32 __user *argument, struct ump_session_data *session_data);
+int ump_release_wrapper(u32 __user *argument, struct ump_session_data *session_data);
+int ump_size_get_wrapper(u32 __user *argument, struct ump_session_data *session_data);
+int ump_msync_wrapper(u32 __user *argument, struct ump_session_data *session_data);
+int ump_cache_operations_control_wrapper(u32 __user *argument, struct ump_session_data *session_data);
+int ump_switch_hw_usage_wrapper(u32 __user *argument, struct ump_session_data *session_data);
+int ump_lock_wrapper(u32 __user *argument, struct ump_session_data *session_data);
+int ump_unlock_wrapper(u32 __user *argument, struct ump_session_data *session_data);
--- /dev/null
+Building the UMP Device Driver for Linux
+----------------------------------------
+
+Build the UMP Device Driver for Linux by running the following make command:
+
+KDIR=<kdir_path> CONFIG=<your_config> BUILD=<build_option> make
+
+where
+ kdir_path: Path to your Linux Kernel directory
+ your_config: Name of the sub-folder to find the required config.h file
+ ("arch-" will be prepended)
+ build_option: debug or release. Debug is default.
+
+The config.h contains following configuration parameters:
+
+ARCH_UMP_BACKEND_DEFAULT
+ 0 specifies the dedicated memory allocator.
+ 1 specifies the OS memory allocator.
+ARCH_UMP_MEMORY_ADDRESS_DEFAULT
+ This is only required for the dedicated memory allocator, and specifies
+ the physical start address of the memory block reserved for UMP.
+ARCH_UMP_MEMORY_SIZE_DEFAULT
+ This specifies the size of the memory block reserved for UMP, or the
+ maximum limit for allocations from the OS.
+
+The result will be a ump.ko file, which can be loaded into the Linux kernel
+by using the insmod command. The driver can also be built as a part of the
+kernel itself.
--- /dev/null
+#
+# Copyright (C) 2012, 2014 ARM Limited. All rights reserved.
+#
+# This program is free software and is provided to you under the terms of the GNU General Public License version 2
+# as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+#
+# A copy of the licence is included with the program, and can also be obtained from Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+
+# default to building for the host
+ARCH ?= $(shell uname -m)
+
+# linux build system integration
+
+ifneq ($(KERNELRELEASE),)
+# Inside the kernel build system
+
+EXTRA_CFLAGS += -I$(KBUILD_EXTMOD)
+
+SRC = umplock_driver.c
+
+MODULE:=umplock.ko
+
+obj-m := $(MODULE:.ko=.o)
+$(MODULE:.ko=-y) := $(SRC:.c=.o)
+
+$(MODULE:.ko=-objs) := $(SRC:.c=.o)
+
+else
+# Outside the kernel build system
+#
+#
+
+# Get any user defined KDIR-<names> or maybe even a hardcoded KDIR
+-include KDIR_CONFIGURATION
+
+# Define host system directory
+KDIR-$(shell uname -m):=/lib/modules/$(shell uname -r)/build
+
+ifeq ($(ARCH), arm)
+ # when compiling for ARM we're cross compiling
+ export CROSS_COMPILE ?= arm-none-linux-gnueabi-
+ CONFIG ?= arm
+else
+ # Compiling for the host
+ CONFIG ?= $(shell uname -m)
+endif
+
+# default cpu to select
+CPU ?= $(shell uname -m)
+
+# look up KDIR based on CPU selection
+KDIR ?= $(KDIR-$(CPU))
+
+ifeq ($(KDIR),)
+$(error No KDIR found for platform $(CPU))
+endif
+
+all:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR)
+
+kernelrelease:
+ $(MAKE) -C $(KDIR) kernelrelease
+
+clean:
+ $(MAKE) ARCH=$(ARCH) -C $(KDIR) M=$(CURDIR) clean
+
+endif
--- /dev/null
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <asm/uaccess.h>
+#include "umplock_ioctl.h"
+#include <linux/sched.h>
+
+#define MAX_ITEMS 1024
+#define MAX_PIDS 128
+
+typedef struct lock_cmd_priv {
+ uint32_t msg[128]; /*ioctl args*/
+ u32 pid; /*process id*/
+} _lock_cmd_priv;
+
+typedef struct lock_ref {
+ int ref_count;
+ u32 pid;
+} _lock_ref;
+
+typedef struct umplock_item {
+ u32 secure_id;
+ u32 id_ref_count;
+ u32 owner;
+ _lock_access_usage usage;
+ _lock_ref references[MAX_PIDS];
+ struct semaphore item_lock;
+} umplock_item;
+
+typedef struct umplock_device_private {
+ struct mutex item_list_lock;
+ atomic_t sessions;
+ umplock_item items[MAX_ITEMS];
+ u32 pids[MAX_PIDS];
+} umplock_device_private;
+
+struct umplock_device {
+ struct cdev cdev;
+ struct class *umplock_class;
+};
+
+static struct umplock_device umplock_device;
+static umplock_device_private device;
+static dev_t umplock_dev;
+static char umplock_dev_name[] = "umplock";
+
+int umplock_debug_level = 0;
+module_param(umplock_debug_level, int, S_IRUSR | S_IWUSR | S_IWGRP | S_IRGRP | S_IROTH); /* rw-rw-r-- */
+MODULE_PARM_DESC(umplock_debug_level, "set umplock_debug_level to print debug messages");
+
+#define PDEBUG(level, fmt, args...) do { if ((level) <= umplock_debug_level) printk(KERN_DEBUG "umplock: " fmt, ##args); } while (0)
+#define PERROR(fmt, args...) do { printk(KERN_ERR "umplock: " fmt, ##args); } while (0)
+
+int umplock_find_item(u32 secure_id)
+{
+ int i;
+ for (i = 0; i < MAX_ITEMS; i++) {
+ if (device.items[i].secure_id == secure_id) {
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+static int umplock_find_item_by_pid(_lock_cmd_priv *lock_cmd, int *item_slot, int *ref_slot)
+{
+ _lock_item_s *lock_item;
+ int i, j;
+
+ lock_item = (_lock_item_s *)&lock_cmd->msg;
+
+ i = umplock_find_item(lock_item->secure_id);
+
+ if (i < 0) {
+ return -1;
+ }
+
+ for (j = 0; j < MAX_PIDS; j++) {
+ if (device.items[i].references[j].pid == lock_cmd->pid) {
+ *item_slot = i;
+ *ref_slot = j;
+ return 0;
+ }
+ }
+ return -1 ;
+}
+
+static int umplock_find_client_valid(u32 pid)
+{
+ int i;
+
+ if (pid == 0) {
+ return -1;
+ }
+
+ for (i = 0; i < MAX_PIDS; i++) {
+ if (device.pids[i] == pid) {
+ return i;
+ }
+ }
+
+ return -1;
+}
+
+static int do_umplock_create_locked(_lock_cmd_priv *lock_cmd)
+{
+ int i_index, ref_index;
+ int ret;
+ _lock_item_s *lock_item = (_lock_item_s *)&lock_cmd->msg;
+
+ i_index = ref_index = -1;
+
+ ret = umplock_find_client_valid(lock_cmd->pid);
+ if (ret < 0) {
+ /*lock request from an invalid client pid, do nothing*/
+ return -EINVAL;
+ }
+
+ ret = umplock_find_item_by_pid(lock_cmd, &i_index, &ref_index);
+ if (ret >= 0) {
+ } else if ((i_index = umplock_find_item(lock_item->secure_id)) >= 0) {
+ for (ref_index = 0; ref_index < MAX_PIDS; ref_index++) {
+ if (device.items[i_index].references[ref_index].pid == 0) {
+ break;
+ }
+ }
+ if (ref_index < MAX_PIDS) {
+ device.items[i_index].references[ref_index].pid = lock_cmd->pid;
+ device.items[i_index].references[ref_index].ref_count = 0;
+ } else {
+ PERROR("whoops, item ran out of available reference slots\n");
+ return -EINVAL;
+
+ }
+ } else {
+ i_index = umplock_find_item(0);
+
+ if (i_index >= 0) {
+ device.items[i_index].secure_id = lock_item->secure_id;
+ device.items[i_index].id_ref_count = 0;
+ device.items[i_index].usage = lock_item->usage;
+ device.items[i_index].references[0].pid = lock_cmd->pid;
+ device.items[i_index].references[0].ref_count = 0;
+ sema_init(&device.items[i_index].item_lock, 1);
+ } else {
+ PERROR("whoops, ran out of available slots\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+/** IOCTLs **/
+
+static int do_umplock_create(_lock_cmd_priv *lock_cmd)
+{
+ return 0;
+}
+
+static int do_umplock_process(_lock_cmd_priv *lock_cmd)
+{
+ int ret, i_index, ref_index;
+ _lock_item_s *lock_item = (_lock_item_s *)&lock_cmd->msg;
+
+ mutex_lock(&device.item_list_lock);
+
+ if (0 == lock_item->secure_id) {
+ PERROR("IOCTL_UMPLOCK_PROCESS called with secure_id is 0, pid: %d\n", lock_cmd->pid);
+ mutex_unlock(&device.item_list_lock);
+ return -EINVAL;
+ }
+
+ ret = do_umplock_create_locked(lock_cmd);
+ if (ret < 0) {
+ mutex_unlock(&device.item_list_lock);
+ return -EINVAL;
+ }
+
+ ret = umplock_find_item_by_pid(lock_cmd, &i_index, &ref_index);
+ if (ret < 0) {
+ /*fail to find an item*/
+ PERROR("IOCTL_UMPLOCK_PROCESS called with invalid parameter, pid: %d\n", lock_cmd->pid);
+ mutex_unlock(&device.item_list_lock);
+ return -EINVAL;
+ }
+ device.items[i_index].references[ref_index].ref_count++;
+ device.items[i_index].id_ref_count++;
+ PDEBUG(1, "try to lock, pid: %d, secure_id: 0x%x, ref_count: %d\n", lock_cmd->pid, lock_item->secure_id, device.items[i_index].references[ref_index].ref_count);
+
+ if (lock_cmd->pid == device.items[i_index].owner) {
+ PDEBUG(1, "already own the lock, pid: %d, secure_id: 0x%x, ref_count: %d\n", lock_cmd->pid, lock_item->secure_id, device.items[i_index].references[ref_index].ref_count);
+ mutex_unlock(&device.item_list_lock);
+ return 0;
+ }
+
+ mutex_unlock(&device.item_list_lock);
+ if (down_interruptible(&device.items[i_index].item_lock)) {
+ /*woke up without holding the umplock; restore previous state and return*/
+ mutex_lock(&device.item_list_lock);
+ device.items[i_index].references[ref_index].ref_count--;
+ device.items[i_index].id_ref_count--;
+ if (0 == device.items[i_index].references[ref_index].ref_count) {
+ device.items[i_index].references[ref_index].pid = 0;
+ if (0 == device.items[i_index].id_ref_count) {
+ PDEBUG(1, "release item, pid: %d, secure_id: 0x%x\n", lock_cmd->pid, lock_item->secure_id);
+ device.items[i_index].secure_id = 0;
+ }
+ }
+
+ PERROR("failed lock, pid: %d, secure_id: 0x%x, ref_count: %d\n", lock_cmd->pid, lock_item->secure_id, device.items[i_index].references[ref_index].ref_count);
+
+ mutex_unlock(&device.item_list_lock);
+ return -ERESTARTSYS;
+ }
+
+ mutex_lock(&device.item_list_lock);
+ PDEBUG(1, "got lock, pid: %d, secure_id: 0x%x, ref_count: %d\n", lock_cmd->pid, lock_item->secure_id, device.items[i_index].references[ref_index].ref_count);
+ device.items[i_index].owner = lock_cmd->pid;
+ mutex_unlock(&device.item_list_lock);
+
+ return 0;
+}
+
+static int do_umplock_release(_lock_cmd_priv *lock_cmd)
+{
+ int ret, i_index, ref_index;
+ _lock_item_s *lock_item = (_lock_item_s *)&lock_cmd->msg;
+
+ mutex_lock(&device.item_list_lock);
+
+ if (0 == lock_item->secure_id) {
+ PERROR("IOCTL_UMPLOCK_RELEASE called with secure_id is 0, pid: %d\n", lock_cmd->pid);
+ mutex_unlock(&device.item_list_lock);
+ return -EINVAL;
+ }
+
+ ret = umplock_find_client_valid(lock_cmd->pid);
+ if (ret < 0) {
+ /*release request from an invalid client pid, do nothing*/
+ mutex_unlock(&device.item_list_lock);
+ return -EPERM;
+ }
+
+ i_index = ref_index = -1;
+
+ ret = umplock_find_item_by_pid(lock_cmd, &i_index, &ref_index);
+ if (ret < 0) {
+ /*fail to find item*/
+ PERROR("IOCTL_UMPLOCK_RELEASE called with invalid parameter pid: %d, secid: 0x%x\n", lock_cmd->pid, lock_item->secure_id);
+ mutex_unlock(&device.item_list_lock);
+ return -EINVAL;
+ }
+
+ /* if the lock is not owned by this process */
+ if (lock_cmd->pid != device.items[i_index].owner) {
+ mutex_unlock(&device.item_list_lock);
+ return -EPERM;
+ }
+
+ /* if the ref_count is 0, that means nothing to unlock, just return */
+ if (0 == device.items[i_index].references[ref_index].ref_count) {
+ mutex_unlock(&device.item_list_lock);
+ return 0;
+ }
+
+ device.items[i_index].references[ref_index].ref_count--;
+ device.items[i_index].id_ref_count--;
+ PDEBUG(1, "unlock, pid: %d, secure_id: 0x%x, ref_count: %d\n", lock_cmd->pid, lock_item->secure_id, device.items[i_index].references[ref_index].ref_count);
+
+ if (0 == device.items[i_index].references[ref_index].ref_count) {
+ device.items[i_index].references[ref_index].pid = 0;
+ if (0 == device.items[i_index].id_ref_count) {
+ PDEBUG(1, "release item, pid: %d, secure_id: 0x%x\n", lock_cmd->pid, lock_item->secure_id);
+ device.items[i_index].secure_id = 0;
+ }
+ device.items[i_index].owner = 0;
+ up(&device.items[i_index].item_lock);
+ }
+ mutex_unlock(&device.item_list_lock);
+
+ return 0;
+}
+
+static int do_umplock_zap(void)
+{
+ int i;
+
+ PDEBUG(1, "ZAP ALL ENTRIES!\n");
+
+ mutex_lock(&device.item_list_lock);
+
+ for (i = 0; i < MAX_ITEMS; i++) {
+ device.items[i].secure_id = 0;
+ memset(&device.items[i].references, 0, sizeof(_lock_ref) * MAX_PIDS);
+ sema_init(&device.items[i].item_lock, 1);
+ }
+
+ for (i = 0; i < MAX_PIDS; i++) {
+ device.pids[i] = 0;
+ }
+ mutex_unlock(&device.item_list_lock);
+
+ return 0;
+}
+
+static int do_umplock_dump(void)
+{
+ int i, j;
+
+ mutex_lock(&device.item_list_lock);
+ PERROR("dump all the items begin\n");
+ for (i = 0; i < MAX_ITEMS; i++) {
+ for (j = 0; j < MAX_PIDS; j++) {
+ if (device.items[i].secure_id != 0 && device.items[i].references[j].pid != 0) {
+ PERROR("item[%d]->secure_id=0x%x, owner=%d\t reference[%d].ref_count=%d.pid=%d\n",
+ i,
+ device.items[i].secure_id,
+ device.items[i].owner,
+ j,
+ device.items[i].references[j].ref_count,
+ device.items[i].references[j].pid);
+ }
+ }
+ }
+ PERROR("dump all the items end\n");
+ mutex_unlock(&device.item_list_lock);
+
+ return 0;
+}
+
+int do_umplock_client_add(_lock_cmd_priv *lock_cmd)
+{
+ int i;
+ mutex_lock(&device.item_list_lock);
+ for (i = 0; i < MAX_PIDS; i++) {
+ if (device.pids[i] == lock_cmd->pid) {
+ mutex_unlock(&device.item_list_lock);
+ return 0;
+ }
+ }
+ for (i = 0; i < MAX_PIDS; i++) {
+ if (device.pids[i] == 0) {
+ device.pids[i] = lock_cmd->pid;
+ break;
+ }
+ }
+ mutex_unlock(&device.item_list_lock);
+ if (i == MAX_PIDS) {
+ PERROR("Oops, Run out of client slots\n ");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int do_umplock_client_delete(_lock_cmd_priv *lock_cmd)
+{
+ int p_index = -1, i_index = -1, ref_index = -1;
+ int ret;
+ _lock_item_s *lock_item;
+ lock_item = (_lock_item_s *)&lock_cmd->msg;
+
+ mutex_lock(&device.item_list_lock);
+ p_index = umplock_find_client_valid(lock_cmd->pid);
+ /*lock item pid is not valid.*/
+ if (p_index < 0) {
+ mutex_unlock(&device.item_list_lock);
+ return 0;
+ }
+
+ /*walk through umplock item list and release reference attached to this client*/
+ for (i_index = 0; i_index < MAX_ITEMS; i_index++) {
+ lock_item->secure_id = device.items[i_index].secure_id;
+
+ /*find the item index and reference slot for the lock_item*/
+ ret = umplock_find_item_by_pid(lock_cmd, &i_index, &ref_index);
+
+ if (ret < 0) {
+ /*client has no reference on this umplock item, skip*/
+ continue;
+ }
+ while (device.items[i_index].references[ref_index].ref_count) {
+ /*release references on this client*/
+
+ PDEBUG(1, "delete client, pid: %d, ref_count: %d\n", lock_cmd->pid, device.items[i_index].references[ref_index].ref_count);
+
+ mutex_unlock(&device.item_list_lock);
+ do_umplock_release(lock_cmd);
+ mutex_lock(&device.item_list_lock);
+ }
+ }
+
+ /*remove the pid from umplock valid pid list*/
+ device.pids[p_index] = 0;
+ mutex_unlock(&device.item_list_lock);
+
+ return 0;
+}
+
+static long umplock_driver_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+ int ret;
+ uint32_t size = _IOC_SIZE(cmd);
+ _lock_cmd_priv lock_cmd ;
+
+ if (_IOC_TYPE(cmd) != LOCK_IOCTL_GROUP) {
+ return -ENOTTY;
+ }
+
+ if (_IOC_NR(cmd) >= LOCK_IOCTL_MAX_CMDS) {
+ return -ENOTTY;
+ }
+
+ switch (cmd) {
+ case LOCK_IOCTL_CREATE:
+ if (size != sizeof(_lock_item_s)) {
+ return -ENOTTY;
+ }
+
+ if (copy_from_user(&lock_cmd.msg, (void __user *)arg, size)) {
+ return -EFAULT;
+ }
+ lock_cmd.pid = (u32)current->tgid;
+ ret = do_umplock_create(&lock_cmd);
+ if (ret) {
+ return ret;
+ }
+ return 0;
+
+ case LOCK_IOCTL_PROCESS:
+ if (size != sizeof(_lock_item_s)) {
+ return -ENOTTY;
+ }
+
+ if (copy_from_user(&lock_cmd.msg, (void __user *)arg, size)) {
+ return -EFAULT;
+ }
+ lock_cmd.pid = (u32)current->tgid;
+ return do_umplock_process(&lock_cmd);
+
+ case LOCK_IOCTL_RELEASE:
+ if (size != sizeof(_lock_item_s)) {
+ return -ENOTTY;
+ }
+
+ if (copy_from_user(&lock_cmd.msg, (void __user *)arg, size)) {
+ return -EFAULT;
+ }
+ lock_cmd.pid = (u32)current->tgid;
+ ret = do_umplock_release(&lock_cmd);
+ if (ret) {
+ return ret;
+ }
+ return 0;
+
+ case LOCK_IOCTL_ZAP:
+ do_umplock_zap();
+ return 0;
+
+ case LOCK_IOCTL_DUMP:
+ do_umplock_dump();
+ return 0;
+ }
+
+ return -ENOIOCTLCMD;
+}
+
+static int umplock_driver_open(struct inode *inode, struct file *filp)
+{
+ _lock_cmd_priv lock_cmd;
+
+ atomic_inc(&device.sessions);
+ PDEBUG(1, "OPEN SESSION (%i references)\n", atomic_read(&device.sessions));
+
+ lock_cmd.pid = (u32)current->tgid;
+ do_umplock_client_add(&lock_cmd);
+
+ return 0;
+}
+
+static int umplock_driver_release(struct inode *inode, struct file *filp)
+{
+ int sessions = 0;
+ _lock_cmd_priv lock_cmd;
+
+ lock_cmd.pid = (u32)current->tgid;
+ do_umplock_client_delete(&lock_cmd);
+
+ mutex_lock(&device.item_list_lock);
+ atomic_dec(&device.sessions);
+ sessions = atomic_read(&device.sessions);
+ PDEBUG(1, "CLOSE SESSION (%i references)\n", sessions);
+ mutex_unlock(&device.item_list_lock);
+ if (sessions == 0) {
+ do_umplock_zap();
+ }
+
+ return 0;
+}
+
+static struct file_operations umplock_fops = {
+ .owner = THIS_MODULE,
+ .open = umplock_driver_open,
+ .release = umplock_driver_release,
+ .unlocked_ioctl = umplock_driver_ioctl,
+};
+
+int umplock_device_initialize(void)
+{
+ int err;
+
+ err = alloc_chrdev_region(&umplock_dev, 0, 1, umplock_dev_name);
+
+ if (0 == err) {
+ memset(&umplock_device, 0, sizeof(umplock_device));
+ cdev_init(&umplock_device.cdev, &umplock_fops);
+ umplock_device.cdev.owner = THIS_MODULE;
+ umplock_device.cdev.ops = &umplock_fops;
+
+ err = cdev_add(&umplock_device.cdev, umplock_dev, 1);
+ if (0 == err) {
+ umplock_device.umplock_class = class_create(THIS_MODULE, umplock_dev_name);
+ if (IS_ERR(umplock_device.umplock_class)) {
+ err = PTR_ERR(umplock_device.umplock_class);
+ } else {
+ struct device *mdev;
+ mdev = device_create(umplock_device.umplock_class, NULL, umplock_dev, NULL, umplock_dev_name);
+ if (!IS_ERR(mdev)) {
+ return 0; /* all ok */
+ }
+
+ err = PTR_ERR(mdev);
+ class_destroy(umplock_device.umplock_class);
+ }
+ cdev_del(&umplock_device.cdev);
+ }
+
+ unregister_chrdev_region(umplock_dev, 1);
+ } else {
+ PERROR("alloc chardev region failed\n");
+ }
+
+ return err;
+}
+
+void umplock_device_terminate(void)
+{
+ device_destroy(umplock_device.umplock_class, umplock_dev);
+ class_destroy(umplock_device.umplock_class);
+
+ cdev_del(&umplock_device.cdev);
+ unregister_chrdev_region(umplock_dev, 1);
+}
+
+static int __init umplock_initialize_module(void)
+{
+ PDEBUG(1, "Inserting UMP lock device driver. Compiled: %s, time: %s\n", __DATE__, __TIME__);
+
+ mutex_init(&device.item_list_lock);
+ if (umplock_device_initialize() != 0) {
+ PERROR("UMP lock device driver init failed\n");
+ return -ENOTTY;
+ }
+ memset(&device.items, 0, sizeof(umplock_item) * MAX_ITEMS);
+ memset(&device.pids, 0, sizeof(u32) * MAX_PIDS);
+ atomic_set(&device.sessions, 0);
+
+ PDEBUG(1, "UMP lock device driver loaded\n");
+
+ return 0;
+}
+
+static void __exit umplock_cleanup_module(void)
+{
+ PDEBUG(1, "unloading UMP lock module\n");
+
+ memset(&device.items, 0, sizeof(umplock_item) * MAX_ITEMS);
+ memset(&device.pids, 0, sizeof(u32) * MAX_PIDS);
+ umplock_device_terminate();
+ mutex_destroy(&device.item_list_lock);
+
+ PDEBUG(1, "UMP lock module unloaded\n");
+}
+
+module_init(umplock_initialize_module);
+module_exit(umplock_cleanup_module);
+
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("ARM Ltd.");
+MODULE_DESCRIPTION("ARM UMP locker");
--- /dev/null
+/*
+ * Copyright (C) 2012-2014 ARM Limited. All rights reserved.
+ *
+ * This program is free software and is provided to you under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained from Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#ifndef __UMPLOCK_IOCTL_H__
+#define __UMPLOCK_IOCTL_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#ifndef __user
+#define __user
+#endif
+
+
+/**
+ * @file umplock_ioctl.h
+ * This file describes the interface needed to use the Linux device driver.
+ * The interface is used by the userspace Mali DDK.
+ */
+
+typedef enum {
+ _LOCK_ACCESS_RENDERABLE = 1,
+ _LOCK_ACCESS_TEXTURE,
+ _LOCK_ACCESS_CPU_WRITE,
+ _LOCK_ACCESS_CPU_READ,
+} _lock_access_usage;
+
+typedef struct _lock_item_s {
+ unsigned int secure_id;
+ _lock_access_usage usage;
+} _lock_item_s;
+
+
+#define LOCK_IOCTL_GROUP 0x91
+
+#define _LOCK_IOCTL_CREATE_CMD 0 /* create kernel lock item */
+#define _LOCK_IOCTL_PROCESS_CMD 1 /* process kernel lock item */
+#define _LOCK_IOCTL_RELEASE_CMD 2 /* release kernel lock item */
+#define _LOCK_IOCTL_ZAP_CMD 3 /* clean up all kernel lock items */
+#define _LOCK_IOCTL_DUMP_CMD 4 /* dump all the items */
+
+#define LOCK_IOCTL_MAX_CMDS 5
+
+#define LOCK_IOCTL_CREATE _IOW( LOCK_IOCTL_GROUP, _LOCK_IOCTL_CREATE_CMD, _lock_item_s )
+#define LOCK_IOCTL_PROCESS _IOW( LOCK_IOCTL_GROUP, _LOCK_IOCTL_PROCESS_CMD, _lock_item_s )
+#define LOCK_IOCTL_RELEASE _IOW( LOCK_IOCTL_GROUP, _LOCK_IOCTL_RELEASE_CMD, _lock_item_s )
+#define LOCK_IOCTL_ZAP _IO ( LOCK_IOCTL_GROUP, _LOCK_IOCTL_ZAP_CMD )
+#define LOCK_IOCTL_DUMP _IO ( LOCK_IOCTL_GROUP, _LOCK_IOCTL_DUMP_CMD )
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __UMPLOCK_IOCTL_H__ */
+